max_stars_count
int64 301
224k
| text
stringlengths 6
1.05M
| token_count
int64 3
727k
|
---|---|---|
614 | [
{ "home": "England", "away": "Portugal", "homescore": "1", "awayscore": "0" },
{ "home": "Macedonia", "away": "Iran", "homescore": "1", "awayscore": "3" },
{ "home": "Seychelles", "away": "Algeria", "homescore": "0", "awayscore": "2" },
{ "home": "New York City FC", "away": "Real Salt Lake", "homescore": "2", "awayscore": "3" },
{ "home": "FC Dallas", "away": "Houston Dynamo", "homescore": "0", "awayscore": "0" },
{ "home": "LA Galaxy", "away": "Sporting Kansas City" },
{ "home": "Malaysia", "away": "Timor-Leste", "homescore": "3", "awayscore": "0" },
{ "home": "Laos", "away": "India", "homescore": "0", "awayscore": "1" },
{ "home": "Macedonia", "away": "Iran", "homescore": "1", "awayscore": "3" },
{ "home": "England", "away": "Portugal", "homescore": "1", "awayscore": "0" },
{ "home": "Seychelles", "away": "Algeria", "homescore": "0", "awayscore": "2" },
{ "home": "New York City FC", "away": "Real Salt Lake", "homescore": "2", "awayscore": "3" },
{ "home": "FC Dallas", "away": "Houston Dynamo", "homescore": "0", "awayscore": "0" },
{ "home": "LA Galaxy", "away": "Sporting Kansas City" },
{ "home": "Chinese Taipei", "away": "Cambodia", "homescore": "2", "awayscore": "2" },
{ "home": "Laos", "away": "India", "homescore": "0", "awayscore": "1" },
{ "home": "Malaysia", "away": "Timor-Leste", "homescore": "3", "awayscore": "0" },
{ "home": "Tajikistan", "away": "Bangladesh", "homescore": "5", "awayscore": "0" },
{ "home": "Maldives", "away": "Yemen", "homescore": "0", "awayscore": "2" },
{ "home": "<NAME>", "away": "<NAME>", "homescore": "2", "awayscore": "2" },
{ "home": "<NAME>", "away": "<NAME>", "homescore": "3", "awayscore": "3" },
{ "home": "<NAME>", "away": "AA P<NAME>ta", "homescore": "1", "awayscore": "2" },
{ "home": "Flamengo", "away": "EC Vitória", "homescore": "1", "awayscore": "0" },
{ "home": "Palmeiras", "away": "Gremio", "homescore": "4", "awayscore": "3" },
{ "home": "<NAME>", "away": "Independ<NAME>", "homescore": "2", "awayscore": "1" },
{ "home": "<NAME>", "away": "Millonarios", "homescore": "1", "awayscore": "0" },
{ "home": "<NAME>", "away": "Nagoya Grampus", "homescore": "0", "awayscore": "0" },
{ "home": "<NAME>", "away": "<NAME>", "homescore": "0", "awayscore": "0" },
{ "home": "Enyimba", "away": "Abia Warriors" }
] | 964 |
623 | package com.netease.nim.uikit.common.media.imagepicker.view;
import android.animation.Animator;
import android.animation.ObjectAnimator;
import android.content.Context;
import android.graphics.drawable.BitmapDrawable;
import android.view.MotionEvent;
import android.view.View;
import android.view.ViewGroup;
import android.view.ViewTreeObserver;
import android.view.WindowManager;
import android.view.animation.AccelerateDecelerateInterpolator;
import android.widget.AdapterView;
import android.widget.BaseAdapter;
import android.widget.ListView;
import android.widget.PopupWindow;
import com.netease.nim.uikit.R;
import com.netease.nim.uikit.common.util.sys.ScreenUtil;
/**
 * Full-screen {@link PopupWindow} that shows the folder list of the image picker.
 * The list slides in from the top when shown; on dismissal the exit animation
 * plays first and the real {@code super.dismiss()} runs in its end callback.
 */
public class FolderPopUpWindow extends PopupWindow implements View.OnClickListener {

    private ListView listView;
    private OnItemClickListener onItemClickListener;

    public FolderPopUpWindow(Context context, BaseAdapter adapter) {
        super(context);
        final View view = View.inflate(context, R.layout.pop_folder, null);
        listView = view.findViewById(R.id.listView);
        listView.setAdapter(adapter);
        setContentView(view);
        setWidth(WindowManager.LayoutParams.MATCH_PARENT); // if not set, the popup defaults to the anchor view's width
        setHeight(WindowManager.LayoutParams.MATCH_PARENT);
        setFocusable(true);
        setOutsideTouchable(true);
        // A background drawable (even an empty one) is required for outside touches / back key to work.
        setBackgroundDrawable(new BitmapDrawable());
        // Disable the default window animation; the custom ObjectAnimators below are used instead.
        setAnimationStyle(0);
        // After the first layout pass, cap the list height at 315dp (one-shot listener).
        view.getViewTreeObserver().addOnGlobalLayoutListener(new ViewTreeObserver.OnGlobalLayoutListener() {
            @Override
            public void onGlobalLayout() {
                view.getViewTreeObserver().removeGlobalOnLayoutListener(this);
                int maxHeight = ScreenUtil.dip2px(315);
                int realHeight = listView.getHeight();
                ViewGroup.LayoutParams listParams = listView.getLayoutParams();
                listParams.height = realHeight > maxHeight ? maxHeight : realHeight;
                listView.setLayoutParams(listParams);
                //enterAnimator();
            }
        });
        // Any touch on the (full-screen) content view outside a list row closes the popup.
        view.setOnTouchListener(new View.OnTouchListener() {
            @Override
            public boolean onTouch(View v, MotionEvent event) {
                dismiss();
                return true;
            }
        });
        // Forward list clicks to the externally registered listener, if any.
        listView.setOnItemClickListener(new AdapterView.OnItemClickListener() {
            @Override
            public void onItemClick(AdapterView<?> adapterView, View view, int position, long l) {
                if (onItemClickListener != null) {
                    onItemClickListener.onItemClick(adapterView, view, position, l);
                }
            }
        });
    }

    /** Slides the list down from above the window over 400 ms. */
    private void enterAnimator() {
        ObjectAnimator translationY = ObjectAnimator.ofFloat(listView, "translationY", -listView.getHeight(), 0);
        translationY.setDuration(400);
        translationY.start();
    }

    @Override
    public void showAsDropDown(View anchor, int xoff, int yoff, int gravity) {
        super.showAsDropDown(anchor, xoff, yoff, gravity);
        enterAnimator();
    }

    /**
     * Plays the exit animation instead of closing immediately;
     * {@code super.dismiss()} is invoked from the animation-end callback.
     */
    @Override
    public void dismiss() {
        exitAnimator();
    }

    /** Slides the list up and out, then actually dismisses the popup. */
    private void exitAnimator() {
        ObjectAnimator translationY = ObjectAnimator.ofFloat(listView, "translationY", 0, -listView.getHeight());
        translationY.setInterpolator(new AccelerateDecelerateInterpolator());
        translationY.setDuration(400);
        translationY.addListener(new Animator.AnimatorListener() {
            @Override
            public void onAnimationStart(Animator animation) {
                listView.setVisibility(View.VISIBLE);
            }

            @Override
            public void onAnimationEnd(Animator animation) {
                // Close the popup window for real once the slide-out finishes.
                FolderPopUpWindow.super.dismiss();
            }

            @Override
            public void onAnimationCancel(Animator animation) {
            }

            @Override
            public void onAnimationRepeat(Animator animation) {
            }
        });
        translationY.start();
    }

    public void setOnItemClickListener(OnItemClickListener listener) {
        this.onItemClickListener = listener;
    }

    /** Scrolls the folder list so the given row becomes the selection. */
    public void setSelection(int selection) {
        listView.setSelection(selection);
    }

    @Override
    public void onClick(View v) {
        dismiss();
    }

    /** Callback for folder-row clicks. */
    public interface OnItemClickListener {
        void onItemClick(AdapterView<?> adapterView, View view, int position, long l);
    }
}
| 1,857 |
726 | package org.landy.business.identify.order.domain;
import java.sql.Date;
/**
* 订单明细相关信息
* @author landyl
* @create 2:28 PM 05/14/2018
*/
/**
 * Order detail domain object: one line item of an order, carrying its
 * status code and the relevant status / effective dates.
 *
 * @author landyl
 * @create 2:28 PM 05/14/2018
 */
public class OrderDetail {

    /** Primary key of this detail row. */
    private Long orderDetailId;
    /** Identifier of the originating request. */
    private Integer requestId;
    /** Owning order. */
    private Long orderId;
    /** Customer the order belongs to. */
    private Long customerId;
    /** Current status code. */
    private String statusCode;
    /** Date on which the status was set. */
    private Date statusDate;
    /** Date on which this detail becomes effective. */
    private Date effectiveDate;

    public Long getOrderDetailId() { return orderDetailId; }

    public void setOrderDetailId(Long orderDetailId) { this.orderDetailId = orderDetailId; }

    public Integer getRequestId() { return requestId; }

    public void setRequestId(Integer requestId) { this.requestId = requestId; }

    public Long getOrderId() { return orderId; }

    public void setOrderId(Long orderId) { this.orderId = orderId; }

    public Long getCustomerId() { return customerId; }

    public void setCustomerId(Long customerId) { this.customerId = customerId; }

    public String getStatusCode() { return statusCode; }

    public void setStatusCode(String statusCode) { this.statusCode = statusCode; }

    public Date getStatusDate() { return statusDate; }

    public void setStatusDate(Date statusDate) { this.statusDate = statusDate; }

    public Date getEffectiveDate() { return effectiveDate; }

    public void setEffectiveDate(Date effectiveDate) { this.effectiveDate = effectiveDate; }
}
| 605 |
568 | <filename>MagicMirror/app/src/main/java/com/novoda/magicmirror/facerecognition/KeyToFaceMappings.java<gh_stars>100-1000
package com.novoda.magicmirror.facerecognition;
import android.view.KeyEvent;
import java.util.HashMap;
import java.util.Map;
/**
 * Lookup table translating hardware key codes into {@link FaceExpression}
 * values for the mirror's face renderer.
 */
public class KeyToFaceMappings {

    private final Map<Integer, FaceExpression> faceExpressions;

    /**
     * Builds the default mapping: number keys 1..4 select SAD, NEUTRAL,
     * HAPPY and JOYFUL, in that order.
     */
    public static KeyToFaceMappings newInstance() {
        final Map<Integer, FaceExpression> defaults = new HashMap<>();
        defaults.put(KeyEvent.KEYCODE_1, FaceExpression.SAD);
        defaults.put(KeyEvent.KEYCODE_2, FaceExpression.NEUTRAL);
        defaults.put(KeyEvent.KEYCODE_3, FaceExpression.HAPPY);
        defaults.put(KeyEvent.KEYCODE_4, FaceExpression.JOYFUL);
        return new KeyToFaceMappings(defaults);
    }

    public KeyToFaceMappings(Map<Integer, FaceExpression> faceExpressions) {
        this.faceExpressions = faceExpressions;
    }

    /** Returns the expression mapped to {@code keyCode}, or {@code null} if none. */
    public FaceExpression getFaceFromKeyCode(int keyCode) {
        return faceExpressions.get(keyCode);
    }

    /** Whether any expression is mapped to {@code keyCode}. */
    public boolean contains(int keyCode) {
        return faceExpressions.containsKey(keyCode);
    }
}
| 422 |
647 | <gh_stars>100-1000
/*
PURPOSE: (Perform LU decomposition of matrix a)
REFERENCE: ((Numerical Recipes [FORTRAN]))
PROGRAMMERS: (((<NAME>) (<NAME>) (Jan 1993) (v1.0) (Init Release))) */
#include <stdio.h>
#include "trick/trick_math.h"
#define tiny 1.0e-20
/*
 * Crout LU decomposition with implicit row scaling and partial pivoting,
 * after the "ludcmp" routine of Numerical Recipes (converted to 0-based
 * indexing).  On return, a[][] is overwritten with its LU factors and
 * indx[] records the row permutation produced by pivoting.
 *
 * NOTE(review): the determinant-sign parameter 'd' is received BY VALUE,
 * so the "d = 1.0" / "d = -d" updates below never reach the caller; for
 * the sign to be returned the parameter would need to be 'double *d'.
 * Left as-is here to preserve the existing call signature.
 */
int LU_dcmp( /* Return: 0 on success, 1 if the matrix is singular */
             double **a, /* Inout: Matrix; overwritten with its LU factors */
             double d, /* Out: Determinant sign -- ineffective, passed by value (see note above) */
             int n, /* In: Matrix dimension */
             int *indx, /* Out: Row permutation from partial pivoting */
             double *vv)
{ /* Inout: Scratch storage for the implicit row-scaling factors */
    int i, j, k;
    double dum, sum, aamax;
    int imax = 0, iverr;

    iverr = 0;
    d = 1.0;
    /* Record each row's implicit scaling factor: 1 / (largest |element| in the row).
     * A row of all zeros means the matrix is singular -> bail out with 1. */
    for (i = 0; i < n; i++) {
        aamax = 0.0;
        for (j = 0; j < n; j++) {
            if (fabs(a[i][j]) > aamax)
                aamax = fabs(a[i][j]);
        }
        if (aamax == 0.0) {
            iverr = 1;
            return (iverr);
        }
        vv[i] = 1.0 / aamax;
    }
    /* Crout's method: process one column j at a time. */
    for (j = 0; j < n; j++) {
        /* Compute the upper-triangular elements U[i][j] for i < j. */
        if (j > 0) {
            for (i = 0; i < j; i++) {
                sum = a[i][j];
                if (i > 0) {
                    for (k = 0; k < i; k++) {
                        sum -= a[i][k] * a[k][j];
                    }
                    a[i][j] = sum;
                }
            }
        }
        /* Compute candidates for the pivot (rows i >= j) and track the row
         * with the largest scaled magnitude. */
        aamax = 0.0;
        for (i = j; i < n; i++) {
            sum = a[i][j];
            if (j > 0) {
                for (k = 0; k < j; k++) {
                    sum -= a[i][k] * a[k][j];
                }
                a[i][j] = sum;
            }
            dum = vv[i] * fabs(sum);
            if (dum >= aamax) {
                imax = i;
                aamax = dum;
            }
        }
        /* Swap rows j and imax if a better pivot was found elsewhere;
         * each swap flips the determinant sign. */
        if (j != imax) {
            for (k = 0; k < n; k++) {
                dum = a[imax][k];
                a[imax][k] = a[j][k];
                a[j][k] = dum;
            }
            d = -d;
            vv[imax] = vv[j];
        }
        indx[j] = imax;
        /* Divide the sub-diagonal column by the pivot; substitute 'tiny'
         * for an exactly-zero pivot to avoid division by zero. */
        if (j != n - 1) {
            if (a[j][j] == 0.0)
                a[j][j] = tiny;
            dum = 1.0 / a[j][j];
            for (i = j + 1; i < n; i++) {
                a[i][j] = a[i][j] * dum;
            }
        }
    }
    /* Guard the final pivot as well. */
    if (a[n - 1][n - 1] == 0.0)
        a[n - 1][n - 1] = tiny;

    return (iverr);
}
| 1,613 |
1,768 | // Copyright (c) 2003 Compaq Corporation. All rights reserved.
// Portions Copyright (c) 2003 Microsoft Corporation. All rights reserved.
// Last modified on Mon 30 Apr 2007 at 13:26:33 PST by lamport
// modified on Thu Jan 10 18:33:42 PST 2002 by yuanyu
package tlc2.util;
/**
 * An in-memory LIFO stack of {@code int} values, with convenience methods for
 * pushing/popping/peeking two consecutive slots as a single {@code long}.
 * All mutating and reading accessors are {@code synchronized}.
 */
public final class MemIntStack extends MemBasedSet implements IntStack {

	private static final int MIN_CAPACITY = 1024;

	/**
	 * @param diskdir unused; accepted for signature compatibility with
	 *                disk-backed {@code IntStack} implementations.
	 * @param name    unused; see above.
	 */
	public MemIntStack(String diskdir, String name) {
		super(MIN_CAPACITY);
	}

	/* (non-Javadoc)
	 * @see tlc2.util.IntStack#pushInt(int)
	 */
	public final synchronized void pushInt(int x) {
		if (this.size == this.elems.length) {
			// Grow the backing array before appending: the inherited
			// ensureCapacity allocates the larger array, the old contents
			// are copied over here.
			final int[] newElems = ensureCapacity(MIN_CAPACITY);
			System.arraycopy(elems, 0, newElems, 0, this.size);
			this.elems = newElems;
		}
		this.elems[this.size] = x;
		this.size++;
	}

	/* (non-Javadoc)
	 * @see tlc2.util.IntStack#pushLong(long)
	 *
	 * Low word is pushed first, high word ends up on top; popLong() and
	 * peakLong() rely on exactly this ordering.
	 */
	public final synchronized void pushLong(long x) {
		this.pushInt((int) (x & 0xFFFFFFFFL));
		this.pushInt((int) (x >>> 32));
	}

	/* (non-Javadoc)
	 * @see tlc2.util.IntStack#popInt()
	 */
	public final synchronized int popInt() {
		return this.elems[--this.size];
	}

	/** Returns the int on top of the stack without removing it. */
	public final synchronized int peakInt() {
		return peakInt(size - 1);
	}

	/** Returns the int stored at absolute position {@code pos}. */
	public final synchronized int peakInt(int pos) {
		return this.elems[pos];
	}

	/* (non-Javadoc)
	 * @see tlc2.util.IntStack#popLong()
	 */
	public final synchronized long popLong() {
		long high = this.popInt();
		long low = this.popInt();
		return (high << 32) | (low & 0xFFFFFFFFL);
	}

	/**
	 * Returns the long on top of the stack without removing it.
	 * <p>
	 * BUGFIX: the previous implementation called the no-arg {@code peakInt()}
	 * twice, reading the SAME top slot for both the high and the low word.
	 * The long occupies slots {@code size-1} (high) and {@code size-2} (low),
	 * mirroring {@link #popLong()}, so delegate to {@link #peakLong(int)}.
	 */
	public final synchronized long peakLong() {
		return peakLong(size - 2);
	}

	/** Returns the long whose low word sits at absolute position {@code pos}. */
	public final synchronized long peakLong(int pos) {
		long high = this.peakInt(pos + 1);
		long low = this.peakInt(pos);
		return (high << 32) | (low & 0xFFFFFFFFL);
	}

	/* (non-Javadoc)
	 * @see tlc2.util.IntStack#reset()
	 */
	public final void reset() {
		this.size = 0;
	}
}
| 744 |
438 | <gh_stars>100-1000
def is_private(pattern):
    """Return ``True`` if *pattern* names a private member (leading ``_``).

    A falsy *pattern* (``None`` or the empty string) is never private.
    Previously the function implicitly returned ``None`` in that case;
    it now always returns an explicit ``bool``.
    """
    return bool(pattern) and pattern.startswith('_')
| 60 |
584 | /*
* Copyright 2018-2021 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.data.repository.cdi;
import java.util.Optional;
import java.util.stream.Stream;
import javax.enterprise.inject.UnsatisfiedResolutionException;
import org.springframework.beans.factory.support.AbstractBeanDefinition;
import org.springframework.core.env.StandardEnvironment;
import org.springframework.core.io.ResourceLoader;
import org.springframework.core.io.support.PathMatchingResourcePatternResolver;
import org.springframework.core.type.classreading.CachingMetadataReaderFactory;
import org.springframework.core.type.classreading.MetadataReaderFactory;
import org.springframework.core.type.filter.TypeFilter;
import org.springframework.data.repository.config.CustomRepositoryImplementationDetector;
import org.springframework.data.repository.config.FragmentMetadata;
import org.springframework.data.repository.config.ImplementationDetectionConfiguration;
import org.springframework.data.repository.config.ImplementationLookupConfiguration;
import org.springframework.data.repository.config.RepositoryFragmentConfiguration;
import org.springframework.data.util.Optionals;
import org.springframework.data.util.Streamable;
import org.springframework.lang.Nullable;
import org.springframework.util.Assert;
import org.springframework.util.ClassUtils;
/**
* Context for CDI repositories. This class provides {@link ClassLoader} and
* {@link org.springframework.data.repository.core.support.RepositoryFragment detection} which are commonly used within
* CDI.
*
* @author <NAME>
* @since 2.1
*/
/**
 * Context for CDI repositories. This class provides {@link ClassLoader} and
 * {@link org.springframework.data.repository.core.support.RepositoryFragment detection} which are commonly used within
 * CDI.
 *
 * @author <NAME>
 * @since 2.1
 */
public class CdiRepositoryContext {

	private final ClassLoader classLoader;
	private final CustomRepositoryImplementationDetector detector;
	private final MetadataReaderFactory metadataReaderFactory;
	private final FragmentMetadata metadata; // renamed from misspelled "metdata"

	/**
	 * Create a new {@link CdiRepositoryContext} given {@link ClassLoader} and initialize
	 * {@link CachingMetadataReaderFactory}.
	 *
	 * @param classLoader must not be {@literal null}.
	 */
	public CdiRepositoryContext(ClassLoader classLoader) {

		this(classLoader, new CustomRepositoryImplementationDetector(new StandardEnvironment(),
				new PathMatchingResourcePatternResolver(classLoader)));
	}

	/**
	 * Create a new {@link CdiRepositoryContext} given {@link ClassLoader} and
	 * {@link CustomRepositoryImplementationDetector}.
	 *
	 * @param classLoader must not be {@literal null}.
	 * @param detector must not be {@literal null}.
	 */
	public CdiRepositoryContext(ClassLoader classLoader, CustomRepositoryImplementationDetector detector) {

		Assert.notNull(classLoader, "ClassLoader must not be null!");
		Assert.notNull(detector, "CustomRepositoryImplementationDetector must not be null!");

		ResourceLoader resourceLoader = new PathMatchingResourcePatternResolver(classLoader);

		this.classLoader = classLoader;
		this.metadataReaderFactory = new CachingMetadataReaderFactory(resourceLoader);
		this.metadata = new FragmentMetadata(metadataReaderFactory);
		this.detector = detector;
	}

	CustomRepositoryImplementationDetector getCustomRepositoryImplementationDetector() {
		return detector;
	}

	/**
	 * Load a {@link Class} using the CDI {@link ClassLoader}.
	 *
	 * @param className the fully-qualified name of the class to load.
	 * @return the resolved class, never {@literal null}.
	 * @throws UnsatisfiedResolutionException if the class cannot be found.
	 */
	Class<?> loadClass(String className) {

		try {
			return ClassUtils.forName(className, classLoader);
		} catch (ClassNotFoundException e) {
			// Preserve the original cause so CDI resolution failures stay diagnosable.
			throw new UnsatisfiedResolutionException(String.format("Unable to resolve class for '%s'", className), e);
		}
	}

	/**
	 * Discover {@link RepositoryFragmentConfiguration fragment configurations} for a {@link Class repository interface}.
	 *
	 * @param configuration must not be {@literal null}.
	 * @param repositoryInterface must not be {@literal null}.
	 * @return {@link Stream} of {@link RepositoryFragmentConfiguration fragment configurations}.
	 */
	Stream<RepositoryFragmentConfiguration> getRepositoryFragments(CdiRepositoryConfiguration configuration,
			Class<?> repositoryInterface) {

		CdiImplementationDetectionConfiguration config = new CdiImplementationDetectionConfiguration(configuration,
				metadataReaderFactory);

		return metadata.getFragmentInterfaces(repositoryInterface.getName()) //
				.map(it -> detectRepositoryFragmentConfiguration(it, config)) //
				.flatMap(Optionals::toStream);
	}

	/**
	 * Retrieves the custom repository interface from a repository type. This works for the whole class hierarchy and can
	 * also find a custom repository which is inherited over many levels.
	 *
	 * @param repositoryType The class representing the repository.
	 * @param cdiRepositoryConfiguration The configuration for CDI usage.
	 * @return the interface class or {@literal Optional#empty()}.
	 */
	Optional<Class<?>> getCustomImplementationClass(Class<?> repositoryType,
			CdiRepositoryConfiguration cdiRepositoryConfiguration) {

		ImplementationDetectionConfiguration configuration = new CdiImplementationDetectionConfiguration(
				cdiRepositoryConfiguration, metadataReaderFactory);
		ImplementationLookupConfiguration lookup = configuration.forFragment(repositoryType.getName());

		Optional<AbstractBeanDefinition> beanDefinition = detector.detectCustomImplementation(lookup);

		return beanDefinition.map(this::loadBeanClass);
	}

	/** Resolves the custom-implementation bean definition for a single fragment interface, if present. */
	private Optional<RepositoryFragmentConfiguration> detectRepositoryFragmentConfiguration(String fragmentInterfaceName,
			CdiImplementationDetectionConfiguration config) {

		ImplementationLookupConfiguration lookup = config.forFragment(fragmentInterfaceName);
		Optional<AbstractBeanDefinition> beanDefinition = detector.detectCustomImplementation(lookup);

		return beanDefinition.map(bd -> new RepositoryFragmentConfiguration(fragmentInterfaceName, bd));
	}

	@Nullable
	private Class<?> loadBeanClass(AbstractBeanDefinition definition) {

		String beanClassName = definition.getBeanClassName();

		return beanClassName == null ? null : loadClass(beanClassName);
	}

	/**
	 * {@link ImplementationDetectionConfiguration} backed by a {@link CdiRepositoryConfiguration}: no base-package
	 * scanning and no exclude filters; only the implementation postfix is taken from the CDI configuration.
	 */
	private static class CdiImplementationDetectionConfiguration implements ImplementationDetectionConfiguration {

		private final CdiRepositoryConfiguration configuration;
		private final MetadataReaderFactory metadataReaderFactory;

		CdiImplementationDetectionConfiguration(CdiRepositoryConfiguration configuration,
				MetadataReaderFactory metadataReaderFactory) {

			this.configuration = configuration;
			this.metadataReaderFactory = metadataReaderFactory;
		}

		/*
		 * (non-Javadoc)
		 * @see org.springframework.data.repository.config.CustomRepositoryImplementationDetector.ImplementationDetectionConfiguration#getImplementationPostfix()
		 */
		@Override
		public String getImplementationPostfix() {
			return configuration.getRepositoryImplementationPostfix();
		}

		/*
		 * (non-Javadoc)
		 * @see org.springframework.data.repository.config.CustomRepositoryImplementationDetector.ImplementationDetectionConfiguration#getBasePackages()
		 */
		@Override
		public Streamable<String> getBasePackages() {
			return Streamable.empty();
		}

		/*
		 * (non-Javadoc)
		 * @see org.springframework.data.repository.config.CustomRepositoryImplementationDetector.ImplementationDetectionConfiguration#getExcludeFilters()
		 */
		@Override
		public Streamable<TypeFilter> getExcludeFilters() {
			return Streamable.empty();
		}

		public MetadataReaderFactory getMetadataReaderFactory() {
			return this.metadataReaderFactory;
		}
	}
}
| 2,208 |
595 | <reponame>erique/embeddedsw
/******************************************************************************
* Copyright (C) 2021 Xilinx, Inc. All rights reserved.
* SPDX-License-Identifier: MIT
******************************************************************************/
#ifndef XV_WARP_FILTER_L2_H /* prevent circular inclusions */
#define XV_WARP_FILTER_L2_H /* by using protection macros */

/***************************** Include Files *********************************/
#include "xv_warp_filter.h"

/************************** Constant Definitions *****************************/

/**************************** Type Definitions *******************************/
/*
 * One hardware descriptor for a warp-filter transfer.
 * NOTE(review): the field order/sizes (including the conditional plane and
 * WRITE_INVALID fields) presumably mirror the IP's in-memory descriptor
 * layout -- do not reorder or repack; confirm against the IP product guide.
 */
typedef struct {
	u32 height;          /* frame height */
	u32 width;           /* frame width */
	u32 stride;          /* line stride */
	u32 format;          /* video format code */
	u32 valid_seg;       /* number of valid segments (see XVWarpFilter_ProgramDescriptor) */
	u32 lblock_count;    /* line-block count (see XVWarpFilter_ProgramDescriptor) */
	u32 line_num;        /* line number (see XVWarpFilter_ProgramDescriptor) */
	u32 reserved;        /* reserved / padding */
	u64 src_buf_addr;    /* source buffer address, plane 0 */
#if ((MAX_NR_PLANES == 2) || (MAX_NR_PLANES == 3) )
	u64 src_buf_addr1;   /* source buffer address, plane 1 (multi-planar builds) */
#endif
#if (MAX_NR_PLANES == 3)
	u64 src_buf_addr2;   /* source buffer address, plane 2 */
#endif
	u64 seg_table_addr;  /* segment table address */
	u64 dest_buf_addr;   /* destination buffer address, plane 0 */
#if ((MAX_NR_PLANES == 2) || (MAX_NR_PLANES == 3) )
	u64 dest_buf_addr1;  /* destination buffer address, plane 1 */
#endif
#if (MAX_NR_PLANES == 3)
	u64 dest_buf_addr2;  /* destination buffer address, plane 2 */
#endif
#if (WRITE_INVALID == 1)
	AXIMM_WRITE dest_buf_addr_i;  /* destination channel used when WRITE_INVALID builds are enabled */
	AXIMM_READ seg_table_addr_i;  /* segment-table read channel for WRITE_INVALID builds */
#endif
	u64 Warp_NextDescAddr;  /* physical address of the next descriptor in the chain */
} XVWarpFilter_Desc;

/* Caller-supplied per-frame configuration used to program one descriptor. */
typedef struct {
	u32 height;
	u32 width;
	u32 stride;
	u32 format;
	u64 src_buf_addr;
	u64 seg_table_addr;
	u64 dest_buf_addr;
} XVWarpFilter_InputConfigs;

/**************************** Function Prototypes *****************************/
/* Enable the interrupt sources selected by Mask. */
void XVWarpFilter_InterruptEnable(XV_warp_filter *InstancePtr, u32 Mask);
/* Disable the interrupt sources selected by IrqMask. */
void XVWarpFilter_InterruptDisable(XV_warp_filter *InstancePtr, u32 IrqMask);
/* Register the user callback invoked from the interrupt handler. */
void XVWarpFilter_SetCallback(XV_warp_filter *InstancePtr,
		void *CallbackFunc, void *CallbackRef);
/* Top-level interrupt handler; to be connected to the system interrupt controller. */
void *XVWarpFilter_IntrHandler(void *InstancePtr);
/* Fill descriptor DescNum from configPtr plus the segment parameters; returns a status code. */
s32 XVWarpFilter_ProgramDescriptor(XV_warp_filter *InstancePtr, u32 DescNum,
		XVWarpFilter_InputConfigs *configPtr, u32 valid_seg,
		u32 lblock_count, u32 line_num);
/* Reset the descriptor count bookkeeping. */
void XVWarpFilter_ClearNumOfDescriptors(XV_warp_filter *InstancePtr);
/* Declare how many descriptors the chain will contain; returns a status code. */
s32 XVWarpFilter_SetNumOfDescriptors(XV_warp_filter *InstancePtr, u32 num_descriptors);
/* Patch only the source frame address of descriptor Descnum. */
s32 XVWarpFilter_update_src_frame_addr(XV_warp_filter *InstancePtr,
		u32 Descnum, u64 src_buf_addr);
/* Patch only the destination frame address of descriptor Descnum. */
s32 XVWarpFilter_update_dst_frame_addr(XV_warp_filter *InstancePtr,
		u32 Descnum, u64 dest_buf_addr);
/* Kick off processing of the programmed descriptor chain. */
void XVWarpFilter_Start(XV_warp_filter *InstancePtr);
/* Stop the filter; returns a status code. */
s32 XVWarpFilter_Stop(XV_warp_filter *InstancePtr);

#endif
| 900 |
1,133 | /**
*
* PixelFlow | Copyright (C) 2016 <NAME> - http://thomasdiewald.com
*
* A Processing/Java library for high performance GPU-Computing (GLSL).
* MIT License: https://opensource.org/licenses/MIT
*
*/
package SoftBody2D.SoftBody2D_GetStarted;
import java.util.ArrayList;
import com.thomasdiewald.pixelflow.java.DwPixelFlow;
import com.thomasdiewald.pixelflow.java.softbodydynamics.DwPhysics;
import com.thomasdiewald.pixelflow.java.softbodydynamics.constraint.DwSpringConstraint;
import com.thomasdiewald.pixelflow.java.softbodydynamics.constraint.DwSpringConstraint2D;
import com.thomasdiewald.pixelflow.java.softbodydynamics.particle.DwParticle2D;
import processing.core.*;
/**
 * Getting-started sketch for 2D verlet particle / soft-body simulation:
 * a vertical chain of particles connected by springs, rendered each frame,
 * with mouse-drag interaction. Runs as a Processing {@link PApplet}.
 */
public class SoftBody2D_GetStarted extends PApplet {

  //
  // Getting started with verlet particles/softbody simulation.
  //
  // + Collision Detection
  //

  // window geometry
  int viewport_w = 1280;
  int viewport_h = 720;
  int viewport_x = 230;
  int viewport_y = 0;

  // physics parameters
  DwPhysics.Param param_physics = new DwPhysics.Param();

  // physics simulation
  DwPhysics<DwParticle2D> physics;
  // the simulated chain: 15 particles, linked pairwise by springs in setup()
  DwParticle2D[] particles = new DwParticle2D[15];

  /** Processing callback: renderer and window size. */
  public void settings(){
    size(viewport_w, viewport_h, P2D);
    smooth(8);
  }

  /** Processing callback: builds the physics world and the particle chain. */
  public void setup() {
    surface.setLocation(viewport_x, viewport_y);

    // main library context
    DwPixelFlow context = new DwPixelFlow(this);
    context.print();
    //  context.printGL();

    // physics object
    physics = new DwPhysics<DwParticle2D>(param_physics);

    // global physics parameters
    param_physics.GRAVITY = new float[]{ 0, 0.5f };
    param_physics.bounds  = new float[]{ 0, 0, width, height };
    param_physics.iterations_collisions = 4;
    param_physics.iterations_springs    = 4;

    // particle parameters
    DwParticle2D.Param param_particle = new DwParticle2D.Param();
    param_particle.DAMP_BOUNDS    = 0.50f;
    param_particle.DAMP_COLLISION = 0.9990f;
    param_particle.DAMP_VELOCITY  = 0.9999991f;

    // spring parameters
    DwSpringConstraint.Param param_spring = new DwSpringConstraint.Param();
    param_spring.damp_dec = 0.899999f;
    param_spring.damp_inc = 0.000099999f;

    // create particles + chain them together
    for(int i = 0; i < particles.length; i++){
      float radius = 10;
      float px = width/2;
      float py = 100 + i * radius * 3;
      particles[i] = new DwParticle2D(i, px, py, radius, param_particle);
      // link each particle to its predecessor -> a hanging chain
      if(i > 0) DwSpringConstraint2D.addSpring(physics, particles[i-1], particles[i], param_spring);
    }

    // add all particles to the physics simulation
    physics.setParticles(particles, particles.length);

    frameRate(60);
  }

  /** Processing callback: advances the simulation one step and renders it. */
  public void draw() {

    updateMouseInteractions();

    // update physics simulation
    physics.update(1);

    // render
    background(255);

    // render springs: access the springs and use the current force for the line-color
    noFill();
    strokeWeight(1);
    beginShape(LINES);
    ArrayList<DwSpringConstraint> springs = physics.getSprings();
    for(DwSpringConstraint spring : springs){
      if(spring.enabled){
        DwParticle2D pa = particles[spring.idxPa()];
        DwParticle2D pb = particles[spring.idxPb()];
        // color scales with the spring's current force magnitude
        float force = Math.abs(spring.force);
        float r = force*5000f;
        float g = r/10;
        float b = 0;
        stroke(r,g,b);
        vertex(pa.cx, pa.cy);
        vertex(pb.cx, pb.cy);
      }
    }
    endShape();

    // render particles
    noStroke();
    fill(0);
    for(int i = 0; i < particles.length; i++){
      DwParticle2D particle = particles[i];
      ellipse(particle.cx, particle.cy, particle.rad*2, particle.rad*2);
    }

    // stats, to the title window
    String txt_fps = String.format(getClass().getName()+ "   [particles %d]   [frame %d]   [fps %6.2f]", particles.length,frameCount, frameRate);
    surface.setTitle(txt_fps);
  }

  //////////////////////////////////////////////////////////////////////////////
  // User Interaction
  //////////////////////////////////////////////////////////////////////////////

  // particle currently grabbed by the mouse; null while no drag is active
  DwParticle2D particle_mouse = null;

  /**
   * Returns the particle closest to (mx, my) within search_radius,
   * or null if none lies inside the radius.
   */
  public DwParticle2D findNearestParticle(float mx, float my, float search_radius){
    float dd_min_sq = search_radius * search_radius;
    DwParticle2D particle = null;
    for(int i = 0; i < particles.length; i++){
      float dx = mx - particles[i].cx;
      float dy = my - particles[i].cy;
      float dd_sq =  dx*dx + dy*dy;
      if( dd_sq < dd_min_sq){
        dd_min_sq = dd_sq;
        particle = particles[i];
      }
    }
    return particle;
  }

  /** Drags the grabbed particle (if any) toward the current mouse position. */
  public void updateMouseInteractions(){
    if(particle_mouse != null){
      float[] mouse = {mouseX, mouseY};
      particle_mouse.moveTo(mouse, 0.2f);
    }
  }

  /** Processing callback: grab the nearest particle within 100px of the click. */
  public void mousePressed(){
    particle_mouse = findNearestParticle(mouseX, mouseY, 100);
    if(particle_mouse != null){
      particle_mouse.enable(false, false, false);
    }
  }

  /** Processing callback: release the grabbed particle; middle button leaves it pinned. */
  public void mouseReleased(){
    if(particle_mouse != null){
      if(mouseButton == LEFT  ) particle_mouse.enable(true, true, true );
      if(mouseButton == CENTER) particle_mouse.enable(true, false, false);
      particle_mouse = null;
    }
  }

  public static void main(String args[]) {
    PApplet.main(new String[] { SoftBody2D_GetStarted.class.getName() });
  }
}
1,729 | <filename>source/3rdparty/easy_profiler/include/easy/arbitrary_value.h
/**
Lightweight profiler library for c++
Copyright(C) 2016-2019 <NAME>, <NAME>
Licensed under either of
* MIT license (LICENSE.MIT or http://opensource.org/licenses/MIT)
* Apache License, Version 2.0, (LICENSE.APACHE or http://www.apache.org/licenses/LICENSE-2.0)
at your option.
The MIT License
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished
to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
USE OR OTHER DEALINGS IN THE SOFTWARE.
The Apache License, Version 2.0 (the "License");
You may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
**/
#ifndef EASY_PROFILER_ARBITRARY_VALUE_H
#define EASY_PROFILER_ARBITRARY_VALUE_H
#include <easy/details/arbitrary_value_public_types.h>
//
// USING_EASY_PROFILER is defined in details/profiler_in_use.h
// if defined BUILD_WITH_EASY_PROFILER and not defined DISABLE_EASY_PROFILER
//
//
// BUILD_WITH_EASY_PROFILER is defined in CMakeLists.txt if your project is linked to easy_profiler.
//
//
// DISABLE_EASY_PROFILER may be defined manually in source-file before #include <easy/profiler.h>
// to disable profiler for certain source-file or project.
//
#ifdef USING_EASY_PROFILER
/** Macro used to create a unique Value Identification Number.

    Use this if you want to change the same value from different places in your code.
    Otherwise do not mind.

    \code
    struct A {
        int someCount;
    };

    void foo(const A& a) {
        EASY_VALUE("foo count", a.someCount); // Value ID is automatically set to (uint64_t)&a.someCount
        // This notation is completely the same as EASY_VALUE("foo count", a.someCount, EASY_VIN(a.someCount));
    }

    void bar(const A& b) {
        EASY_VALUE("bar count", b.someCount); // Same ID as for "foo count" if &b == &a (and different ID otherwise)
    }

    void baz(const A& c) {
        EASY_VALUE("baz count", c.someCount, EASY_VIN(EASY_FUNC_NAME)); // Different ID from "foo count" and "bar count"
        EASY_VALUE("qux count", 100500, EASY_VIN(EASY_FUNC_NAME)); // Same ID as for "baz count"
    }
    \endcode

    \ingroup profiler
*/
# define EASY_VIN(member) ::profiler::ValueId(member)

/** Macro used to identify a global value which would be recognized by its name in GUI.

    \code
    struct A {
        int someCount;
    };

    struct B {
        int someOtherCount;
    };

    void foo(const A& a) {
        EASY_VALUE("Count", a.someCount, EASY_GLOBAL_VIN);
    }

    void bar(const B& b) {
        EASY_VALUE("Count", b.someOtherCount, EASY_GLOBAL_VIN); // Same ID as for foo::"Count" despite of &b != &a
    }
    \endcode

    \ingroup profiler
*/
# define EASY_GLOBAL_VIN ::profiler::ValueId()

/** Macro used to identify a unique value (a fresh ID per macro instantiation).

    \code
    struct A {
        int someCount;
    };

    void foo(const A& a) {
        // All these values would have different IDs despite of names, pointers and values are the same
        EASY_VALUE("foo count", a.someCount, EASY_UNIQUE_VIN);
        EASY_VALUE("foo count", a.someCount, EASY_UNIQUE_VIN);
        EASY_VALUE("foo count", a.someCount, EASY_UNIQUE_VIN);
    }
    \endcode

    \ingroup profiler
*/
# define EASY_UNIQUE_VIN ::profiler::ValueId(EASY_UNIQUE_DESC(__LINE__))

/** Macro used to store a single arbitrary value.

    \note Also stores a time-stamp of its occurrence like an Event.

    \note To store a dynamic array (which size is unknown at compile time), please, use EASY_ARRAY macro.

    \note Currently arbitrary values support only compile-time names.

    \sa EASY_ARRAY, EASY_TEXT, EASY_STRING

    \ingroup profiler
*/
# define EASY_VALUE(name, value, ...)\
    EASY_LOCAL_STATIC_PTR(const ::profiler::BaseBlockDescriptor*, EASY_UNIQUE_DESC(__LINE__), ::profiler::registerDescription(\
        ::profiler::extract_enable_flag(__VA_ARGS__), EASY_UNIQUE_LINE_ID, EASY_COMPILETIME_NAME(name),\
            __FILE__, __LINE__, ::profiler::BlockType::Value, ::profiler::extract_color(__VA_ARGS__), false));\
    ::profiler::setValue(EASY_UNIQUE_DESC(__LINE__), value, ::profiler::extract_value_id(value, ## __VA_ARGS__));

/** Macro used to store an array of arbitrary values.

    \note Also stores a time-stamp of its occurrence like an Event.

    \note Currently arbitrary values support only compile-time names.

    \warning Max data size (sizeof(value) * size) is MAX_BLOCK_DATA_SIZE. Passing bigger size has undefined behavior.

    \sa EASY_VALUE, EASY_TEXT, EASY_STRING, MAX_BLOCK_DATA_SIZE

    \ingroup profiler
*/
# define EASY_ARRAY(name, value, size, ...)\
    EASY_LOCAL_STATIC_PTR(const ::profiler::BaseBlockDescriptor*, EASY_UNIQUE_DESC(__LINE__), ::profiler::registerDescription(\
        ::profiler::extract_enable_flag(__VA_ARGS__), EASY_UNIQUE_LINE_ID, EASY_COMPILETIME_NAME(name),\
            __FILE__, __LINE__, ::profiler::BlockType::Value, ::profiler::extract_color(__VA_ARGS__), false));\
    ::profiler::setValue(EASY_UNIQUE_DESC(__LINE__), value, ::profiler::extract_value_id(value, ## __VA_ARGS__), size);

/** Macro used to store custom text.

    Could be C-string or std::string.

    \note Also stores a time-stamp of its occurrence like an Event.

    \note Currently arbitrary values support only compile-time names.

    \warning Max string length is MAX_BLOCK_DATA_SIZE (including trailing '\0'). Passing bigger size has undefined behavior.

    \sa EASY_VALUE, EASY_ARRAY, EASY_STRING, MAX_BLOCK_DATA_SIZE

    \ingroup profiler
*/
# define EASY_TEXT(name, text, ...)\
    EASY_LOCAL_STATIC_PTR(const ::profiler::BaseBlockDescriptor*, EASY_UNIQUE_DESC(__LINE__), ::profiler::registerDescription(\
        ::profiler::extract_enable_flag(__VA_ARGS__), EASY_UNIQUE_LINE_ID, EASY_COMPILETIME_NAME(name),\
            __FILE__, __LINE__, ::profiler::BlockType::Value, ::profiler::extract_color(__VA_ARGS__), false));\
    ::profiler::setText(EASY_UNIQUE_DESC(__LINE__), text, ::profiler::extract_value_id(text , ## __VA_ARGS__));

/** Macro used to store custom text of specified length.

    Same as EASY_TEXT, but with explicitly specified length.
    Use this for C-strings of known length (compile-time or run-time).

    \note Recommendation (not a requirement): Take into account a zero-terminator '\0' symbol (e.g. strlen("BlaBla") + 1).

    \note Also stores a time-stamp of its occurrence like an Event.

    \note Currently arbitrary values support only compile-time names.

    \warning Max string length is MAX_BLOCK_DATA_SIZE (including trailing '\0'). Passing bigger size has undefined behavior.

    \sa EASY_VALUE, EASY_ARRAY, EASY_TEXT, MAX_BLOCK_DATA_SIZE

    \ingroup profiler
*/
# define EASY_STRING(name, text, size, ...)\
    EASY_LOCAL_STATIC_PTR(const ::profiler::BaseBlockDescriptor*, EASY_UNIQUE_DESC(__LINE__), ::profiler::registerDescription(\
        ::profiler::extract_enable_flag(__VA_ARGS__), EASY_UNIQUE_LINE_ID, EASY_COMPILETIME_NAME(name),\
            __FILE__, __LINE__, ::profiler::BlockType::Value, ::profiler::extract_color(__VA_ARGS__), false));\
    ::profiler::setText(EASY_UNIQUE_DESC(__LINE__), text, ::profiler::extract_value_id(text, ## __VA_ARGS__), size);

namespace profiler
{

    // Low-level entry point; the setValue/setText overloads below forward the
    // descriptor, type tag, raw bytes, size, array flag and value ID to it.
    extern "C" PROFILER_API void storeValue(const BaseBlockDescriptor* _desc, DataType _type, const void* _data,
                                            uint16_t _size, bool _isArray, ValueId _vin);
/// Stores a single scalar value taken by copy; T must map to a profiler DataType.
template <class T>
inline void setValue(const BaseBlockDescriptor* _desc, T _value, ValueId _vin)
{
    // Strip cv-qualifiers and references to obtain the raw storage type.
    using Type = typename ::std::remove_reference<typename ::std::remove_cv<T>::type>::type;

    static_assert(!::std::is_pointer<T>::value,
                  "You should not pass pointers into EASY_VALUE. Pass a reference instead.");
    static_assert(StdToDataType<Type>::data_type != DataType::TypesCount,
                  "You should use standard builtin scalar types as profiler::Value type!");

    const auto byteSize = static_cast<uint16_t>(sizeof(Type));
    storeValue(_desc, StdToDataType<Type>::data_type, &_value, byteSize, false, _vin);
}
///< WARNING: Passing _arraySize > (MAX_BLOCK_DATA_SIZE / sizeof(T)) may cause undefined behavior!
/// Stores a run-time sized array of scalars; the element count is given by _arraySize.
template <class T>
inline void setValue(const BaseBlockDescriptor* _desc, const T* _valueArray, ValueId _vin, uint16_t _arraySize)
{
    static_assert(StdToDataType<T>::data_type != DataType::TypesCount,
                  "You should use standard builtin scalar types as profiler::Value type!");

    const auto totalBytes = static_cast<uint16_t>(sizeof(T) * _arraySize);
    storeValue(_desc, StdToDataType<T>::data_type, _valueArray, totalBytes, true, _vin);
}
/// Stores a compile-time sized array of scalars; the size is checked statically.
template <class T, size_t N>
inline void setValue(const BaseBlockDescriptor* _desc, const T (&_value)[N], ValueId _vin)
{
    static_assert(sizeof(_value) <= MAX_BLOCK_DATA_SIZE, "Maximum arbitrary values data size exceeded.");
    static_assert(StdToDataType<T>::data_type != DataType::TypesCount,
                  "You should use standard builtin scalar types as profiler::Value type!");

    storeValue(_desc, StdToDataType<T>::data_type, &_value[0], static_cast<uint16_t>(sizeof(_value)), true, _vin);
}
///< WARNING: Passing _textLength > MAX_BLOCK_DATA_SIZE may cause undefined behavior!
/// Stores _textLength bytes of _text as a String value (length supplied by caller).
inline void setText(const BaseBlockDescriptor* _desc, const char* _text, ValueId _vin, uint16_t _textLength)
{
    storeValue(_desc, DataType::String, static_cast<const void*>(_text), _textLength, true, _vin);
}
///< WARNING: Passing _text with length > MAX_BLOCK_DATA_SIZE may cause undefined behavior!
// Stores a zero-terminated C-string; the trailing '\0' is included (hence the +1).
// NOTE(review): relies on strlen() being visible through transitive includes --
// confirm <cstring> is pulled in by arbitrary_value_public_types.h.
inline void setText(const BaseBlockDescriptor* _desc, const char* _text, ValueId _vin)
{
    storeValue(_desc, DataType::String, _text, static_cast<uint16_t>(strlen(_text) + 1), true, _vin);
}
///< WARNING: Passing _text with length > MAX_BLOCK_DATA_SIZE may cause undefined behavior!
/// Stores a std::string as a String value, including its terminating '\0'.
inline void setText(const BaseBlockDescriptor* _desc, const ::std::string& _text, ValueId _vin)
{
    const auto sizeWithNull = static_cast<uint16_t>(_text.size() + 1);
    storeValue(_desc, DataType::String, _text.c_str(), sizeWithNull, true, _vin);
}
/// Stores a fixed-size character array; the size is checked at compile time.
template <size_t N>
inline void setText(const BaseBlockDescriptor* _desc, const char (&_text)[N], ValueId _vin)
{
    static_assert(N <= MAX_BLOCK_DATA_SIZE, "Maximum arbitrary values data size exceeded.");
    storeValue(_desc, DataType::String, _text, static_cast<uint16_t>(N), true, _vin);
}
} // end of namespace profiler.
#else // USING_EASY_PROFILER not defined: everything below compiles to nothing.

# if defined(__clang__)
#  pragma clang diagnostic push
#  pragma clang diagnostic ignored "-Wgnu-zero-variadic-macro-arguments"
# endif

// With the profiler disabled, all macros expand to empty text.
# define EASY_GLOBAL_VIN
# define EASY_UNIQUE_VIN
# define EASY_VIN(member)
# define EASY_VALUE(...)
# define EASY_ARRAY(...)
# define EASY_TEXT(...)
# define EASY_STRING(...)

namespace profiler
{

    // No-op stubs mirroring the enabled API so user code keeps compiling.
    inline void storeValue(const BaseBlockDescriptor*, DataType, const void*, uint16_t, bool, ValueId) {}

    template <class T>
    inline void setValue(const BaseBlockDescriptor*, T, ValueId) {}

    template <class T>
    inline void setValue(const BaseBlockDescriptor*, const T*, ValueId, uint16_t) {}

    template <class T, size_t N>
    inline void setValue(const BaseBlockDescriptor*, const T (&)[N], ValueId) {}

    inline void setText(const BaseBlockDescriptor*, const char*, ValueId, uint16_t) {}

    inline void setText(const BaseBlockDescriptor*, const char*, ValueId) {}

    inline void setText(const BaseBlockDescriptor*, const ::std::string&, ValueId) {}

    template <size_t N>
    inline void setText(const BaseBlockDescriptor*, const char (&)[N], ValueId) {}

} // end of namespace profiler.

# if defined(__clang__)
#  pragma clang diagnostic pop
# endif

#endif // USING_EASY_PROFILER
#endif // EASY_PROFILER_ARBITRARY_VALUE_H
| 4,620 |
{
"schema_version": "1.2.0",
"id": "GHSA-g6ph-6cq5-3xgm",
"modified": "2022-05-01T23:52:57Z",
"published": "2022-05-01T23:52:57Z",
"aliases": [
"CVE-2008-2752"
],
"details": "Microsoft Word 2000 9.0.2812 and 2003 11.8106.8172 does not properly handle unordered lists, which allows user-assisted remote attackers to cause a denial of service (memory corruption and application crash) or possibly execute arbitrary code via a crafted .doc file. NOTE: some of these details are obtained from third party information.",
"severity": [
],
"affected": [
],
"references": [
{
"type": "ADVISORY",
"url": "https://nvd.nist.gov/vuln/detail/CVE-2008-2752"
},
{
"type": "WEB",
"url": "https://exchange.xforce.ibmcloud.com/vulnerabilities/43155"
},
{
"type": "WEB",
"url": "http://www.nullcode.com.ar/ncs/crash/video.htm"
},
{
"type": "WEB",
"url": "http://www.nullcode.com.ar/ncs/crash/video2.htm"
},
{
"type": "WEB",
"url": "http://www.securityfocus.com/bid/29769"
},
{
"type": "WEB",
"url": "http://www.securityfocus.com/data/vulnerabilities/exploits/crash-word-1.doc"
},
{
"type": "WEB",
"url": "http://www.securityfocus.com/data/vulnerabilities/exploits/crash-word-2.doc"
},
{
"type": "WEB",
"url": "http://www.securityfocus.com/data/vulnerabilities/exploits/crash-word-3.doc"
},
{
"type": "WEB",
"url": "http://www.securityfocus.com/data/vulnerabilities/exploits/crash-word-4.doc"
}
],
"database_specific": {
"cwe_ids": [
],
"severity": "HIGH",
"github_reviewed": false
}
} | 769 |
// core/src/main/java/net/tomp2p/p2p/PostRoutingFilter.java
package net.tomp2p.p2p;
import net.tomp2p.peers.PeerAddress;
/**
* Filters potential and direct hits from the result set.
*
* @author <NAME>
*
*/
public interface PostRoutingFilter {

    /**
     * Decides whether a potential hit should be excluded from the result set.
     *
     * @param peerAddress address of the peer that produced the <strong>potential</strong> hit
     * @return <code>true</code> to reject / ignore a <strong>potential</strong> hit, otherwise
     *         <code>false</code>
     */
    boolean rejectPotentialHit(PeerAddress peerAddress);

    /**
     * Decides whether a direct hit should be excluded from the result set.
     *
     * @param peerAddress address of the peer that produced the <strong>direct</strong> hit
     * @return <code>true</code> to reject / ignore a <strong>direct</strong> hit, otherwise
     *         <code>false</code>
     */
    boolean rejectDirectHit(PeerAddress peerAddress);
}
| 231 |
/*
* Copyright Lealone Database Group.
* Licensed under the Server Side Public License, v 1.
* Initial Developer: zhh
*/
package org.lealone.sql;
import org.lealone.db.async.Future;
import org.lealone.db.result.Result;
import org.lealone.server.protocol.dt.DTransactionParameters;
/**
 * A SQL command that can be executed within a distributed transaction.
 */
public interface DistributedSQLCommand extends SQLCommand {

    /**
     * Executes this command as a query under the distributed transaction
     * described by {@code parameters}.
     *
     * @param maxRows    maximum number of rows to return
     * @param scrollable whether the result set must be scrollable
     * @param parameters distributed transaction parameters
     * @return a future completed with the query {@link Result}
     */
    Future<Result> executeDistributedQuery(int maxRows, boolean scrollable, DTransactionParameters parameters);

    /**
     * Executes this command as an update under the distributed transaction
     * described by {@code parameters}.
     *
     * @param parameters distributed transaction parameters
     * @return a future completed with an integer result (presumably the update
     *         count -- confirm against {@code SQLCommand})
     */
    Future<Integer> executeDistributedUpdate(DTransactionParameters parameters);
}
| 152 |
//==================================================================================================
/**
EVE - Expressive Vector Engine
Copyright : EVE Contributors & Maintainers
SPDX-License-Identifier: MIT
**/
//==================================================================================================
#include "unit/algo/algo_test.hpp"
#include "unit/algo/iterator_concept_test.hpp"
#include <eve/views/reverse.hpp>
#include <eve/algo/ptr_iterator.hpp>
#include <array>
#include <numeric>
// Exercises eve::views::reverse over ptr_iterator ranges: for every selected
// type, builds a reversed range from each aligned/unaligned begin/end pairing
// and runs the generic iterator/sentinel conformance tests on it.
EVE_TEST_TYPES("Check reverse_iterator", algo_test::selected_types)
<typename T>(eve::as<T>)
{
    // data holds T::size()-1 .. 0, so reading it backwards yields 0, 1, 2, ...
    alignas(sizeof(T)) std::array<eve::element_type_t<T>, T::size()> data;
    std::iota(data.rbegin(), data.rend(), 0);

    // Expected register contents: the identity ramp 0, 1, 2, ...
    T values([](int i, int) { return i; });
    auto replace = [&](auto v, auto ignore) { return eve::replace_ignored(v, ignore, T{0}); };

    auto run_test_one_pair = [&](auto f, auto l) {
        auto rev = eve::views::reverse(eve::algo::as_range(f, l));
        algo_test::iterator_sentinel_test(rev.begin(), rev.end(), values, replace);
    };

    auto run_test = [&] <typename U>(U* f, U* l) {
        using N = eve::fixed<T::size()>;
        using aligned_p = eve::aligned_ptr<U, N>;

        using u_it = eve::algo::ptr_iterator<U*, N>;
        using a_it = eve::algo::ptr_iterator<aligned_p, N>;

        u_it u_f(f);
        u_it u_l(l);
        a_it a_f(aligned_p{f});
        a_it a_l(aligned_p{l});

        // Every combination of aligned/unaligned begin and end.
        run_test_one_pair(u_f, u_l);
        run_test_one_pair(u_f, a_l);
        run_test_one_pair(a_f, a_l);
        run_test_one_pair(a_f, u_l);

        // Writability checks only apply to non-const element types.
        if constexpr (!std::is_const_v<U>)
        {
            algo_test::writeable_readable_iterator(eve::views::reverse(a_l), values, replace);
            algo_test::writeable_readable_iterator(eve::views::reverse(u_l), values, replace);
        }
    };

    // Cover both mutable and const element pointers.
    run_test(data.begin(), data.end());
    run_test(data.cbegin(), data.cend());
};
// A reverse_iterator over mutable elements must convert to one over const
// elements, mirroring the built-in T* -> T const* conversion.
TTS_CASE("reverse iterators const/non-const")
{
    using from = eve::views::reverse_iterator<int*>;
    using to   = eve::views::reverse_iterator<int const*>;
    TTS_CONSTEXPR_EXPECT( (std::convertible_to<from, to>) );
};
| 800 |
# Fetch data of any country/Source_code.py
'''
Fetch data of any country with Python using countryinfo.

Author: <NAME>
'''
# Import the necessary module!
from countryinfo import CountryInfo

name = 'India'
country = CountryInfo(name)

# Print each piece of country data, one value per line, in a fixed order.
for getter in (
    country.alt_spellings,
    country.capital,
    country.currencies,
    country.languages,
    country.timezones,
    country.area,
    country.borders,
    country.calling_codes,
    country.wiki,
):
    print(getter())

# Dump the full info dictionary as "key --> value" lines.
for key, value in country.info().items():
    print(f'{key} --> {value}')
| 289 |
// paas/tesla-authproxy/tesla-authproxy-server/src/main/java/com/alibaba/tesla/authproxy/util/AuthUtil.java
package com.alibaba.tesla.authproxy.util;
import com.alibaba.tesla.authproxy.model.AppExtDO;
import com.alibaba.tesla.authproxy.model.UserDO;
import com.alibaba.tesla.authproxy.model.mapper.AppExtMapper;
import com.alibaba.tesla.authproxy.model.mapper.UserMapper;
import com.alibaba.tesla.authproxy.service.TeslaUserService;
import lombok.extern.slf4j.Slf4j;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Component;

import javax.servlet.http.HttpServletRequest;
import java.io.UnsupportedEncodingException;
import java.nio.charset.StandardCharsets;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import java.text.DateFormat;
import java.text.SimpleDateFormat;
import java.util.Calendar;
import java.util.Date;
/**
* 验证相关工具类
*
* @author <EMAIL>
*/
@Component
@Slf4j
public class AuthUtil {
@Autowired
private AppExtMapper appExtMapper;
@Autowired
private UserMapper userMapper;
@Autowired
private TeslaUserService teslaUserService;
private static String getPasswdHash(String username, String password)
throws UnsupportedEncodingException, NoSuchAlgorithmException {
DateFormat df = new SimpleDateFormat("yyyyMMdd");
Date today = Calendar.getInstance().getTime();
String todayStr = df.format(today);
String rawHash = String.format("%s%s%s", username, todayStr, password);
byte[] bytesOfMessage = rawHash.getBytes("UTF-8");
MessageDigest md = MessageDigest.getInstance("MD5");
byte[] theDigest = md.digest(bytesOfMessage);
return convertHashBytes2Str(theDigest);
}
private static String getNextPasswdHash(String username, String password)
throws UnsupportedEncodingException, NoSuchAlgorithmException {
DateFormat df = new SimpleDateFormat("yyyyMMdd");
Calendar calendar = Calendar.getInstance();
calendar.add(Calendar.DATE, 1);
String todayStr = df.format(calendar.getTime());
String rawHash = String.format("%s%s%s", username, todayStr, password);
byte[] bytesOfMessage = rawHash.getBytes("UTF-8");
MessageDigest md = MessageDigest.getInstance("MD5");
byte[] theDigest = md.digest(bytesOfMessage);
return convertHashBytes2Str(theDigest);
}
private static String getPrePasswdHash(String username, String password)
throws UnsupportedEncodingException, NoSuchAlgorithmException {
DateFormat df = new SimpleDateFormat("yyyyMMdd");
Calendar calendar = Calendar.getInstance();
calendar.add(Calendar.DATE, -1);
String todayStr = df.format(calendar.getTime());
String rawHash = String.format("%s%s%s", username, todayStr, password);
byte[] bytesOfMessage = rawHash.getBytes("UTF-8");
MessageDigest md = MessageDigest.getInstance("MD5");
byte[] theDigest = md.digest(bytesOfMessage);
return convertHashBytes2Str(theDigest);
}
private static String convertHashBytes2Str(byte[] bytes) {
char hexDigits[] = {'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f'};
char str[] = new char[16 * 2];
int k = 0;
for (int i = 0; i < 16; i++) {
byte byte0 = bytes[i];
str[k++] = hexDigits[byte0 >>> 4 & 0xf];
str[k++] = hexDigits[byte0 & 0xf];
}
return new String(str);
}
/**
* 检查是否携带第三方 APP Header,如果携带则检查合法性
*
* @param request HTTP 请求
* @return 合法则返回 true
*/
public UserDO getExtAppUser(HttpServletRequest request) {
String authApp = request.getHeader("x-auth-app");
String authKey = request.getHeader("x-auth-key");
String authUser = request.getHeader("x-auth-user");
String authPassword = request.getHeader("x-auth-passwd");
// fakePassword 为了兼容曾经的老版本的错误 Header 头名称
String authFakePassword = request.getHeader("x-auth-password");
if (!StringUtil.isEmpty(authFakePassword)) {
authPassword = <PASSWORD>;
}
return getExtAppUser(authApp, authKey, authUser, authPassword);
}
/**
* 检查第三方应用 Header 是否合法
*
* @param authApp 第三方应用名
* @param authKey 应用 Key
* @param authUser 关联用户
* @param authPassword 关联用户 Secret Key
* @return 如果合法则返回 true
*/
public UserDO getExtAppUser(String authApp, String authKey, String authUser, String authPassword) {
if (StringUtil.isEmpty(authApp) ||
StringUtil.isEmpty(authKey) ||
StringUtil.isEmpty(authUser) ||
StringUtil.isEmpty(authPassword)) {
return null;
}
String commonLog = String.format("app=%s, key=%s, user=%s, password=%s",
authApp, authKey, authUser, authPassword);
// 校验 authApp 和 authKey 两项
AppExtDO appExt = appExtMapper.getByName(authApp);
if (null == appExt) {
log.info("No ext app found. {}", commonLog);
return null;
}
if (!authKey.equals(appExt.getExtAppKey())) {
log.info("Invalid ext app key. {}", commonLog);
return null;
}
// 校验 authUser 和 authPassword 两项
UserDO userDo = teslaUserService.getUserByLoginName(authUser);
if (null == userDo) {
log.info("Invalid x-auth-user provided, cannot find it. {}", commonLog);
return null;
}
String secretKey = userDo.getSecretKey();
if (null == secretKey || secretKey.length() == 0) {
log.info("Invalid x-auth-user provided, no secret key set. {}", commonLog);
return null;
}
String compareKey;
try {
compareKey = getPasswdHash(authUser, secretKey);
if (!compareKey.equals(authPassword)
&& !getNextPasswdHash(authUser, secretKey).equals(authPassword)
&& !getPrePasswdHash(authUser, secretKey).equals(authPassword)) {
log.info("Invalid x-auth-user provided, password check failed. {}", commonLog);
return null;
}
} catch (UnsupportedEncodingException | NoSuchAlgorithmException e) {
log.warn("Get password hash failed. {}, message={}", commonLog, e.getMessage());
return null;
}
return userDo;
}
}
| 2,887 |
/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil -*- */
/*
* clients/klist/klist.c
*
* Copyright 1990 by the Massachusetts Institute of Technology.
* All Rights Reserved.
*
* Export of this software from the United States of America may
* require a specific license from the United States Government.
* It is the responsibility of any person or organization contemplating
* export to obtain such a license before exporting.
*
* WITHIN THAT CONSTRAINT, permission to use, copy, modify, and
* distribute this software and its documentation for any purpose and
* without fee is hereby granted, provided that the above copyright
* notice appear in all copies and that both that copyright notice and
* this permission notice appear in supporting documentation, and that
* the name of M.I.T. not be used in advertising or publicity pertaining
* to distribution of the software without specific, written prior
* permission. Furthermore if you modify this software you must label
* your software as modified software and not distribute it in such a
* fashion that it might be confused with the original M.I.T. software.
* M.I.T. makes no representations about the suitability of
* this software for any purpose. It is provided "as is" without express
* or implied warranty.
*
*
* List out the contents of your credential cache or keytab.
*/
#include "autoconf.h"
#include <krb5.h>
#include <com_err.h>
#include <stdlib.h>
#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif
#include <string.h>
#include <stdio.h>
#include <time.h>
/* Need definition of INET6 before network headers, for IRIX. */
#if defined(HAVE_ARPA_INET_H)
#include <arpa/inet.h>
#endif
#ifndef _WIN32
#define GET_PROGNAME(x) (strrchr((x), '/') ? strrchr((x), '/')+1 : (x))
#else
#define GET_PROGNAME(x) max(max(strrchr((x), '/'), strrchr((x), '\\')) + 1,(x))
#endif
#ifndef _WIN32
#include <sys/socket.h>
#include <netdb.h>
#endif
extern int optind;

/* Output-mode switches, set from command-line options in main(). */
int show_flags = 0, show_time = 0, status_only = 0, show_keys = 0;
int show_etype = 0, show_addresses = 0, no_resolve = 0, print_version = 0;
int show_adtype = 0;

char *defname;                /* unparsed default principal of the ccache */
char *progname;               /* basename of argv[0], for error messages */
krb5_int32 now;               /* time at startup, used for expiry checks */
unsigned int timestamp_width; /* column width of a formatted timestamp */

krb5_context kcontext;

/* Forward declarations. */
char * etype_string (krb5_enctype );
void show_credential (krb5_creds *);

void do_ccache (char *);
void do_keytab (char *);
void printtime (time_t);
void one_addr (krb5_address *);
void fillit (FILE *, unsigned int, int);

/* Listing mode selected on the command line. */
#define DEFAULT 0
#define CCACHE 1
#define KEYTAB 2
/* Print a usage summary to stderr and exit with status 1. */
static void usage()
{
#define KRB_AVAIL_STRING(x) ((x)?"available":"not available")
    /* NOTE(review): KRB_AVAIL_STRING appears unused in this file -- confirm
       before removing. */

    fprintf(stderr, "Usage: %s [-e] [-V] [[-c] [-d] [-f] [-s] [-a [-n]]] %s",
            progname, "[-k [-t] [-K]] [name]\n");
    fprintf(stderr, "\t-c specifies credentials cache\n");
    fprintf(stderr, "\t-k specifies keytab\n");
    fprintf(stderr, "\t   (Default is credentials cache)\n");
    fprintf(stderr, "\t-e shows the encryption type\n");
    fprintf(stderr, "\t-V shows the Kerberos version and exits\n");
    fprintf(stderr, "\toptions for credential caches:\n");
    fprintf(stderr, "\t\t-d shows the submitted authorization data types\n");
    fprintf(stderr, "\t\t-f shows credentials flags\n");
    fprintf(stderr, "\t\t-s sets exit status based on valid tgt existence\n");
    fprintf(stderr, "\t\t-a displays the address list\n");
    fprintf(stderr, "\t\t\t-n do not reverse-resolve\n");
    fprintf(stderr, "\toptions for keytabs:\n");
    fprintf(stderr, "\t\t-t shows keytab entry timestamps\n");
    fprintf(stderr, "\t\t-K shows keytab entry DES keys\n");
    exit(1);
}
/*
 * Parse command-line options, validate their combinations, then dispatch to
 * do_ccache() or do_keytab() (both of which exit). Exits via usage() on
 * conflicting or invalid options.
 */
int
main(argc, argv)
    int argc;
    char **argv;
{
    int c;
    char *name;
    int mode;

    progname = GET_PROGNAME(argv[0]);

    name = NULL;
    mode = DEFAULT;
    /* V=version so v can be used for verbose later if desired. */
    while ((c = getopt(argc, argv, "dfetKsnack45V")) != -1) {
        switch (c) {
        case 'd':
            show_adtype = 1;
            break;
        case 'f':
            show_flags = 1;
            break;
        case 'e':
            show_etype = 1;
            break;
        case 't':
            show_time = 1;
            break;
        case 'K':
            show_keys = 1;
            break;
        case 's':
            status_only = 1;
            break;
        case 'n':
            no_resolve = 1;
            break;
        case 'a':
            show_addresses = 1;
            break;
        case 'c':
            /* -c and -k are mutually exclusive and may appear only once. */
            if (mode != DEFAULT) usage();
            mode = CCACHE;
            break;
        case 'k':
            if (mode != DEFAULT) usage();
            mode = KEYTAB;
            break;
        case '4':
            fprintf(stderr, "Kerberos 4 is no longer supported\n");
            exit(3);
            break;
        case '5':
            /* Kerberos 5 is the only protocol; accepted for compatibility. */
            break;
        case 'V':
            print_version = 1;
            break;
        default:
            usage();
            break;
        }
    }

    /* -n only modifies -a; reject it on its own. */
    if (no_resolve && !show_addresses) {
        usage();
    }

    /* -t/-K are keytab-only; -f/-s/-a are ccache-only. */
    if (mode == DEFAULT || mode == CCACHE) {
        if (show_time || show_keys)
            usage();
    } else {
        if (show_flags || status_only || show_addresses)
            usage();
    }

    if (argc - optind > 1) {
        fprintf(stderr, "Extra arguments (starting with \"%s\").\n",
                argv[optind+1]);
        usage();
    }

    if (print_version) {
#ifdef _WIN32 /* No access to autoconf vars; fix somehow. */
        printf("Kerberos for Windows\n");
#else
        printf("%s version %s\n", PACKAGE_NAME, PACKAGE_VERSION);
#endif
        exit(0);
    }

    name = (optind == argc-1) ? argv[optind] : 0;

    now = time(0);
    {
        char tmp[BUFSIZ];

        /* Measure how wide a formatted timestamp is, for column alignment;
           fall back to 15 columns if formatting fails. */
        if (!krb5_timestamp_to_sfstring(now, tmp, 20, (char *) NULL) ||
            !krb5_timestamp_to_sfstring(now, tmp, sizeof(tmp),
                                        (char *) NULL))
            timestamp_width = (int) strlen(tmp);
        else
            timestamp_width = 15;
    }

    {
        krb5_error_code retval;
        retval = krb5_init_context(&kcontext);
        if (retval) {
            com_err(progname, retval, "while initializing krb5");
            exit(1);
        }

        if (mode == DEFAULT || mode == CCACHE)
            do_ccache(name);
        else
            do_keytab(name);
    }
    return 0;
}
/*
 * List the entries of keytab `name` (or the default keytab when name is
 * NULL): KVNO, optional timestamp (-t), principal, and optionally enctype
 * (-e) and raw key bytes (-K). Always exits: 0 on success, 1 on error.
 */
void do_keytab(name)
    char *name;
{
    krb5_keytab kt;
    krb5_keytab_entry entry;
    krb5_kt_cursor cursor;
    char buf[BUFSIZ]; /* hopefully large enough for any type */
    char *pname;
    int code;

    if (name == NULL) {
        if ((code = krb5_kt_default(kcontext, &kt))) {
            com_err(progname, code, "while getting default keytab");
            exit(1);
        }
    } else {
        if ((code = krb5_kt_resolve(kcontext, name, &kt))) {
            com_err(progname, code, "while resolving keytab %s",
                    name);
            exit(1);
        }
    }

    if ((code = krb5_kt_get_name(kcontext, kt, buf, BUFSIZ))) {
        com_err(progname, code, "while getting keytab name");
        exit(1);
    }
    printf("Keytab name: %s\n", buf);

    if ((code = krb5_kt_start_seq_get(kcontext, kt, &cursor))) {
        com_err(progname, code, "while starting keytab scan");
        exit(1);
    }

    /* Column headers; the timestamp column only appears with -t. */
    if (show_time) {
        printf("KVNO Timestamp");
        fillit(stdout, timestamp_width - sizeof("Timestamp") + 2, (int) ' ');
        printf("Principal\n");
        printf("---- ");
        fillit(stdout, timestamp_width, (int) '-');
        printf(" ");
        fillit(stdout, 78 - timestamp_width - sizeof("KVNO"), (int) '-');
        printf("\n");
    } else {
        printf("KVNO Principal\n");
        printf("---- --------------------------------------------------------------------------\n");
    }

    while ((code = krb5_kt_next_entry(kcontext, kt, &entry, &cursor)) == 0) {
        if ((code = krb5_unparse_name(kcontext, entry.principal, &pname))) {
            com_err(progname, code, "while unparsing principal name");
            exit(1);
        }
        printf("%4d ", entry.vno);
        if (show_time) {
            printtime(entry.timestamp);
            printf(" ");
        }
        printf("%s", pname);
        if (show_etype)
            printf(" (%s) " , etype_string(entry.key.enctype));
        if (show_keys) {
            /* Dump the key as a hex string. */
            printf(" (0x");
            {
                unsigned int i;
                for (i = 0; i < entry.key.length; i++)
                    printf("%02x", entry.key.contents[i]);
            }
            printf(")");
        }
        printf("\n");
        krb5_free_unparsed_name(kcontext, pname);
    }
    /* KRB5_KT_END just means the scan is complete; anything else is an error. */
    if (code && code != KRB5_KT_END) {
        com_err(progname, code, "while scanning keytab");
        exit(1);
    }
    if ((code = krb5_kt_end_seq_get(kcontext, kt, &cursor))) {
        com_err(progname, code, "while ending keytab scan");
        exit(1);
    }
    exit(0);
}
/*
 * List the tickets in credentials cache `name` (or the default cache when
 * name is NULL). With -s (status_only) nothing is printed; the process
 * exits 0 iff an unexpired krbtgt for the client's own realm is present,
 * 1 otherwise. Always exits; never returns.
 */
void do_ccache(name)
    char *name;
{
    krb5_ccache cache = NULL;
    krb5_cc_cursor cur;
    krb5_creds creds;
    krb5_principal princ;
    krb5_flags flags;
    krb5_error_code code;
    int exit_status = 0;

    if (status_only)
        /* exit_status is set back to 0 if a valid tgt is found */
        exit_status = 1;

    if (name == NULL) {
        if ((code = krb5_cc_default(kcontext, &cache))) {
            if (!status_only)
                com_err(progname, code, "while getting default ccache");
            exit(1);
        }
    } else {
        if ((code = krb5_cc_resolve(kcontext, name, &cache))) {
            if (!status_only)
                com_err(progname, code, "while resolving ccache %s",
                        name);
            exit(1);
        }
    }

    flags = 0; /* turns off OPENCLOSE mode */
    if ((code = krb5_cc_set_flags(kcontext, cache, flags))) {
        if (code == KRB5_FCC_NOFILE) {
            /* The cache file simply does not exist. */
            if (!status_only) {
                com_err(progname, code, "(ticket cache %s:%s)",
                        krb5_cc_get_type(kcontext, cache),
                        krb5_cc_get_name(kcontext, cache));
#ifdef KRB5_KRB4_COMPAT
                if (name == NULL)
                    do_v4_ccache(0);
#endif
            }
        } else {
            if (!status_only)
                com_err(progname, code,
                        "while setting cache flags (ticket cache %s:%s)",
                        krb5_cc_get_type(kcontext, cache),
                        krb5_cc_get_name(kcontext, cache));
        }
        exit(1);
    }
    if ((code = krb5_cc_get_principal(kcontext, cache, &princ))) {
        if (!status_only)
            com_err(progname, code, "while retrieving principal name");
        exit(1);
    }
    if ((code = krb5_unparse_name(kcontext, princ, &defname))) {
        if (!status_only)
            com_err(progname, code, "while unparsing principal name");
        exit(1);
    }
    if (!status_only) {
        /* Listing header: cache location, default principal, column titles. */
        printf("Ticket cache: %s:%s\nDefault principal: %s\n\n",
               krb5_cc_get_type(kcontext, cache),
               krb5_cc_get_name(kcontext, cache), defname);
        fputs("Valid starting", stdout);
        fillit(stdout, timestamp_width - sizeof("Valid starting") + 3,
               (int) ' ');
        fputs("Expires", stdout);
        fillit(stdout, timestamp_width - sizeof("Expires") + 3,
               (int) ' ');
        fputs("Service principal\n", stdout);
    }
    if ((code = krb5_cc_start_seq_get(kcontext, cache, &cur))) {
        if (!status_only)
            com_err(progname, code, "while starting to retrieve tickets");
        exit(1);
    }
    while (!(code = krb5_cc_next_cred(kcontext, cache, &cur, &creds))) {
        /* Skip cache-internal configuration entries. */
        if (krb5_is_config_principal(kcontext, creds.server))
            continue;
        if (status_only) {
            /* Look for an unexpired krbtgt/<realm>@<realm> for our realm. */
            if (exit_status && creds.server->length == 2 &&
                strcmp(creds.server->realm.data, princ->realm.data) == 0 &&
                strcmp((char *)creds.server->data[0].data, "krbtgt") == 0 &&
                strcmp((char *)creds.server->data[1].data,
                       princ->realm.data) == 0 &&
                creds.times.endtime > now)
                exit_status = 0;
        } else {
            show_credential(&creds);
        }
        krb5_free_cred_contents(kcontext, &creds);
    }
    if (code == KRB5_CC_END) {
        /* Normal end of iteration: close down and exit. */
        if ((code = krb5_cc_end_seq_get(kcontext, cache, &cur))) {
            if (!status_only)
                com_err(progname, code, "while finishing ticket retrieval");
            exit(1);
        }
        flags = KRB5_TC_OPENCLOSE; /* turns on OPENCLOSE mode */
        if ((code = krb5_cc_set_flags(kcontext, cache, flags))) {
            if (!status_only)
                com_err(progname, code, "while closing ccache");
            exit(1);
        }
#ifdef KRB5_KRB4_COMPAT
        if (name == NULL && !status_only)
            do_v4_ccache(0);
#endif
        exit(exit_status);
    } else {
        if (!status_only)
            com_err(progname, code, "while retrieving a ticket");
        exit(1);
    }
}
/*
 * Return a human-readable name for enctype, falling back to "etype N" when
 * the enctype has no registered name.
 * NOTE: returns a pointer to a static buffer -- not reentrant.
 */
char *
etype_string(krb5_enctype enctype)
{
    static char buf[100];

    if (krb5_enctype_to_name(enctype, FALSE, buf, sizeof(buf)) != 0) {
        /* XXX if there's an error != EINVAL, it should probably be reported */
        snprintf(buf, sizeof(buf), "etype %d", enctype);
    }
    return buf;
}
/*
 * Build the one-letter ticket-flag summary for cred (e.g. "FRIA").
 * Letters appear in a fixed order; 'O' stands in for OK_AS_DELEGATE since
 * D/d are already taken. Returns a pointer to a static buffer.
 */
static char *
flags_string(krb5_creds *cred)
{
    static const struct {
        unsigned long flag;
        char letter;
    } flag_map[] = {
        { TKT_FLG_FORWARDABLE,            'F' },
        { TKT_FLG_FORWARDED,              'f' },
        { TKT_FLG_PROXIABLE,              'P' },
        { TKT_FLG_PROXY,                  'p' },
        { TKT_FLG_MAY_POSTDATE,           'D' },
        { TKT_FLG_POSTDATED,              'd' },
        { TKT_FLG_INVALID,                'i' },
        { TKT_FLG_RENEWABLE,              'R' },
        { TKT_FLG_INITIAL,                'I' },
        { TKT_FLG_HW_AUTH,                'H' },
        { TKT_FLG_PRE_AUTH,               'A' },
        { TKT_FLG_TRANSIT_POLICY_CHECKED, 'T' },
        { TKT_FLG_OK_AS_DELEGATE,         'O' },
        { TKT_FLG_ANONYMOUS,              'a' },
    };
    static char buf[32];
    unsigned int i, n = 0;

    for (i = 0; i < sizeof(flag_map) / sizeof(flag_map[0]); i++) {
        if (cred->ticket_flags & flag_map[i].flag)
            buf[n++] = flag_map[i].letter;
    }
    buf[n] = '\0';
    return (buf);
}
/*
 * Print time value tv to stdout, space-padded to timestamp_width columns.
 * Prints nothing when formatting fails (assumes a zero return from
 * krb5_timestamp_to_sfstring means success).
 */
void
printtime(tv)
    time_t tv;
{
    char timestring[BUFSIZ];
    char fill;

    fill = ' ';
    if (!krb5_timestamp_to_sfstring((krb5_timestamp) tv,
                                    timestring,
                                    timestamp_width+1,
                                    &fill)) {
        printf("%s", timestring);
    }
}
/*
 * Print one credential: start time, end time and service principal on the
 * main line, plus optional continuation lines (client name when it differs
 * from the cache default, renew-until time, flags, enctypes, authdata types
 * and addresses) depending on the show_* option globals.
 */
void
show_credential(cred)
    register krb5_creds * cred;
{
    krb5_error_code retval;
    krb5_ticket *tkt;
    char *name, *sname, *flags;
    int extra_field = 0;   /* count of items already on the continuation line */

    retval = krb5_unparse_name(kcontext, cred->client, &name);
    if (retval) {
        com_err(progname, retval, "while unparsing client name");
        return;
    }
    retval = krb5_unparse_name(kcontext, cred->server, &sname);
    if (retval) {
        com_err(progname, retval, "while unparsing server name");
        krb5_free_unparsed_name(kcontext, name);
        return;
    }
    /* A missing start time means the ticket was valid from auth time. */
    if (!cred->times.starttime)
        cred->times.starttime = cred->times.authtime;

    printtime(cred->times.starttime);
    putchar(' '); putchar(' ');
    printtime(cred->times.endtime);
    putchar(' '); putchar(' ');

    printf("%s\n", sname);

    /* Only mention the client when it differs from the cache default. */
    if (strcmp(name, defname)) {
        printf("\tfor client %s", name);
        extra_field++;
    }

    if (cred->times.renew_till) {
        if (!extra_field)
            fputs("\t",stdout);
        else
            fputs(", ",stdout);
        fputs("renew until ", stdout);
        printtime(cred->times.renew_till);
        /* counted as two fields -- presumably because the timestamp is wide */
        extra_field += 2;
    }
    if (extra_field > 3) {
        fputs("\n", stdout);
        extra_field = 0;
    }

    if (show_flags) {
        flags = flags_string(cred);
        if (flags && *flags) {
            if (!extra_field)
                fputs("\t",stdout);
            else
                fputs(", ",stdout);
            printf("Flags: %s", flags);
            extra_field++;
        }
    }
    if (extra_field > 2) {
        fputs("\n", stdout);
        extra_field = 0;
    }

    if (show_etype) {
        /* Decode the raw ticket to learn the ticket's own enctype. */
        retval = krb5_decode_ticket(&cred->ticket, &tkt);
        if (retval)
            goto err_tkt;

        if (!extra_field)
            fputs("\t",stdout);
        else
            fputs(", ",stdout);
        printf("Etype (skey, tkt): %s, ",
               etype_string(cred->keyblock.enctype));
        printf("%s ",
               etype_string(tkt->enc_part.enctype));
        extra_field++;

    err_tkt:
        if (tkt != NULL)
            krb5_free_ticket(kcontext, tkt);
    }

    if (show_adtype) {
        int i;

        if (cred->authdata != NULL) {
            if (!extra_field)
                fputs("\t",stdout);
            else
                fputs(", ",stdout);
            printf("AD types: ");
            for (i = 0; cred->authdata[i] != NULL; i++) {
                if (i)
                    printf(", ");
                printf("%d", cred->authdata[i]->ad_type);
            }
            extra_field++;
        }
    }

    /* if any additional info was printed, extra_field is non-zero */
    if (extra_field)
        putchar('\n');

    if (show_addresses) {
        if (!cred->addresses || !cred->addresses[0]) {
            printf("\tAddresses: (none)\n");
        } else {
            int i;

            printf("\tAddresses: ");
            one_addr(cred->addresses[0]);

            for (i=1; cred->addresses[i]; i++) {
                printf(", ");
                one_addr(cred->addresses[i]);
            }

            printf("\n");
        }
    }

    krb5_free_unparsed_name(kcontext, name);
    krb5_free_unparsed_name(kcontext, sname);
}
#include "port-sockets.h"
#include "socket-utils.h" /* for ss2sin etc */
#include "fake-addrinfo.h"
/*
 * one_addr: print a single krb5 address A in human-readable form.
 *
 * The raw address bytes are copied into a sockaddr_storage and formatted
 * with getnameinfo().  With no_resolve set, NI_NUMERICHOST forces the
 * numeric form; otherwise getnameinfo may reverse-resolve to a hostname.
 * IPv6 support is compiled in only under KRB5_USE_INET6.  Malformed or
 * unknown addresses print a diagnostic instead of a name.
 */
void one_addr(a)
krb5_address *a;
{
    struct sockaddr_storage ss;
    int err;
    char namebuf[NI_MAXHOST];

    memset (&ss, 0, sizeof (ss));

    switch (a->addrtype) {
    case ADDRTYPE_INET:
        if (a->length != 4) {
        broken:
            /* Shared malformed-address path; the INET6 case jumps here
             * too (goto into this case's body). */
            printf ("broken address (type %d length %d)",
                    a->addrtype, a->length);
            return;
        }
        {
            struct sockaddr_in *sinp = ss2sin (&ss);
            sinp->sin_family = AF_INET;
#ifdef HAVE_SA_LEN
            sinp->sin_len = sizeof (struct sockaddr_in);
#endif
            memcpy (&sinp->sin_addr, a->contents, 4);
        }
        break;
#ifdef KRB5_USE_INET6
    case ADDRTYPE_INET6:
        if (a->length != 16)
            goto broken;
        {
            struct sockaddr_in6 *sin6p = ss2sin6 (&ss);
            sin6p->sin6_family = AF_INET6;
#ifdef HAVE_SA_LEN
            sin6p->sin6_len = sizeof (struct sockaddr_in6);
#endif
            memcpy (&sin6p->sin6_addr, a->contents, 16);
        }
        break;
#endif
    default:
        printf ("unknown addrtype %d", a->addrtype);
        return;
    }

    namebuf[0] = 0;
    err = getnameinfo (ss2sa (&ss), socklen (ss2sa (&ss)),
                       namebuf, sizeof (namebuf), 0, 0,
                       no_resolve ? NI_NUMERICHOST : 0U);
    if (err) {
        printf ("unprintable address (type %d, error %d %s)", a->addrtype, err,
                gai_strerror (err));
        return;
    }
    printf ("%s", namebuf);
}
/*
 * fillit: write the character C to stream F exactly NUM times.
 * Used to pad listing output with a repeated fill character.
 */
void
fillit(f, num, c)
FILE *f;
unsigned int num;
int c;
{
    unsigned int remaining;

    remaining = num;
    while (remaining > 0) {
        fputc(c, f);
        remaining--;
    }
}
| 10,225 |
14,668 | // Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include <utility>
#include "base/bind.h"
#include "base/synchronization/waitable_event.h"
#include "chrome/chrome_cleaner/ipc/ipc_test_util.h"
#include "chrome/chrome_cleaner/ipc/mojo_task_runner.h"
#include "chrome/chrome_cleaner/mojom/pup.mojom.h"
#include "chrome/chrome_cleaner/mojom/test_pup_typemap.mojom.h"
#include "chrome/chrome_cleaner/pup_data/pup_data.h"
#include "chrome/chrome_cleaner/test/test_extensions.h"
#include "chrome/chrome_cleaner/test/test_util.h"
#include "components/chrome_cleaner/test/test_name_helper.h"
#include "mojo/core/embedder/embedder.h"
#include "mojo/public/cpp/bindings/pending_receiver.h"
#include "mojo/public/cpp/bindings/pending_remote.h"
#include "mojo/public/cpp/bindings/receiver.h"
#include "mojo/public/cpp/bindings/remote.h"
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"
#include "testing/multiprocess_func_list.h"
namespace chrome_cleaner {
namespace {
using base::WaitableEvent;
using testing::UnorderedElementsAreArray;
// Special result code returned by the Mojo connection error handler on child
// processes and expected by the parent process on typemap unmarshaling error
// tests.
constexpr int32_t kConnectionBrokenResultCode = 127;
// Broker-side implementation of the TestPUPTypemap Mojo interface that
// simply echoes every received PUP back to the caller, so tests can verify
// that PUPData::PUP survives a serialization round trip.
class TestPUPTypemapImpl : public mojom::TestPUPTypemap {
 public:
  // Binds this implementation to |receiver|; the Receiver member keeps the
  // Mojo connection alive for the lifetime of this object.
  explicit TestPUPTypemapImpl(
      mojo::PendingReceiver<mojom::TestPUPTypemap> receiver)
      : receiver_(this, std::move(receiver)) {}

  // Replies with an unmodified copy of |pup|.
  void EchoPUP(const PUPData::PUP& pup, EchoPUPCallback callback) override {
    std::move(callback).Run(pup);
  }

 private:
  mojo::Receiver<mojom::TestPUPTypemap> receiver_;
};
// Parent-process side of the test: owns the TestPUPTypemapImpl that services
// echo requests arriving from the child process over the message pipe.
class EchoingParentProcess : public chrome_cleaner::ParentProcess {
 public:
  explicit EchoingParentProcess(scoped_refptr<MojoTaskRunner> mojo_task_runner)
      : ParentProcess(std::move(mojo_task_runner)) {}

 protected:
  // Creates the interface implementation once the pipe to the child exists.
  void CreateImpl(mojo::ScopedMessagePipeHandle mojo_pipe) override {
    mojo::PendingReceiver<mojom::TestPUPTypemap> receiver(std::move(mojo_pipe));
    impl_ = std::make_unique<TestPUPTypemapImpl>(std::move(receiver));
  }

  // Tears down the implementation (and with it the Mojo connection).
  void DestroyImpl() override { impl_.reset(); }

 private:
  ~EchoingParentProcess() override = default;

  std::unique_ptr<TestPUPTypemapImpl> impl_;
};
// Child-process side of the test.  Owns the Remote end of TestPUPTypemap and
// exposes blocking Echo* helpers that post the Mojo call to the Mojo task
// runner and wait for the reply.
class EchoingChildProcess : public chrome_cleaner::ChildProcess {
 public:
  explicit EchoingChildProcess(scoped_refptr<MojoTaskRunner> mojo_task_runner)
      : ChildProcess(mojo_task_runner),
        // The Remote must be destroyed on the Mojo task runner, hence the
        // OnTaskRunnerDeleter.
        remote_(new mojo::Remote<mojom::TestPUPTypemap>(),
                base::OnTaskRunnerDeleter(mojo_task_runner)) {}

  // Binds the remote to |mojo_pipe| and signals |event| when done.  Posted
  // to the Mojo task runner by InitChildProcess().
  void BindToPipe(mojo::ScopedMessagePipeHandle mojo_pipe,
                  WaitableEvent* event) {
    remote_->Bind(mojo::PendingRemote<chrome_cleaner::mojom::TestPUPTypemap>(
        std::move(mojo_pipe), 0));
    // If the connection is lost while this object is processing a request or
    // waiting for a response, then it will terminate with a special result code
    // that will be expected by the parent process.
    remote_->set_disconnect_handler(base::BindOnce(
        [](bool* processing_request) {
          if (*processing_request)
            exit(kConnectionBrokenResultCode);
        },
        &processing_request_));
    event->Signal();
  }

  // Blocking echo of |input| through the parent process.
  PUPData::PUP EchoPUP(const PUPData::PUP& input) {
    return Echo(input, base::BindOnce(&EchoingChildProcess::RunEchoPUP,
                                      base::Unretained(this)));
  }

 private:
  ~EchoingChildProcess() override = default;

  // Issues the actual Mojo call; must run on the Mojo task runner.
  void RunEchoPUP(const PUPData::PUP& input,
                  mojom::TestPUPTypemap::EchoPUPCallback callback) {
    (*remote_)->EchoPUP(input, std::move(callback));
  }

  // Synchronously round-trips |input|: posts |echoing_function| to the Mojo
  // thread, then blocks on |event| until the reply callback has stored the
  // echoed value into |output|.
  template <typename EchoedValue>
  EchoedValue Echo(
      const EchoedValue& input,
      base::OnceCallback<void(const EchoedValue&,
                              base::OnceCallback<void(const EchoedValue&)>)>
          echoing_function) {
    DCHECK(!processing_request_);
    processing_request_ = true;
    EchoedValue output;
    WaitableEvent event(WaitableEvent::ResetPolicy::MANUAL,
                        WaitableEvent::InitialState::NOT_SIGNALED);
    mojo_task_runner_->PostTask(
        FROM_HERE,
        base::BindOnce(std::move(echoing_function), input,
                       base::BindOnce(
                           [](EchoedValue* output_holder, WaitableEvent* event,
                              const EchoedValue& output) {
                             *output_holder = output;
                             event->Signal();
                           },
                           &output, &event)));
    event.Wait();
    processing_request_ = false;
    return output;
  }

  std::unique_ptr<mojo::Remote<mojom::TestPUPTypemap>,
                  base::OnTaskRunnerDeleter>
      remote_;

  // All requests are synchronous and happen on the same sequence, so no
  // synchronization is needed.
  bool processing_request_ = false;
};
// Builds the child-side endpoint: creates the Mojo task runner, adopts the
// message pipe inherited via the command line, and synchronously binds the
// remote on the Mojo thread before returning, so Echo* calls are safe
// immediately afterwards.
scoped_refptr<EchoingChildProcess> InitChildProcess() {
  auto mojo_task_runner = MojoTaskRunner::Create();
  auto child_process =
      base::MakeRefCounted<EchoingChildProcess>(mojo_task_runner);
  auto message_pipe_handle = child_process->CreateMessagePipeFromCommandLine();

  WaitableEvent event(WaitableEvent::ResetPolicy::MANUAL,
                      WaitableEvent::InitialState::NOT_SIGNALED);
  mojo_task_runner->PostTask(
      FROM_HERE, base::BindOnce(&EchoingChildProcess::BindToPipe, child_process,
                                std::move(message_pipe_handle), &event));
  // Block until BindToPipe has signalled completion.
  event.Wait();

  return child_process;
}
// Child entry point: round-trips a PUP carrying disk footprints and
// footprint info through the parent, then verifies the echoed copy matches
// field by field.  The return value (0 on success) is the process exit code
// checked by the parent test.
MULTIPROCESS_TEST_MAIN(EchoPUP) {
  scoped_refptr<EchoingChildProcess> child_process = InitChildProcess();

  PUPData::PUP pup;
  pup.AddDiskFootprint(base::FilePath(L"C:\\Program Files\\File1.exe"));
  pup.AddDiskFootprint(base::FilePath(L"C:\\Program Files\\File2.exe"));
  pup.AddDiskFootprint(base::FilePath(L"C:\\Program Files\\File3.exe"));
  pup.disk_footprints_info.Insert(base::FilePath(L"C:\\File1.exe"),
                                  PUPData::FileInfo({UwS::FOUND_IN_MEMORY}));
  pup.disk_footprints_info.Insert(
      base::FilePath(L"C:\\File2.exe"),
      PUPData::FileInfo({UwS::FOUND_IN_SHELL, UwS::FOUND_IN_CLSID}));

  const PUPData::PUP echoed = child_process->EchoPUP(pup);

  // Not using operator== because error messages only show the sequences of
  // bytes for each object, which makes it very hard to identify the differences
  // between both objects.
  EXPECT_THAT(
      echoed.expanded_disk_footprints.file_paths(),
      UnorderedElementsAreArray(pup.expanded_disk_footprints.file_paths()));
  EXPECT_EQ(pup.disk_footprints_info.map(), echoed.disk_footprints_info.map());

  return ::testing::Test::HasNonfatalFailure();
}
// Child entry point: verifies that PUP fields which are not part of the Mojo
// typemap (registry footprints, scheduled tasks) are dropped in transit.
MULTIPROCESS_TEST_MAIN(EchoPUP_ExtraData) {
  scoped_refptr<EchoingChildProcess> child_process = InitChildProcess();

  PUPData::PUP pup;
  pup.AddDiskFootprint(base::FilePath(L"C:\\Program Files\\File1.exe"));

  // Add some items which are not normally populated in the target process.
  // Expect them not to be present after echoing the PUP, to validate that they
  // aren't passed through the Mojo interface. This keeps the security boundary
  // small.
  constexpr wchar_t kWindowsCurrentVersionRegKeyName[] =
      L"SOFTWARE\\Microsoft\\Windows\\CurrentVersion";
  RegKeyPath version_key(HKEY_LOCAL_MACHINE, kWindowsCurrentVersionRegKeyName,
                         KEY_WOW64_32KEY);
  pup.expanded_registry_footprints.emplace_back(version_key, L"value_name",
                                                L"value_substring",
                                                REGISTRY_VALUE_MATCH_EXACT);
  pup.expanded_registry_footprints.emplace_back(version_key, L"", L"",
                                                REGISTRY_VALUE_MATCH_EXACT);
  pup.expanded_scheduled_tasks.push_back(L"Scheduled task 1");
  pup.expanded_scheduled_tasks.push_back(L"Scheduled task 2");
  pup.expanded_scheduled_tasks.push_back(L"Scheduled task 3");

  const PUPData::PUP echoed = child_process->EchoPUP(pup);

  // Not using operator== because error messages only show the sequences of
  // bytes for each object, which makes it very hard to identify the differences
  // between both objects.
  EXPECT_THAT(
      echoed.expanded_disk_footprints.file_paths(),
      UnorderedElementsAreArray(pup.expanded_disk_footprints.file_paths()));
  // The non-typemapped fields must come back empty.
  EXPECT_TRUE(echoed.expanded_registry_footprints.empty());
  EXPECT_TRUE(echoed.expanded_scheduled_tasks.empty());

  return ::testing::Test::HasNonfatalFailure();
}
// Child entry point for the failure case: sends a PUP that cannot be
// deserialized, expecting the disconnect handler to terminate this process
// with kConnectionBrokenResultCode.
MULTIPROCESS_TEST_MAIN(EchoPUPFailure_InvalidTraceLocation) {
  scoped_refptr<EchoingChildProcess> child_process = InitChildProcess();

  // Creates a PUP with a trace location that doesn't correspond to a valid
  // UwS::TraceLocation enum value. This will trigger a deserialization error on
  // the broker process that will cause the pipe to be closed. As a consequence,
  // the connection error handler, defined in BindToPipe(), will terminate the
  // child process with a special exit code that will be expected to be received
  // by the parent process.
  PUPData::PUP pup;
  pup.disk_footprints_info.Insert(
      base::FilePath(L"C:\\File1.exe"),
      PUPData::FileInfo({UwS::FOUND_IN_UNINSTALLSTRING}));
  pup.disk_footprints_info.Insert(
      base::FilePath(L"C:\\File2.exe"),
      PUPData::FileInfo({static_cast<UwS::TraceLocation>(-1)}));

  const PUPData::PUP unused = child_process->EchoPUP(pup);

  // Normally not reached: the disconnect handler should have exited first.
  return ::testing::Test::HasNonfatalFailure();
}
// PUPTypemapTest is parametrized with:
// - expected_to_succeed_: whether the child process is expected to succeed or
//     to fail;
// - child_main_function_: the name of the MULTIPROCESS_TEST_MAIN function for
//     the child process.
typedef std::tuple<bool, std::string> PUPTypemapTestParams;

// Parent-side fixture: creates the echoing parent process used by the
// parametrized Echo test below.
class PUPTypemapTest : public ::testing::TestWithParam<PUPTypemapTestParams> {
 public:
  void SetUp() override {
    std::tie(expected_to_succeed_, child_main_function_) = GetParam();

    mojo_task_runner_ = MojoTaskRunner::Create();
    parent_process_ =
        base::MakeRefCounted<EchoingParentProcess>(mojo_task_runner_);
  }

 protected:
  std::string child_main_function_;
  bool expected_to_succeed_;

  scoped_refptr<MojoTaskRunner> mojo_task_runner_;
  scoped_refptr<EchoingParentProcess> parent_process_;
};
// Launches the child main function named by the test parameter and checks
// that its exit code matches the success/failure expectation (0 on success,
// kConnectionBrokenResultCode on an expected connection break).
TEST_P(PUPTypemapTest, Echo) {
  int32_t exit_code = -1;
  EXPECT_TRUE(parent_process_->LaunchConnectedChildProcess(child_main_function_,
                                                           &exit_code));
  EXPECT_EQ(expected_to_succeed_ ? 0 : kConnectionBrokenResultCode, exit_code);
}
// "Success" cases echo well-formed PUPs; the "Failure" case sends a PUP with
// an invalid trace location and expects the connection-broken exit code.
INSTANTIATE_TEST_SUITE_P(Success,
                         PUPTypemapTest,
                         testing::Combine(testing::Values(true),
                                          testing::Values("EchoPUP",
                                                          "EchoPUP_ExtraData")),
                         GetParamNameForTest());

INSTANTIATE_TEST_SUITE_P(
    Failure,
    PUPTypemapTest,
    testing::Combine(testing::Values(false),
                     testing::Values("EchoPUPFailure_InvalidTraceLocation")),
    GetParamNameForTest());
} // namespace
} // namespace chrome_cleaner
| 4,623 |
#ifndef ALTERNATIVE_PATH_ROUTING_HPP
#define ALTERNATIVE_PATH_ROUTING_HPP

#include "engine/datafacade.hpp"
#include "engine/internal_route_result.hpp"

#include "engine/algorithm.hpp"
#include "engine/search_engine_data.hpp"

#include "util/exception.hpp"

namespace osrm
{
namespace engine
{
namespace routing_algorithms
{

// Alternative-route search between the endpoints in `phantom_node_pair`,
// requesting up to `number_of_alternatives` routes in addition to the best
// one (exact count semantics live in the implementation — confirm there).
// Overload for the ch (Contraction Hierarchies) algorithm.
InternalManyRoutesResult alternativePathSearch(SearchEngineData<ch::Algorithm> &search_engine_data,
                                               const DataFacade<ch::Algorithm> &facade,
                                               const PhantomNodes &phantom_node_pair,
                                               unsigned number_of_alternatives);

// Overload for the mld (Multi-Level Dijkstra) algorithm.
InternalManyRoutesResult alternativePathSearch(SearchEngineData<mld::Algorithm> &search_engine_data,
                                               const DataFacade<mld::Algorithm> &facade,
                                               const PhantomNodes &phantom_node_pair,
                                               unsigned number_of_alternatives);

} // namespace routing_algorithms
} // namespace engine
} // namespace osrm

#endif
| 559 |
3,373 | <filename>scalyr_agent/third_party_tls/oscrypto/_win/_crypt32_ctypes.py
# coding: utf-8
from __future__ import unicode_literals, division, absolute_import, print_function
import sys
import ctypes
from ctypes import windll, wintypes, POINTER, Structure, c_void_p, c_char_p
from ctypes.wintypes import DWORD
from .._ffi import FFIEngineError
from .._types import str_cls
from ..errors import LibraryNotFoundError
from ._kernel32 import kernel32
__all__ = [
'crypt32',
'get_error',
]
# Load crypt32.dll; translate the "module not found" OSError into the
# library's own LibraryNotFoundError so callers see a uniform exception.
try:
    crypt32 = windll.crypt32
except (OSError) as e:
    if str_cls(e).find('The specified module could not be found') != -1:
        raise LibraryNotFoundError('crypt32.dll could not be found')
    raise

# Opaque Windows handle aliases used by the crypt32 prototypes below.
HCERTSTORE = wintypes.HANDLE
HCERTCHAINENGINE = wintypes.HANDLE
HCRYPTPROV = wintypes.HANDLE
HCRYPTKEY = wintypes.HANDLE
PBYTE = c_char_p

# ULONG_PTR is pointer-sized: 64-bit type on 64-bit Python, C unsigned
# long otherwise.
if sys.maxsize > 2 ** 32:
    ULONG_PTR = ctypes.c_uint64
else:
    ULONG_PTR = ctypes.c_ulong
try:
    # ctypes mirrors of the CryptoAPI structures (wincrypt.h).  Field names
    # and order match the SDK headers exactly; do not reorder.
    class CRYPTOAPI_BLOB(Structure):  # noqa
        _fields_ = [
            ("cbData", DWORD),
            ("pbData", c_void_p),
        ]
    # The SDK defines several aliases for the same blob layout.
    CRYPT_INTEGER_BLOB = CRYPTOAPI_BLOB
    CERT_NAME_BLOB = CRYPTOAPI_BLOB
    CRYPT_BIT_BLOB = CRYPTOAPI_BLOB
    CRYPT_OBJID_BLOB = CRYPTOAPI_BLOB

    class CRYPT_ALGORITHM_IDENTIFIER(Structure):  # noqa
        _fields_ = [
            ("pszObjId", wintypes.LPSTR),
            ("Parameters", CRYPT_OBJID_BLOB),
        ]

    class CERT_PUBLIC_KEY_INFO(Structure):  # noqa
        _fields_ = [
            ("Algorithm", CRYPT_ALGORITHM_IDENTIFIER),
            ("PublicKey", CRYPT_BIT_BLOB),
        ]

    class CERT_EXTENSION(Structure):  # noqa
        _fields_ = [
            ("pszObjId", wintypes.LPSTR),
            ("fCritical", wintypes.BOOL),
            ("Value", CRYPT_OBJID_BLOB),
        ]
    PCERT_EXTENSION = POINTER(CERT_EXTENSION)

    class CERT_INFO(Structure):  # noqa
        _fields_ = [
            ("dwVersion", DWORD),
            ("SerialNumber", CRYPT_INTEGER_BLOB),
            ("SignatureAlgorithm", CRYPT_ALGORITHM_IDENTIFIER),
            ("Issuer", CERT_NAME_BLOB),
            ("NotBefore", kernel32.FILETIME),
            ("NotAfter", kernel32.FILETIME),
            ("Subject", CERT_NAME_BLOB),
            ("SubjectPublicKeyInfo", CERT_PUBLIC_KEY_INFO),
            ("IssuerUniqueId", CRYPT_BIT_BLOB),
            ("SubjectUniqueId", CRYPT_BIT_BLOB),
            ("cExtension", DWORD),
            ("rgExtension", POINTER(PCERT_EXTENSION)),
        ]
    PCERT_INFO = POINTER(CERT_INFO)

    class CERT_CONTEXT(Structure):  # noqa
        _fields_ = [
            ("dwCertEncodingType", DWORD),
            ("pbCertEncoded", c_void_p),
            ("cbCertEncoded", DWORD),
            ("pCertInfo", PCERT_INFO),
            ("hCertStore", HCERTSTORE)
        ]
    PCERT_CONTEXT = POINTER(CERT_CONTEXT)

    class CERT_ENHKEY_USAGE(Structure):  # noqa
        _fields_ = [
            ('cUsageIdentifier', DWORD),
            ('rgpszUsageIdentifier', POINTER(POINTER(wintypes.BYTE))),
        ]
    PCERT_ENHKEY_USAGE = POINTER(CERT_ENHKEY_USAGE)
    # Certificate-chain structures used by CertGetCertificateChain and
    # CertVerifyCertificateChainPolicy.
    class CERT_TRUST_STATUS(Structure):  # noqa
        _fields_ = [
            ('dwErrorStatus', DWORD),
            ('dwInfoStatus', DWORD),
        ]

    class CERT_CHAIN_ELEMENT(Structure):  # noqa
        _fields_ = [
            ('cbSize', DWORD),
            ('pCertContext', PCERT_CONTEXT),
            ('TrustStatus', CERT_TRUST_STATUS),
            ('pRevocationInfo', c_void_p),
            ('pIssuanceUsage', PCERT_ENHKEY_USAGE),
            ('pApplicationUsage', PCERT_ENHKEY_USAGE),
            ('pwszExtendedErrorInfo', wintypes.LPCWSTR),
        ]
    PCERT_CHAIN_ELEMENT = POINTER(CERT_CHAIN_ELEMENT)

    class CERT_SIMPLE_CHAIN(Structure):  # noqa
        _fields_ = [
            ('cbSize', DWORD),
            ('TrustStatus', CERT_TRUST_STATUS),
            ('cElement', DWORD),
            ('rgpElement', POINTER(PCERT_CHAIN_ELEMENT)),
            ('pTrustListInfo', c_void_p),
            ('fHasRevocationFreshnessTime', wintypes.BOOL),
            ('dwRevocationFreshnessTime', DWORD),
        ]
    PCERT_SIMPLE_CHAIN = POINTER(CERT_SIMPLE_CHAIN)

    class CERT_CHAIN_CONTEXT(Structure):  # noqa
        _fields_ = [
            ('cbSize', DWORD),
            ('TrustStatus', CERT_TRUST_STATUS),
            ('cChain', DWORD),
            ('rgpChain', POINTER(PCERT_SIMPLE_CHAIN)),
            ('cLowerQualityChainContext', DWORD),
            ('rgpLowerQualityChainContext', c_void_p),
            ('fHasRevocationFreshnessTime', wintypes.BOOL),
            ('dwRevocationFreshnessTime', DWORD),
        ]
    PCERT_CHAIN_CONTEXT = POINTER(CERT_CHAIN_CONTEXT)

    class CERT_USAGE_MATCH(Structure):  # noqa
        _fields_ = [
            ('dwType', DWORD),
            ('Usage', CERT_ENHKEY_USAGE),
        ]

    class CERT_CHAIN_PARA(Structure):  # noqa
        _fields_ = [
            ('cbSize', DWORD),
            ('RequestedUsage', CERT_USAGE_MATCH),
        ]

    class CERT_CHAIN_POLICY_PARA(Structure):  # noqa
        _fields_ = [
            ('cbSize', DWORD),
            ('dwFlags', DWORD),
            ('pvExtraPolicyPara', c_void_p),
        ]

    class SSL_EXTRA_CERT_CHAIN_POLICY_PARA(Structure):  # noqa
        _fields_ = [
            ('cbSize', DWORD),
            ('dwAuthType', DWORD),
            ('fdwChecks', DWORD),
            ('pwszServerName', wintypes.LPCWSTR),
        ]

    class CERT_CHAIN_POLICY_STATUS(Structure):  # noqa
        _fields_ = [
            ('cbSize', DWORD),
            ('dwError', DWORD),
            ('lChainIndex', wintypes.LONG),
            ('lElementIndex', wintypes.LONG),
            ('pvExtraPolicyStatus', c_void_p),
        ]
    # Declare argument/return types for each crypt32 function used, so
    # ctypes marshals values correctly on both 32- and 64-bit Python.  A
    # missing symbol raises AttributeError, translated below.
    crypt32.CertOpenStore.argtypes = [
        wintypes.LPCSTR,
        DWORD,
        HCRYPTPROV,
        DWORD,
        c_void_p
    ]
    crypt32.CertOpenStore.restype = HCERTSTORE

    crypt32.CertAddEncodedCertificateToStore.argtypes = [
        HCERTSTORE,
        DWORD,
        PBYTE,
        DWORD,
        DWORD,
        POINTER(PCERT_CONTEXT)
    ]
    crypt32.CertAddEncodedCertificateToStore.restype = wintypes.BOOL

    crypt32.CertGetCertificateChain.argtypes = [
        HCERTCHAINENGINE,
        PCERT_CONTEXT,
        POINTER(kernel32.FILETIME),
        HCERTSTORE,
        POINTER(CERT_CHAIN_PARA),
        DWORD,
        c_void_p,
        POINTER(PCERT_CHAIN_CONTEXT)
    ]
    crypt32.CertGetCertificateChain.restype = wintypes.BOOL

    crypt32.CertVerifyCertificateChainPolicy.argtypes = [
        ULONG_PTR,
        PCERT_CHAIN_CONTEXT,
        POINTER(CERT_CHAIN_POLICY_PARA),
        POINTER(CERT_CHAIN_POLICY_STATUS)
    ]
    crypt32.CertVerifyCertificateChainPolicy.restype = wintypes.BOOL

    crypt32.CertFreeCertificateChain.argtypes = [
        PCERT_CHAIN_CONTEXT
    ]
    crypt32.CertFreeCertificateChain.restype = None

    crypt32.CertOpenSystemStoreW.argtypes = [
        wintypes.HANDLE,
        wintypes.LPCWSTR
    ]
    crypt32.CertOpenSystemStoreW.restype = HCERTSTORE

    crypt32.CertEnumCertificatesInStore.argtypes = [
        HCERTSTORE,
        PCERT_CONTEXT
    ]
    crypt32.CertEnumCertificatesInStore.restype = PCERT_CONTEXT

    crypt32.CertCloseStore.argtypes = [
        HCERTSTORE,
        DWORD
    ]
    crypt32.CertCloseStore.restype = wintypes.BOOL

    crypt32.CertGetEnhancedKeyUsage.argtypes = [
        PCERT_CONTEXT,
        DWORD,
        c_void_p,
        POINTER(DWORD)
    ]
    crypt32.CertGetEnhancedKeyUsage.restype = wintypes.BOOL

except (AttributeError):
    raise FFIEngineError('Error initializing ctypes')
# Re-export kernel32's FILETIME plus the structure types above as attributes
# of the crypt32 module object, so consumers can reach everything through a
# single `crypt32` import.
setattr(crypt32, 'FILETIME', kernel32.FILETIME)
setattr(crypt32, 'CERT_ENHKEY_USAGE', CERT_ENHKEY_USAGE)
setattr(crypt32, 'CERT_CONTEXT', CERT_CONTEXT)
setattr(crypt32, 'PCERT_CONTEXT', PCERT_CONTEXT)
setattr(crypt32, 'CERT_USAGE_MATCH', CERT_USAGE_MATCH)
setattr(crypt32, 'CERT_CHAIN_PARA', CERT_CHAIN_PARA)
setattr(crypt32, 'CERT_CHAIN_POLICY_PARA', CERT_CHAIN_POLICY_PARA)
setattr(crypt32, 'SSL_EXTRA_CERT_CHAIN_POLICY_PARA', SSL_EXTRA_CERT_CHAIN_POLICY_PARA)
setattr(crypt32, 'CERT_CHAIN_POLICY_STATUS', CERT_CHAIN_POLICY_STATUS)
setattr(crypt32, 'PCERT_CHAIN_CONTEXT', PCERT_CHAIN_CONTEXT)
def get_error():
    """
    Fetches the calling thread's last Windows API error.

    :return:
        A 2-element tuple of (integer error code, formatted message string)
    """

    code = ctypes.GetLastError()
    return (code, ctypes.FormatError(code))
| 4,159 |
1,178 | <reponame>leozz37/makani<gh_stars>1000+
# Copyright 2020 Makani Technologies LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Aerodynamic properties of the system used in the controller."""
from makani.analysis.control import simple_aero
from makani.config import mconfig
@mconfig.Config
def MakeParams():
  """Builds the simple aerodynamic model dict used by the controller.

  Returns:
    A dict of linearized aerodynamic coefficients (keys per
    CalcSimpleAeroModel in analysis/control/simple_aero.py), validated
    against the M600 ASWING baseline database before being returned.
  """
  # See CalcSimpleAeroModel in analysis/control/simple_aero.py.

  # Reduce lift coefficient from the low incidence model based on information
  # from CFD and flight testing. See the CL offset in
  # config/m600/sim/aero_sim.py.
  CL_offset = -0.125

  simple_aero_model = {
      'dCL_dalpha': 7.39413425,
      'CL_0': 2.02175,
      'base_flaps': [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
      'dCL_dflap': [0.25382030, 0.31971044, 0.35924453, 0.38101693,
                    0.42112398, 0.37414144, 0.55175835, 0.00343774],
      'dCY_dbeta': -1.4205533,
      'CY_0': -0.08687,
      'dCD_dalpha': 0.79990,
      'CD_0': 0.10567,
  }

  # The 1.0E-3 is a required argument and is the relative tolerance allowed for
  # coefficient comparison.
  simple_aero.CheckSimpleAeroModel('m600/m600_aswing_baseline.json',
                                   simple_aero_model, 1e-3,
                                   crosswind_trimmed=True,
                                   CL_0_offset=CL_offset)

  return simple_aero_model
| 756 |
364 | <filename>Source/AsyncLoadingScreen/Public/SLoadingWidget.h<gh_stars>100-1000
/************************************************************************************
* *
* Copyright (C) 2020 <NAME>. *
* Website: https://github.com/truong-bui/AsyncLoadingScreen *
* Licensed under the MIT License. See 'LICENSE' file for full license information. *
* *
************************************************************************************/
#pragma once
#include "Widgets/SCompoundWidget.h"
#include "Widgets/Images/SThrobber.h"
#include "LoadingScreenSettings.h"
class FDeferredCleanupSlateBrush;
struct FLoadingWidgetSettings;
/**
* Loading Widget base class
*/
/**
 * Loading Widget base class.
 *
 * Builds the loading icon (throbber, circular throbber, or an image
 * sequence) and exposes the state needed to step an image-sequence
 * animation.  ImageIndex/TotalDeltaTime are mutable — presumably advanced
 * from the const OnPaint pass; confirm in the implementation.
 */
class SLoadingWidget : public SCompoundWidget
{
public:
	// SWidgetOverrides
	virtual int32 OnPaint(const FPaintArgs& Args, const FGeometry& AllottedGeometry, const FSlateRect& MyCullingRect, FSlateWindowElementList& OutDrawElements, int32 LayerId, const FWidgetStyle& InWidgetStyle, bool bParentEnabled) const override;

	/** Gets the combined value of the animation properties as a single SThrobber::EAnimation value. */
	SThrobber::EAnimation GetThrobberAnimation(FThrobberSettings ThrobberSettings) const;

	/** Construct loading icon from the given settings. */
	void ConstructLoadingIcon(const FLoadingWidgetSettings& Settings);

protected:
	// Placeholder widget holding whichever icon variant was constructed.
	TSharedRef<SWidget> LoadingIcon = SNullWidget::NullWidget;

	// Image slate brush list (kept alive for deferred cleanup).
	TArray<TSharedPtr<FDeferredCleanupSlateBrush>> CleanupBrushList;

	// Play image sequence in reverse
	bool bPlayReverse = false;

	// Current image sequence index
	mutable int32 ImageIndex = 0;

	// Current total delta time
	mutable float TotalDeltaTime = 0.0f;

	// Time in second to update the images, the smaller value the faster of the animation. A zero value will update the images every frame.
	float Interval = 0.05f;

	// Getter for text visibility
	EVisibility GetLoadingWidgetVisibility() const;
};
| 600 |
3,095 | #include "mimeattachment.h"
#include <QFileInfo>
/*
 * MimeAttachment: a MimeFile whose MIME part is marked with
 * "Content-disposition: attachment", so mail clients present it as a
 * downloadable file rather than inline content.
 */
MimeAttachment::MimeAttachment(QFile *file)
    : MimeFile(file)
{
}

MimeAttachment::~MimeAttachment()
{
}

/* Appends the attachment disposition header line (CRLF-terminated, per
 * MIME), then delegates content preparation to MimeFile. */
void MimeAttachment::prepare()
{
    this->header += "Content-disposition: attachment\r\n";

    MimeFile::prepare();
}
| 111 |
1,585 | <reponame>j-xiong/ompi<filename>ompi/mpi/fortran/mpif-h/win_flush_all_f.c
/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil -*- */
/*
* Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
* University Research and Technology
* Corporation. All rights reserved.
* Copyright (c) 2004-2005 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights
* reserved.
* Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2011-2012 Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2014 Los Alamos National Security, LLC. All rights
* reserved.
* Copyright (c) 2015 Research Organization for Information Science
* and Technology (RIST). All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#include "ompi_config.h"
#include "ompi/mpi/fortran/mpif-h/bindings.h"
#if OMPI_BUILD_MPI_PROFILING
#if OPAL_HAVE_WEAK_SYMBOLS
#pragma weak PMPI_WIN_FLUSH_ALL = ompi_win_flush_all_f
#pragma weak pmpi_win_flush_all = ompi_win_flush_all_f
#pragma weak pmpi_win_flush_all_ = ompi_win_flush_all_f
#pragma weak pmpi_win_flush_all__ = ompi_win_flush_all_f
#pragma weak PMPI_Win_flush_all_f = ompi_win_flush_all_f
#pragma weak PMPI_Win_flush_all_f08 = ompi_win_flush_all_f
#else
OMPI_GENERATE_F77_BINDINGS (PMPI_WIN_FLUSH_ALL,
pmpi_win_flush_all,
pmpi_win_flush_all_,
pmpi_win_flush_all__,
pompi_win_flush_all_f,
(MPI_Fint *win, MPI_Fint *ierr),
(win, ierr) )
#endif
#endif
#if OPAL_HAVE_WEAK_SYMBOLS
#pragma weak MPI_WIN_FLUSH_ALL = ompi_win_flush_all_f
#pragma weak mpi_win_flush_all = ompi_win_flush_all_f
#pragma weak mpi_win_flush_all_ = ompi_win_flush_all_f
#pragma weak mpi_win_flush_all__ = ompi_win_flush_all_f
#pragma weak MPI_Win_flush_all_f = ompi_win_flush_all_f
#pragma weak MPI_Win_flush_all_f08 = ompi_win_flush_all_f
#else
#if ! OMPI_BUILD_MPI_PROFILING
OMPI_GENERATE_F77_BINDINGS (MPI_WIN_FLUSH_ALL,
mpi_win_flush_all,
mpi_win_flush_all_,
mpi_win_flush_all__,
ompi_win_flush_all_f,
(MPI_Fint *win, MPI_Fint *ierr),
(win, ierr) )
#else
#define ompi_win_flush_all_f pompi_win_flush_all_f
#endif
#endif
/*
 * Fortran binding for MPI_Win_flush_all: converts the Fortran window
 * handle to its C counterpart, performs the flush via the PMPI entry
 * point, and stores the status back through IERR.  IERR may be NULL
 * (guarded before the store).
 */
void ompi_win_flush_all_f(MPI_Fint *win, MPI_Fint *ierr)
{
    int c_ierr;
    MPI_Win c_win = PMPI_Win_f2c(*win);

    c_ierr = PMPI_Win_flush_all(c_win);
    if (NULL != ierr) *ierr = OMPI_INT_2_FINT(c_ierr);
}
| 1,596 |
669 | <reponame>flygod1159/MxEngine
// Copyright(c) 2019 - 2020, #Momo
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met :
//
// 1. Redistributions of source code must retain the above copyright notice, this
// list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and /or other materials provided with the distribution.
//
// 3. Neither the name of the copyright holder nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
// DISCLAIMED.IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
// DAMAGES(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
// OR TORT(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#pragma once
#include "Utilities/ECS/Component.h"
namespace MxEngine
{
    /*
     * Screen-space ambient occlusion settings component for a camera.
     * Holds tuning parameters only; accessors are defined in the
     * implementation file (whether setters clamp ranges is not visible
     * here — confirm before relying on it).
     */
    class CameraSSAO
    {
        MAKE_COMPONENT(CameraSSAO);

        // Number of SSAO samples.
        uint8_t sampleCount = 4;
        // Number of blur passes applied to the occlusion result.
        uint8_t blurIterations = 1;
        // LOD (mip level) used by the blur.
        uint8_t blurLOD = 2;
        // Strength of the occlusion effect.
        float intensity = 3.0f;
        // Sampling radius (units not specified here — TODO confirm).
        float radius = 1.0f;
    public:
        CameraSSAO() = default;

        float GetIntensity() const;
        float GetRadius() const;
        size_t GetBlurIterations() const;
        size_t GetBlurLOD() const;
        size_t GetSampleCount() const;

        void SetSampleCount(size_t samples);
        void SetIntensity(float intensity);
        void SetRadius(float radius);
        void SetBlurIterations(size_t iterations);
        void SetBlurLOD(size_t lod);
    };
} | 788 |
4,339 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.ignite.internal.jdbc2;
import java.util.UUID;
import org.apache.ignite.lang.IgniteCallable;
/**
* Task for close query cursor on remote node.
*/
/**
 * Task executed on the remote node that owns a query cursor in order to
 * close that cursor (remove it from the node-local registry kept by
 * {@link JdbcQueryTask}).
 */
class JdbcCloseCursorTask implements IgniteCallable<Void> {
    /** Serial version uid. */
    private static final long serialVersionUID = 0L;

    /** Cursor ID to close. */
    private final UUID curId;

    /**
     * @param curId ID of the cursor to close on the remote node.
     */
    public JdbcCloseCursorTask(UUID curId) {
        this.curId = curId;
    }

    /** {@inheritDoc} */
    @Override public Void call() throws Exception {
        // Deregister the cursor; always returns null as there is no result.
        JdbcQueryTask.remove(curId);

        return null;
    }
}
| 448 |
623 | // Copyright (C) 2021 The Android Open Source Project
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.gerrit.server.git.receive;
/**
 * Push option that can be specified on push.
 *
 * <p>On push the option has to be specified as {@code -o <pluginName>~<name>=<value>}, or if a
 * value is not required as {@code -o <pluginName>~<name>}.
 */
public interface PluginPushOption {
  /**
   * Returns the name of the push option.
   *
   * @return the option name, i.e. the {@code <name>} part after {@code <pluginName>~}
   */
  String getName();

  /**
   * Returns the description of the push option.
   *
   * @return a human-readable description of the option
   */
  String getDescription();
}
| 286 |
2,542 | <reponame>gridgentoo/ServiceFabricAzure
// ------------------------------------------------------------
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License (MIT). See License.txt in the repo root for license information.
// ------------------------------------------------------------
#pragma once
namespace Naming
{
    // Performance counter set exposed by the Naming Gateway.  The macros are
    // from Common's performance counter framework: they declare a
    // multi-instance counter set (PerformanceCounterSetInstanceType::Multiple)
    // containing two RawData32 counters.  Counter IDs are written as 0+N
    // offsets from the set base.
    class GatewayPerformanceCounters
    {
        DENY_COPY(GatewayPerformanceCounters)

    public:

        BEGIN_COUNTER_SET_DEFINITION(
            L"6D18D6F9-7799-40B8-924D-F923D89C0134",
            L"Naming Gateway",
            L"Counters for Naming Gateway",
            Common::PerformanceCounterSetInstanceType::Multiple)

            COUNTER_DEFINITION( 0+1, Common::PerformanceCounterType::RawData32, L"Client Connections", L"Number of client connections" )
            COUNTER_DEFINITION( 0+2, Common::PerformanceCounterType::RawData32, L"Outstanding Client Requests", L"Number of outstanding client requests" )

        END_COUNTER_SET_DEFINITION()

        DECLARE_COUNTER_INSTANCE( NumberOfClientConnections )
        DECLARE_COUNTER_INSTANCE( NumberOfOutstandingClientRequests )

        BEGIN_COUNTER_SET_INSTANCE(GatewayPerformanceCounters)
            DEFINE_COUNTER_INSTANCE( NumberOfClientConnections, 0+1 )
            DEFINE_COUNTER_INSTANCE( NumberOfOutstandingClientRequests, 0+2 )
        END_COUNTER_SET_INSTANCE()
    };

    typedef std::shared_ptr<GatewayPerformanceCounters> GatewayPerformanceCountersSPtr;
| 563 |
831 | <gh_stars>100-1000
/*
* Copyright (C) 2014 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.android.tools.idea.tests.gui.framework.fixture;
import com.android.tools.idea.navigator.nodes.apk.ApkModuleNode;
import com.android.tools.idea.navigator.nodes.apk.ndk.LibFolderNode;
import com.android.tools.idea.navigator.nodes.apk.ndk.LibraryNode;
import com.android.tools.idea.navigator.nodes.apk.ndk.NdkSourceNode;
import com.android.tools.idea.tests.gui.framework.GuiTests;
import com.android.tools.idea.tests.gui.framework.matcher.Matchers;
import com.google.common.base.Strings;
import com.google.common.collect.Lists;
import com.intellij.ide.projectView.ProjectView;
import com.intellij.ide.projectView.ProjectViewNode;
import com.intellij.ide.projectView.impl.AbstractProjectViewPane;
import com.intellij.ide.projectView.impl.ProjectViewTree;
import com.intellij.ide.projectView.impl.nodes.NamedLibraryElementNode;
import com.intellij.ide.projectView.impl.nodes.PsiDirectoryNode;
import com.intellij.ide.util.treeView.AbstractTreeStructure;
import com.intellij.openapi.actionSystem.KeyboardShortcut;
import com.intellij.openapi.actionSystem.Shortcut;
import com.intellij.openapi.keymap.KeymapManager;
import com.intellij.openapi.projectRoots.JavaSdk;
import com.intellij.openapi.projectRoots.Sdk;
import com.intellij.openapi.roots.JdkOrderEntry;
import com.intellij.openapi.roots.LibraryOrSdkOrderEntry;
import com.intellij.openapi.util.SystemInfo;
import com.intellij.openapi.vfs.VirtualFile;
import com.intellij.openapi.wm.impl.content.BaseLabel;
import com.intellij.ui.tree.AsyncTreeModel;
import com.intellij.util.ui.tree.TreeUtil;
import java.awt.event.KeyEvent;
import org.fest.swing.core.MouseButton;
import org.fest.swing.core.Robot;
import org.fest.swing.edt.GuiQuery;
import org.fest.swing.edt.GuiTask;
import org.fest.swing.fixture.JTreeFixture;
import org.fest.swing.timing.Wait;
import org.jetbrains.annotations.NotNull;
import javax.swing.*;
import javax.swing.tree.TreeModel;
import java.awt.*;
import java.util.List;
import java.util.concurrent.atomic.AtomicReference;
import static com.android.tools.idea.tests.gui.framework.UiTestUtilsKt.fixupWaiting;
import static com.android.tools.idea.tests.gui.framework.UiTestUtilsKt.waitForIdle;
import static com.google.common.truth.Truth.assertThat;
import static org.junit.Assert.assertTrue;
/**
 * UI-test fixture for the IDE "Project" tool window: switches between panes
 * (Project/Android) and navigates/asserts on the project tree.
 */
public class ProjectViewFixture extends ToolWindowFixture {
  @NotNull private final IdeFrameFixture ideFrameFixture;

  public ProjectViewFixture(@NotNull IdeFrameFixture ideFrameFixture) {
    super("Project", ideFrameFixture.getProject(), fixupWaiting(ideFrameFixture.robot()));
    this.ideFrameFixture = ideFrameFixture;
  }

  /** Switches the tool window to the "Project" pane and waits for it to load. */
  @NotNull
  public PaneFixture selectProjectPane() {
    return selectPane("Project");
  }

  /** Switches the tool window to the "Android" pane and waits for it to load. */
  @NotNull
  public PaneFixture selectAndroidPane() {
    return selectPane("Android");
  }

  /**
   * Given a list of relative paths, finds if they all belong to the Project.
   *
   * @param paths The list of relative paths with / used as separators
   */
  public void assertFilesExist(@NotNull String... paths) {
    VirtualFile baseDir = myProject.getBaseDir();
    for (String path : paths) {
      VirtualFile file = baseDir.findFileByRelativePath(path);
      assertTrue("File doesn't exist: " + path, file != null && file.exists());
    }
  }

  // Opens the tool window's pane-selection popup and picks the pane with the
  // given display name.
  private void changePane(@NotNull String paneName) {
    myToolWindow.getComponent().requestFocusInWindow();
    Component projectDropDown = GuiTests.waitUntilFound(myRobot, Matchers.byText(BaseLabel.class, "Project:"));
    // On Mac the popup is opened by clicking the header label; elsewhere the
    // "ShowContent" keyboard shortcut is replayed instead.
    if (SystemInfo.isMac) {
      myRobot.click(projectDropDown.getParent());
    }
    else {
      Shortcut shortcut = KeymapManager.getInstance().getActiveKeymap().getShortcuts("ShowContent")[0];
      KeyStroke firstKeyStroke = ((KeyboardShortcut)shortcut).getFirstKeyStroke();
      myRobot.pressAndReleaseKey(firstKeyStroke.getKeyCode(), firstKeyStroke.getModifiers());
    }
    // Popup entries are titled "Content name=<pane>"; match exactly.
    String paneFullName = "Content name=" + paneName;
    GuiTests.clickPopupMenuItemMatching(s -> s.equals(paneFullName), projectDropDown, myRobot);
  }

  /** Returns the id of the currently selected project-view pane. */
  @NotNull
  public String getCurrentViewId() {
    return ProjectView.getInstance(myProject).getCurrentViewId();
  }

  // Activates the tool window, switches to the named pane, and wraps the
  // resulting pane in a fixture once its tree has finished loading.
  @NotNull
  private PaneFixture selectPane(String name) {
    activate();
    changePane(name);
    final ProjectView projectView = ProjectView.getInstance(myProject);
    return new PaneFixture(ideFrameFixture, projectView.getCurrentProjectViewPane(), myRobot).waitForTreeToFinishLoading();
  }

  /** Fixture for one project-view pane and the tree it displays. */
  public static class PaneFixture {
    @NotNull private final IdeFrameFixture myIdeFrameFixture;
    @NotNull private final AbstractProjectViewPane myPane;
    @NotNull private final Robot myRobot;
    @NotNull private final JTreeFixture myTree;

    PaneFixture(@NotNull IdeFrameFixture ideFrameFixture, @NotNull AbstractProjectViewPane pane, @NotNull Robot robot) {
      myIdeFrameFixture = ideFrameFixture;
      myPane = pane;
      myRobot = robot;
      myTree = new JTreeFixture(myRobot, GuiTests.waitUntilShowing(myRobot, Matchers.byType(ProjectViewTree.class)));
    }

    /** Waits up to 5 seconds for the pane's tree to finish loading. */
    @NotNull
    private PaneFixture waitForTreeToFinishLoading() {
      return waitForTreeToFinishLoading(5);
    }

    @NotNull
    private PaneFixture waitForTreeToFinishLoading(long secondsToWait) {
      TreeModel model = myTree.target().getModel();
      if (model instanceof AsyncTreeModel) { // otherwise there's nothing to wait for, as the tree loading should be synchronous
        Wait.seconds(secondsToWait).expecting("tree to load").until(() -> !(((AsyncTreeModel) model).isProcessing()));
        waitForIdle();
      }
      return this;
    }

    /** Expands all nodes of the tree, waiting up to 5 seconds for loading. */
    @NotNull
    public PaneFixture expand() {
      return expand(5);
    }

    @NotNull
    public PaneFixture expand(long secondsToWait) {
      GuiTask.execute(() -> TreeUtil.expandAll(myPane.getTree()));
      waitForTreeToFinishLoading(secondsToWait);
      return this;
    }

    // Polls until the pane's tree builder has produced its tree structure.
    // NOTE(review): the NPE catch is deliberate — the builder may not be
    // initialized yet when this is first queried.
    @NotNull
    private AbstractTreeStructure getTreeStructure() {
      final AtomicReference<AbstractTreeStructure> treeStructureRef = new AtomicReference<>();
      Wait.seconds(1).expecting("AbstractTreeStructure to be built").until(() -> GuiQuery.getNonNull(() -> {
        try {
          treeStructureRef.set(myPane.getTreeBuilder().getTreeStructure());
          return true;
        }
        catch (NullPointerException e) {
          // expected;
        }
        return false;
      }));
      return treeStructureRef.get();
    }

    // Finds the APK module node among the direct children of the tree root.
    @NotNull
    private NodeFixture findApkNode() {
      AbstractTreeStructure treeStructure = getTreeStructure();
      ApkModuleNode apkNode = GuiQuery.getNonNull(() -> {
        for (Object child : treeStructure.getChildElements(treeStructure.getRootElement())) {
          if(child instanceof ApkModuleNode) {
            return (ApkModuleNode)child;
          }
        }
        throw new IllegalStateException("Unable to find 'APK module' node");
      });
      return new NodeFixture(apkNode, treeStructure);
    }

    // Finds the native-libraries folder node among the children of parentNode.
    @NotNull
    private NodeFixture findNativeLibrariesNode(@NotNull NodeFixture parentNode) {
      for (NodeFixture child : parentNode.getChildren()) {
        if (child.myNode instanceof LibFolderNode) {
          return child;
        }
      }
      throw new IllegalStateException("Unable to find the child native library node under given parent node");
    }

    /**
     * Finds the tree node of the native library whose test-string matches
     * {@code libraryName} under the APK module's native-libraries folder.
     *
     * @throws IllegalStateException if no such library node exists
     */
    @NotNull
    public NodeFixture findNativeLibraryNodeFor(@NotNull String libraryName) {
      List<NodeFixture> nativeLibs = findNativeLibrariesNode(findApkNode()).getChildren();
      for (NodeFixture child : nativeLibs) {
        if (child.myNode instanceof LibraryNode) {
          String libName = child.myNode.toTestString(null);
          if(libName != null) {
            if(libraryName.equals(libName)) {
              return child;
            }
          }
        }
      }
      throw new IllegalStateException("Unable to find native library node for " + libraryName);
    }

    /** Left-clicks the tree node addressed by the given path segments. */
    public IdeFrameFixture clickPath(@NotNull final String... paths) {
      return clickPath(MouseButton.LEFT_BUTTON, paths);
    }

    /**
     * Expands the tree along the given path segments and clicks the final
     * node with the given mouse button.
     */
    public IdeFrameFixture clickPath(@NotNull MouseButton button, @NotNull final String... paths) {
      StringBuilder totalPath = new StringBuilder(paths[0]);
      for (int i = 1; i < paths.length; i++) {
        myTree.selectPath(totalPath.toString());
        // VK_ADD (numpad '+') expands the currently selected node.
        myTree.robot().pressAndReleaseKey(KeyEvent.VK_ADD);
        totalPath.append('/').append(paths[i]);
      }
      myTree.clickPath(totalPath.toString(), button);
      return myIdeFrameFixture;
    }

    /** Deletes the addressed node via its context menu's "Delete..." action. */
    public IdeFrameFixture deletePath(@NotNull final String... pathSegments) {
      return clickPath(MouseButton.RIGHT_BUTTON, pathSegments)
        .openFromMenu(DeleteDialogFixture::find, "Delete...")
        .unsafeDelete();
    }
  }

  /** Fixture wrapping a single node of the project tree. */
  public static class NodeFixture {
    @NotNull private final ProjectViewNode<?> myNode;
    @NotNull private final AbstractTreeStructure myTreeStructure;

    NodeFixture(@NotNull ProjectViewNode<?> node, @NotNull AbstractTreeStructure treeStructure) {
      myNode = node;
      myTreeStructure = treeStructure;
    }

    /** Returns this node's {@link ProjectViewNode} children, in tree order. */
    @NotNull
    public List<NodeFixture> getChildren() {
      final List<NodeFixture> children = Lists.newArrayList();
      GuiTask.execute(
        () -> {
          for (Object child : myTreeStructure.getChildElements(myNode)) {
            if (child instanceof ProjectViewNode) {
              children.add(new NodeFixture((ProjectViewNode<?>)child, myTreeStructure));
            }
          }
        });
      return children;
    }

    /** Returns true if this node represents a JDK library order entry. */
    public boolean isJdk() {
      if (myNode instanceof NamedLibraryElementNode) {
        LibraryOrSdkOrderEntry orderEntry = ((NamedLibraryElementNode)myNode).getValue().getOrderEntry();
        if (orderEntry instanceof JdkOrderEntry) {
          Sdk sdk = ((JdkOrderEntry)orderEntry).getJdk();
          return sdk.getSdkType() instanceof JavaSdk;
        }
      }
      return false;
    }

    /** Returns true if this node is a directory or an NDK source node. */
    public boolean isSourceFolder() {
      return myNode instanceof PsiDirectoryNode || myNode instanceof NdkSourceNode;
    }

    /** Asserts this node is a directory with the given file name. */
    @NotNull
    public NodeFixture requireDirectory(@NotNull String name) {
      assertThat(myNode).isInstanceOf(PsiDirectoryNode.class);
      assertThat(myNode.getVirtualFile().getName()).isEqualTo(name);
      return this;
    }

    @Override
    public String toString() {
      return Strings.nullToEmpty(myNode.getName());
    }
  }
}
| 4,018 |
1,546 | <reponame>RadekBledowski/lawnchair<filename>quickstep/src/com/android/quickstep/RecentsAnimationController.java
/*
* Copyright (C) 2018 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.android.quickstep;
import static com.android.launcher3.util.Executors.MAIN_EXECUTOR;
import static com.android.launcher3.util.Executors.UI_HELPER_EXECUTOR;
import android.view.IRecentsAnimationController;
import android.view.SurfaceControl;
import android.window.PictureInPictureSurfaceTransaction;
import androidx.annotation.NonNull;
import androidx.annotation.UiThread;
import com.android.launcher3.util.Preconditions;
import com.android.launcher3.util.RunnableList;
import com.android.systemui.shared.recents.model.ThumbnailData;
import com.android.systemui.shared.system.InteractionJankMonitorWrapper;
import com.android.systemui.shared.system.RecentsAnimationControllerCompat;
import com.android.systemui.shared.system.RemoteAnimationTargetCompat;
import java.util.function.Consumer;
/**
* Wrapper around RecentsAnimationControllerCompat to help with some synchronization
*/
/**
 * Wrapper around RecentsAnimationControllerCompat to help with some synchronization
 */
public class RecentsAnimationController {

    private final RecentsAnimationControllerCompat mController;
    // Notified exactly once, when finishing is first requested.
    private final Consumer<RecentsAnimationController> mOnFinishedListener;
    private final boolean mAllowMinimizeSplitScreen;

    // Last value forwarded to setAnimationTargetsBehindSystemBars (inverted).
    private boolean mUseLauncherSysBarFlags = false;
    private boolean mSplitScreenMinimized = false;
    // Guards against finishing the controller more than once.
    private boolean mFinishRequested = false;
    // Callbacks to run on the main thread after the controller has finished.
    private RunnableList mPendingFinishCallbacks = new RunnableList();

    /**
     * @param onFinishedListener invoked on the caller's thread when finishing
     *                           is first requested (before the background
     *                           finish completes)
     */
    public RecentsAnimationController(RecentsAnimationControllerCompat controller,
            boolean allowMinimizeSplitScreen,
            Consumer<RecentsAnimationController> onFinishedListener) {
        mController = controller;
        mOnFinishedListener = onFinishedListener;
        mAllowMinimizeSplitScreen = allowMinimizeSplitScreen;
    }

    /**
     * Synchronously takes a screenshot of the task with the given {@param taskId} if the task is
     * currently being animated.
     */
    public ThumbnailData screenshotTask(int taskId) {
        return mController.screenshotTask(taskId);
    }

    /**
     * Indicates that the gesture has crossed the window boundary threshold and system UI can be
     * update the system bar flags accordingly.
     */
    public void setUseLauncherSystemBarFlags(boolean useLauncherSysBarFlags) {
        if (mUseLauncherSysBarFlags != useLauncherSysBarFlags) {
            mUseLauncherSysBarFlags = useLauncherSysBarFlags;
            UI_HELPER_EXECUTOR.execute(() -> {
                // Note the inversion: launcher controlling the bars means the
                // animation targets are no longer behind the system bars.
                mController.setAnimationTargetsBehindSystemBars(!useLauncherSysBarFlags);
            });
        }
    }

    /**
     * Indicates that the gesture has crossed the window boundary threshold and we should minimize
     * if we are in splitscreen.
     */
    public void setSplitScreenMinimized(boolean splitScreenMinimized) {
        if (!mAllowMinimizeSplitScreen) {
            return;
        }
        if (mSplitScreenMinimized != splitScreenMinimized) {
            mSplitScreenMinimized = splitScreenMinimized;
            UI_HELPER_EXECUTOR.execute(() -> {
                // getNoCreate: don't bind the SystemUi proxy just for this.
                SystemUiProxy p = SystemUiProxy.INSTANCE.getNoCreate();
                if (p != null) {
                    p.setSplitScreenMinimized(splitScreenMinimized);
                }
            });
        }
    }

    /**
     * Remove task remote animation target from
     * {@link RecentsAnimationCallbacks#onTaskAppeared(RemoteAnimationTargetCompat)}}.
     */
    @UiThread
    public void removeTaskTarget(@NonNull RemoteAnimationTargetCompat target) {
        UI_HELPER_EXECUTOR.execute(() -> mController.removeTask(target.taskId));
    }

    /** Finishes the animation, ending up in recents/home. */
    @UiThread
    public void finishAnimationToHome() {
        finishController(true /* toRecents */, null, false /* sendUserLeaveHint */);
    }

    /** Finishes the animation, returning to the app. */
    @UiThread
    public void finishAnimationToApp() {
        finishController(false /* toRecents */, null, false /* sendUserLeaveHint */);
    }

    /** See {@link #finish(boolean, Runnable, boolean)} */
    @UiThread
    public void finish(boolean toRecents, Runnable onFinishComplete) {
        finish(toRecents, onFinishComplete, false /* sendUserLeaveHint */);
    }

    /**
     * @param onFinishComplete A callback that runs on the main thread after the animation
     *                         controller has finished on the background thread.
     * @param sendUserLeaveHint Determines whether userLeaveHint flag will be set on the pausing
     *                          activity. If userLeaveHint is true, the activity will enter into
     *                          picture-in-picture mode upon being paused.
     */
    @UiThread
    public void finish(boolean toRecents, Runnable onFinishComplete, boolean sendUserLeaveHint) {
        Preconditions.assertUIThread();
        finishController(toRecents, onFinishComplete, sendUserLeaveHint);
    }

    /**
     * Requests the controller to finish. Safe to call repeatedly: only the
     * first call finishes the controller; later callbacks are just queued
     * (or run immediately once the pending list has been destroyed).
     */
    @UiThread
    public void finishController(boolean toRecents, Runnable callback, boolean sendUserLeaveHint) {
        if (mFinishRequested) {
            // If finishing, add to pending finish callbacks, otherwise, if finished, adding to the
            // destroyed RunnableList will just trigger the callback to be called immediately
            mPendingFinishCallbacks.add(callback);
            return;
        }
        // Finish not yet requested
        mFinishRequested = true;
        mOnFinishedListener.accept(this);
        mPendingFinishCallbacks.add(callback);
        UI_HELPER_EXECUTOR.execute(() -> {
            mController.finish(toRecents, sendUserLeaveHint);
            InteractionJankMonitorWrapper.end(InteractionJankMonitorWrapper.CUJ_QUICK_SWITCH);
            InteractionJankMonitorWrapper.end(InteractionJankMonitorWrapper.CUJ_APP_CLOSE_TO_HOME);
            // Run queued callbacks back on the main thread, after the
            // controller has actually finished.
            MAIN_EXECUTOR.execute(mPendingFinishCallbacks::executeAllAndDestroy);
        });
    }

    /**
     * @see IRecentsAnimationController#cleanupScreenshot()
     */
    @UiThread
    public void cleanupScreenshot() {
        UI_HELPER_EXECUTOR.execute(() -> mController.cleanupScreenshot());
    }

    /**
     * @see RecentsAnimationControllerCompat#detachNavigationBarFromApp
     */
    @UiThread
    public void detachNavigationBarFromApp(boolean moveHomeToTop) {
        UI_HELPER_EXECUTOR.execute(() -> mController.detachNavigationBarFromApp(moveHomeToTop));
    }

    /**
     * @see IRecentsAnimationController#animateNavigationBarToApp(long)
     */
    @UiThread
    public void animateNavigationBarToApp(long duration) {
        UI_HELPER_EXECUTOR.execute(() -> mController.animateNavigationBarToApp(duration));
    }

    /**
     * @see IRecentsAnimationController#setWillFinishToHome(boolean)
     */
    @UiThread
    public void setWillFinishToHome(boolean willFinishToHome) {
        UI_HELPER_EXECUTOR.execute(() -> mController.setWillFinishToHome(willFinishToHome));
    }

    /**
     * Sets the final surface transaction on a Task. This is used by Launcher to notify the system
     * that animating Activity to PiP has completed and the associated task surface should be
     * updated accordingly. This should be called before `finish`
     * @param taskId for which the leash should be updated
     * @param finishTransaction the transaction to transfer to the task surface control after the
     *                          leash is removed
     * @param overlay the surface control for an overlay being shown above the pip (can be null)
     */
    public void setFinishTaskTransaction(int taskId,
            PictureInPictureSurfaceTransaction finishTransaction,
            SurfaceControl overlay) {
        UI_HELPER_EXECUTOR.execute(
                () -> mController.setFinishTaskTransaction(taskId, finishTransaction, overlay));
    }

    /**
     * Enables the input consumer to start intercepting touches in the app window.
     */
    public void enableInputConsumer() {
        UI_HELPER_EXECUTOR.submit(() -> {
            mController.hideCurrentInputMethod();
            mController.setInputConsumerEnabled(true);
        });
    }

    /** @return wrapper controller. */
    public RecentsAnimationControllerCompat getController() {
        return mController;
    }
}
| 3,148 |
350 | <filename>cocotb/config.py
#!/usr/bin/env python
###############################################################################
# Copyright (c) 2013 Potential Ventures Ltd
# Copyright (c) 2013 SolarFlare Communications Inc
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Potential Ventures Ltd,
# SolarFlare Communications Inc nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL POTENTIAL VENTURES LTD BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
###############################################################################
"""
Module for querying the cocotb configuration
This module provides information in module global variables and through a
``main()`` function that is used in the cocotb-config script.
Global variables:
share_dir: str, path where the cocotb data is stored
makefiles_dir: str, path where the cocotb makefiles are installed
libs_dir: str, path where the cocotb interface libraries are located
"""
import argparse
import os
import sys
import textwrap
import cocotb
import cocotb._vendor.find_libpython as find_libpython
# Public API of this module (see the module docstring).
__all__ = ["share_dir", "makefiles_dir", "libs_dir"]

# str: path where the cocotb data is stored
share_dir = os.path.join(os.path.dirname(cocotb.__file__), "share")
# str: path where the cocotb makefiles are installed
makefiles_dir = os.path.join(os.path.dirname(cocotb.__file__), "share", "makefiles")
# str: path where the cocotb interface libraries are located
libs_dir = os.path.join(os.path.dirname(cocotb.__file__), "libs")
# On Windows use mixed mode "c:/a/b/c" as this work in all cases
if os.name == "nt":
    libs_dir = libs_dir.replace("\\", "/")
def help_vars_text() -> str:
    """Return the help text listing the environment variables cocotb reads.

    The text ends with a link to the "building" documentation page matching
    this cocotb version ("latest" for development builds).
    """
    if "dev" in cocotb.__version__:
        doclink = "https://docs.cocotb.org/en/latest/building.html"
    else:
        doclink = f"https://docs.cocotb.org/en/v{cocotb.__version__}/building.html"
    # NOTE: make sure to keep "helpmsg" aligned with documentation/source/building.rst
    # Also keep it at 80 chars.
    helpmsg = textwrap.dedent(
        """\
        The following variables are environment variables:
        Cocotb
        ------
        TOPLEVEL Instance in the hierarchy to use as the DUT
        RANDOM_SEED Random seed, to recreate a previous test stimulus
        COCOTB_ANSI_OUTPUT Force cocotb to print or not print in color
        COCOTB_REDUCED_LOG_FMT Display log lines shorter
        COCOTB_ATTACH Pause time value in seconds before the simulator start
        COCOTB_ENABLE_PROFILING Performance analysis of the Python portion of cocotb
        COCOTB_LOG_LEVEL Default logging level (default INFO)
        COCOTB_RESOLVE_X How to resolve X, Z, U, W on integer conversion
        MEMCHECK HTTP port to use for debugging Python memory usage
        LIBPYTHON_LOC Absolute path to libpython
        Regression Manager
        ------------------
        COCOTB_PDB_ON_EXCEPTION Drop into the Python debugger (pdb) on exception
        MODULE Modules to search for test functions (comma-separated)
        TESTCASE Test function(s) to run (comma-separated list)
        COCOTB_RESULTS_FILE File name for xUnit XML tests results
        COVERAGE Report Python coverage (also HDL for some simulators)
        GPI
        ---
        GPI_EXTRA Extra libraries to load at runtime (comma-separated)
        Scheduler
        ---------
        COCOTB_SCHEDULER_DEBUG Enable additional output of coroutine scheduler
        For details, see {}"""
    ).format(doclink)
    return helpmsg
def lib_name(interface: str, simulator: str) -> str:
    """Return the file name of the cocotb interface library.

    Args:
        interface: GPI interface name, one of ``vpi``, ``vhpi`` or ``fli``
            (case-insensitive).
        simulator: Simulator name, e.g. ``icarus`` or ``questa``
            (case-insensitive).

    Returns:
        The platform-specific library file name, e.g. ``libcocotbvpi_icarus``.

    Raises:
        ValueError: If the interface or the simulator is not supported.
    """
    interface_name = interface.lower()
    supported_interfaces = ["vpi", "vhpi", "fli"]
    if interface_name not in supported_interfaces:
        raise ValueError(
            "Wrong interface used. Supported: " + ", ".join(supported_interfaces)
        )
    simulator_name = simulator.lower()
    supported_sims = [
        "icarus",
        "questa",
        "modelsim",
        "ius",
        "xcelium",
        "vcs",
        "ghdl",
        "riviera",
        "activehdl",
        "cvc",
    ]
    # Bug fix: compare the lower-cased name (consistent with the interface
    # check above). Previously the original-case argument was tested, so a
    # mixed-case simulator name such as "Questa" was wrongly rejected.
    if simulator_name not in supported_sims:
        raise ValueError(
            "Wrong simulator name. Supported: " + ", ".join(supported_sims)
        )
    # Several simulators share a single library implementation.
    if simulator_name in ["questa", "cvc"]:
        library_name = "modelsim"
    elif simulator_name == "xcelium":
        library_name = "ius"
    elif simulator_name in ["riviera", "activehdl"]:
        library_name = "aldec"
    else:
        library_name = simulator_name
    # Icarus loads the module by its bare name; other targets use the native
    # shared-library extension for the platform.
    if library_name == "icarus":
        lib_ext = ""
    elif os.name == "nt":
        lib_ext = ".dll"
    else:
        lib_ext = ".so"
    # check if compiled with msvc (MSVC builds carry no "lib" prefix)
    if os.path.isfile(os.path.join(libs_dir, "cocotb.dll")):
        lib_prefix = ""
    else:
        lib_prefix = "lib"
    return lib_prefix + "cocotb" + interface_name + "_" + library_name + lib_ext
def lib_name_path(interface, simulator):
    """Return the absolute path of the interface library for the given
    GPI interface (VPI/VHPI/FLI) and simulator.
    """
    full_path = os.path.join(libs_dir, lib_name(interface, simulator))
    # On Windows, return mixed-mode paths ("c:/a/b/c"): they work everywhere.
    return full_path.replace("\\", "/") if os.name == "nt" else full_path
def _findlibpython():
    # Locate the shared libpython of the running interpreter; terminate the
    # process with a non-zero status when it cannot be found.
    path = find_libpython.find_libpython()
    if path is not None:
        return path
    sys.exit(1)
class PrintAction(argparse.Action):
    """argparse action that prints a fixed string and exits the program.

    The string is supplied at ``add_argument`` time through the ``text``
    keyword; the option consumes no command-line values (``nargs=0``).
    """

    def __init__(self, option_strings, dest, text=None, **kwargs):
        self.text = text
        super().__init__(option_strings, dest, nargs=0, **kwargs)

    def __call__(self, parser, namespace, values, option_string=None):
        # Emit the stored text, then terminate via the parser (exit status 0).
        print(self.text)
        parser.exit()
class PrintFuncAction(argparse.Action):
    """argparse action that applies a callable to the option's values,
    prints the result, and exits.

    The callable is supplied at ``add_argument`` time through the
    ``function`` keyword; a ``ValueError`` it raises is reported via
    ``parser.error`` (which exits with status 2).
    """

    def __init__(self, option_strings, dest, function=None, **kwargs):
        self.function = function
        super().__init__(option_strings, dest, **kwargs)

    def __call__(self, parser, args, values, option_string=None):
        try:
            result = self.function(*values)
        except ValueError as e:
            parser.error(e)
        else:
            print(result)
        parser.exit()
def get_parser():
    """Build the argument parser for the ``cocotb-config`` console script.

    Every option prints one piece of configuration and exits, implemented
    via the PrintAction/PrintFuncAction helpers defined in this module.
    """
    prefix_dir = os.path.dirname(os.path.dirname(cocotb.__file__))
    version = cocotb.__version__
    python_bin = sys.executable
    # RawTextHelpFormatter keeps the pre-formatted multi-line help intact.
    parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument(
        "--prefix",
        help="echo the package-prefix of cocotb",
        action=PrintAction,
        text=prefix_dir,
    )
    parser.add_argument(
        "--share",
        help="echo the package-share of cocotb",
        action=PrintAction,
        text=share_dir,
    )
    parser.add_argument(
        "--makefiles",
        help="echo the package-makefiles of cocotb",
        action=PrintAction,
        text=makefiles_dir,
    )
    parser.add_argument(
        "--python-bin",
        help="echo the path to the Python binary cocotb is installed for",
        action=PrintAction,
        text=python_bin,
    )
    parser.add_argument(
        "--help-vars",
        help="show help about supported variables",
        action=PrintAction,
        text=help_vars_text(),
    )
    parser.add_argument(
        "--libpython",
        help="Print the absolute path to the libpython associated with the current Python installation",
        nargs=0,
        metavar=(),
        action=PrintFuncAction,
        function=_findlibpython,
    )
    parser.add_argument(
        "--lib-dir",
        help="Print the absolute path to the interface libraries location",
        action=PrintAction,
        text=libs_dir,
    )
    parser.add_argument(
        "--lib-name",
        help="Print the name of interface library for given interface (VPI/VHPI/FLI) and simulator",
        nargs=2,
        metavar=("INTERFACE", "SIMULATOR"),
        action=PrintFuncAction,
        function=lib_name,
    )
    parser.add_argument(
        "--lib-name-path",
        help="Print the absolute path of interface library for given interface (VPI/VHPI/FLI) and simulator",
        nargs=2,
        metavar=("INTERFACE", "SIMULATOR"),
        action=PrintFuncAction,
        function=lib_name_path,
    )
    parser.add_argument(
        "-v",
        "--version",
        help="echo the version of cocotb",
        action=PrintAction,
        text=version,
    )
    return parser
def main():
    """Entry point of the ``cocotb-config`` command line tool."""
    arg_parser = get_parser()
    # Without arguments there is nothing to report: show usage and fail.
    if len(sys.argv) < 2:
        arg_parser.print_help(sys.stderr)
        sys.exit(1)
    arg_parser.parse_args()
# Allow running this module directly as a script.
if __name__ == "__main__":
    main()
| 3,893 |
608 | // This file is part of VSTGUI. It is subject to the license terms
// in the LICENSE file found in the top-level directory of this
// distribution and at http://github.com/steinbergmedia/vstgui/LICENSE
#pragma once
#include "../../vstguibase.h"
#if WINDOWS
#include "../iplatformresourceinputstream.h"
#include "../../optional.h"
#include <combaseapi.h>
//------------------------------------------------------------------------
namespace VSTGUI {
//------------------------------------------------------------------------
// COM IStream implementation over a resource embedded in the module.
// NOTE(review): judging by the declaration this is a read-oriented stream;
// the write/locking members exist to satisfy the IStream interface —
// confirm against the implementation file.
class ResourceStream final : public IStream
{
public:
	ResourceStream ();
	virtual ~ResourceStream () noexcept = default;

	// Loads the resource described by resourceDesc/type; returns success.
	bool open (const CResourceDescription& resourceDesc, const char* type);

	// IStream
	HRESULT STDMETHODCALLTYPE Read (void *pv, ULONG cb, ULONG *pcbRead) override;
	HRESULT STDMETHODCALLTYPE Write (const void *pv, ULONG cb, ULONG *pcbWritten) override;
	HRESULT STDMETHODCALLTYPE Seek (LARGE_INTEGER dlibMove, DWORD dwOrigin, ULARGE_INTEGER *plibNewPosition) override;
	HRESULT STDMETHODCALLTYPE SetSize (ULARGE_INTEGER libNewSize) override;
	HRESULT STDMETHODCALLTYPE CopyTo (IStream *pstm, ULARGE_INTEGER cb, ULARGE_INTEGER *pcbRead, ULARGE_INTEGER *pcbWritten) override;
	HRESULT STDMETHODCALLTYPE Commit (DWORD grfCommitFlags) override;
	HRESULT STDMETHODCALLTYPE Revert () override;
	HRESULT STDMETHODCALLTYPE LockRegion (ULARGE_INTEGER libOffset, ULARGE_INTEGER cb, DWORD dwLockType) override;
	HRESULT STDMETHODCALLTYPE UnlockRegion (ULARGE_INTEGER libOffset, ULARGE_INTEGER cb, DWORD dwLockType) override;
	HRESULT STDMETHODCALLTYPE Stat (STATSTG *pstatstg, DWORD grfStatFlag) override;
	HRESULT STDMETHODCALLTYPE Clone (IStream **ppstm) override;

	// IUnknown
	HRESULT STDMETHODCALLTYPE QueryInterface(REFIID iid, void ** ppvObject) override;
	ULONG STDMETHODCALLTYPE AddRef() override;
	ULONG STDMETHODCALLTYPE Release() override;

protected:
	HGLOBAL resData;    // handle to the loaded resource data
	uint32_t streamPos; // current read position within the resource
	uint32_t resSize;   // total size of the resource in bytes
	LONG _refcount;     // COM reference count
};
//-----------------------------------------------------------------------------
// IPlatformResourceInputStream backed by the COM ResourceStream declared
// above: exposes a Windows module resource through VSTGUI's platform
// resource-input-stream interface.
class WinResourceInputStream final : public IPlatformResourceInputStream
{
public:
	using ResourceStreamPtr = std::unique_ptr<ResourceStream>;

	// Factory: builds a stream for the described resource.
	static PlatformResourceInputStreamPtr create (const CResourceDescription& desc);

private:
	WinResourceInputStream (ResourceStreamPtr&& stream);

	// IPlatformResourceInputStream
	uint32_t readRaw (void* buffer, uint32_t size) override;
	int64_t seek (int64_t pos, SeekMode mode) override;
	int64_t tell () override;

	ResourceStreamPtr stream; // owning handle to the underlying COM stream
};
//------------------------------------------------------------------------
} // VSTGUI
#endif // WINDOWS
| 832 |
1,144 | <reponame>dram/metasfresh
package de.metas.procurement.base.model.interceptor;
import org.adempiere.ad.callout.annotations.Callout;
import org.adempiere.ad.callout.annotations.CalloutMethod;
import org.adempiere.ad.callout.api.ICalloutField;
import org.adempiere.ad.modelvalidator.annotations.Interceptor;
import org.adempiere.ad.modelvalidator.annotations.ModelChange;
import org.adempiere.exceptions.AdempiereException;
import org.compiere.model.ModelValidator;
import de.metas.procurement.base.IPMMContractsDAO;
import de.metas.procurement.base.IPMMProductBL;
import de.metas.procurement.base.IWebuiPush;
import de.metas.procurement.base.model.I_PMM_Product;
import de.metas.util.Services;
/*
* #%L
* de.metas.procurement.base
* %%
* Copyright (C) 2016 metas GmbH
* %%
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as
* published by the Free Software Foundation, either version 2 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public
* License along with this program. If not, see
* <http://www.gnu.org/licenses/gpl-2.0.html>.
* #L%
*/
/**
 * Model interceptor and callout for {@link I_PMM_Product}: guards key-column
 * changes while procurement contracts are running, keeps derived (read-only)
 * columns up to date, and pushes changed records to the procurement WebUI.
 */
@Callout(I_PMM_Product.class)
@Interceptor(I_PMM_Product.class)
public class PMM_Product
{
	/** Singleton instance to be registered with the interceptor/callout framework. */
	public static final PMM_Product instance = new PMM_Product();

	/** AD_Message shown when a guarded change is attempted while contracts are running. */
	private static final String MSG_ProductChangeNotAllowedForRunningContracts = "de.metas.procurement.ProductChangeNotAllowedForRunningContracts";

	private PMM_Product()
	{
	}

	/**
	 * Fails the save when product, packing instruction or ASI would change
	 * while the PMM product still has running procurement contracts.
	 */
	@ModelChange(timings = { ModelValidator.TYPE_BEFORE_CHANGE }, ifColumnsChanged = {
			I_PMM_Product.COLUMNNAME_M_Product_ID,
			I_PMM_Product.COLUMNNAME_M_HU_PI_Item_Product_ID,
			I_PMM_Product.COLUMNNAME_M_AttributeSetInstance_ID
	})
	public void preventChangesIfContractActive(final I_PMM_Product pmmProduct)
	{
		if (Services.get(IPMMContractsDAO.class).hasRunningContracts(pmmProduct))
		{
			throw new AdempiereException("@" + MSG_ProductChangeNotAllowedForRunningContracts + "@");
		}
	}

	// NOTE: Always allow deactivating an PMM_Product even if it has a running contract.
	// See: https://github.com/metasfresh/metasfresh/issues/1817
	// @ModelChange(timings = { ModelValidator.TYPE_BEFORE_CHANGE }, ifColumnsChanged = { I_PMM_Product.COLUMNNAME_IsActive })
	// public void preventDeactivateIfContractActive(final I_PMM_Product pmmProduct)
	// {
	// if (!pmmProduct.isActive() && Services.get(IPMMContractsDAO.class).hasRunningContracts(pmmProduct))
	// {
	// throw new AdempiereException("@" + MSG_ProductChangeNotAllowedForRunningContracts + "@");
	// }
	// }

	/** Recomputes the derived (read-only) columns before the record is saved. */
	@ModelChange(timings = { ModelValidator.TYPE_BEFORE_CHANGE, ModelValidator.TYPE_BEFORE_NEW }, ifColumnsChanged = {
			I_PMM_Product.COLUMNNAME_M_Product_ID,
			I_PMM_Product.COLUMNNAME_M_AttributeSetInstance_ID,
			I_PMM_Product.COLUMNNAME_M_HU_PI_Item_Product_ID
	})
	public void updateReadOnlyFields(final I_PMM_Product pmmProduct)
	{
		Services.get(IPMMProductBL.class).update(pmmProduct);
	}

	/** Callout counterpart of {@link #updateReadOnlyFields(I_PMM_Product)} for live UI editing. */
	@CalloutMethod(columnNames = {
			I_PMM_Product.COLUMNNAME_M_Product_ID,
			I_PMM_Product.COLUMNNAME_M_AttributeSetInstance_ID,
			I_PMM_Product.COLUMNNAME_M_HU_PI_Item_Product_ID
	})
	public void updateReadOnlyFields(final I_PMM_Product pmmProduct, final ICalloutField unused)
	{
		Services.get(IPMMProductBL.class).update(pmmProduct);
	}

	/**
	 * Pushes the changed record to the procurement WebUI, after the
	 * transaction committed (afterCommit = true).
	 */
	@ModelChange(timings = { ModelValidator.TYPE_AFTER_CHANGE, ModelValidator.TYPE_AFTER_NEW }, //
			ifColumnsChanged = {
					I_PMM_Product.COLUMNNAME_IsActive,
					I_PMM_Product.COLUMNNAME_M_Product_ID,
					I_PMM_Product.COLUMNNAME_M_AttributeSetInstance_ID,
					I_PMM_Product.COLUMNNAME_M_HU_PI_Item_Product_ID,
					I_PMM_Product.COLUMNNAME_M_Warehouse_ID, } //
			, afterCommit = true)
	public void pushToWebUI(final I_PMM_Product pmmProduct)
	{
		Services.get(IWebuiPush.class).pushProduct(pmmProduct);
	}
}
| 1,499 |
316 | // Copyright (c) 2015-2016, tandasat. All rights reserved.
// Copyright (c) 2016-2017, KelvinChan. All rights reserved.
// Use of this source code is governed by a MIT-style license that can be
// found in the LICENSE file.
/// @file
/// Implements EPT functions.
#include "ept.h"
#include "asm.h"
#include "common.h"
#include "log.h"
#include "util.h"
#include "performance.h"
extern "C" {
////////////////////////////////////////////////////////////////////////////////
//
// macro utilities
//
////////////////////////////////////////////////////////////////////////////////
//
// types
//
#include <pshpack1.h>
// Describes one MTRR-managed memory range. Packed to 1-byte alignment so the
// static_assert below can pin the layout to exactly 24 bytes.
struct MtrrData {
  bool enabled;        //<! Whether this entry is valid
  bool fixedMtrr;      //<! Whether this entry manages a fixed range MTRR
  UCHAR type;          //<! Memory Type (such as WB, UC)
  bool reserverd1;     //<! Padding
  ULONG reserverd2;    //<! Padding
  ULONG64 range_base;  //<! A base address of a range managed by this entry
  ULONG64 range_end;   //<! An end address of a range managed by this entry
};
#include <poppack.h>
////////////////////////////////////////////////////////////////////////////////
//
// constants and macros
//
// Followings are how 64bits of a physical address is used to locate EPT
// entries:
//
// EPT Page map level 4 selector 9 bits
// EPT Page directory pointer selector 9 bits
// EPT Page directory selector 9 bits
// EPT Page table selector 9 bits
// EPT Byte within page 12 bits
// Shift amounts used to extract the per-level table index from a 64-bit
// guest-physical address (see the address layout comment above).
// Bits 47:39 select the PML4 (PXE) entry
static const auto kEptpPxiShift = 39ull;
// Bits 38:30 select the PDPT (PPE) entry
static const auto kEptpPpiShift = 30ull;
// Bits 29:21 select the PDT (PDE) entry
static const auto kEptpPdiShift = 21ull;
// Bits 20:12 select the PT (PTE) entry
static const auto kEptpPtiShift = 12ull;
// Use 9 bits; 0b0000_0000_0000_0000_0000_0000_0001_1111_1111
static const auto kEptpPtxMask = 0x1ffull;
// Architecture defined number of variable range MTRRs
static const auto kEptpNumOfMaxVariableRangeMtrrs = 255;
// Architecture defined number of fixed range MTRRs (1 for 64k, 2 for 16k, 8
// for 4k)
static const auto kEptpNumOfFixedRangeMtrrs = 1 + 2 + 8;
// A size of array to store all possible MTRRs
static const auto kEptpMtrrEntriesSize =
    kEptpNumOfMaxVariableRangeMtrrs + kEptpNumOfFixedRangeMtrrs;
// How many EPT entries are preallocated. When the number exceeds it, the
// hypervisor issues a bugcheck.
static const auto kEptpNumberOfPreallocatedEntries = 50;
// All MTRRs captured at initialization; the first entry with enabled == false
// marks the end of the used portion (filled by EptInitializeMtrrEntries).
static MtrrData g_eptp_mtrr_entries[kEptpMtrrEntriesSize];
// Default memory type from IA32_MTRR_DEF_TYPE, used when no MTRR matches.
static UCHAR g_eptp_mtrr_default_type;
static_assert(sizeof(MtrrData) == 24, "Size check");
////////////////////////////////////////////////////////////////////////////////
//
// prototypes
//
struct GuestContext;
typedef void(*pEptWalkerCallback)(EptCommonEntry* entry, void* Context);
//----------------------------------------------------------------------------------------------------------------//
NTSTATUS VmmpSetLastFaultAddr(
_In_ GuestContext *guest_context,
_In_ ULONG_PTR LastFaultAddr
);
_When_(ept_data == nullptr,
_IRQL_requires_max_(DISPATCH_LEVEL)) static EptCommonEntry
*EptpConstructTables(_In_ EptCommonEntry *table, _In_ ULONG table_level,
_In_ ULONG64 physical_address,
_In_opt_ EptData *ept_data);
static void EptpDestructTables(_In_ EptCommonEntry *table,
_In_ ULONG table_level);
_Must_inspect_result_ __drv_allocatesMem(Mem)
_When_(ept_data == nullptr,
_IRQL_requires_max_(DISPATCH_LEVEL)) static EptCommonEntry
*EptpAllocateEptEntry(_In_opt_ EptData *ept_data);
static EptCommonEntry *EptpAllocateEptEntryFromPreAllocated(
_In_ EptData *ept_data);
_Must_inspect_result_ __drv_allocatesMem(Mem) _IRQL_requires_max_(
DISPATCH_LEVEL) static EptCommonEntry *EptpAllocateEptEntryFromPool();
static void EptpInitTableEntry(_In_ EptCommonEntry *Entry,
_In_ ULONG table_level,
_In_ ULONG64 physical_address);
static ULONG64 EptpAddressToPxeIndex(_In_ ULONG64 physical_address);
static ULONG64 EptpAddressToPpeIndex(_In_ ULONG64 physical_address);
static ULONG64 EptpAddressToPdeIndex(_In_ ULONG64 physical_address);
static ULONG64 EptpAddressToPteIndex(_In_ ULONG64 physical_address);
static bool EptpIsDeviceMemory(_In_ ULONG64 physical_address);
static EptCommonEntry *EptpGetEptPtEntry(_In_ EptCommonEntry *table,
_In_ ULONG table_level,
_In_ ULONG64 physical_address);
static void EptpFreeUnusedPreAllocatedEntries(
_Pre_notnull_ __drv_freesMem(Mem) EptCommonEntry **preallocated_entries,
_In_ long used_count);
_Use_decl_annotations_ void EptHandleEptViolationEx(
_In_ EptData *ept_data,
_In_ ULONG64 PhysAddr,
_In_ bool is_range_of_ept12);
#if defined(ALLOC_PRAGMA)
#pragma alloc_text(PAGE, EptIsEptAvailable)
#pragma alloc_text(PAGE, EptInitialization)
#pragma alloc_text(PAGE, EptInitializeMtrrEntries)
#endif
////////////////////////////////////////////////////////////////////////////////
//
// variables
//
////////////////////////////////////////////////////////////////////////////////
//
// implementations
//
// Walks every entry of the EPT hierarchy rooted at table (table_level gives
// the level of that table: 4 = PML4 ... 1 = PT), invoking callback on each
// entry BEFORE descending into its child table. The return value is only the
// result of the most recent sub-walk and carries no aggregate meaning;
// existing callers ignore it.
_Use_decl_annotations_ static bool EptpWalker(
	EptCommonEntry *table,
	ULONG table_level,
	pEptWalkerCallback callback,
	void* context)
{
	bool ret = false;
	// Each EPT table holds 512 eight-byte entries (one 4KB page).
	for (auto i = 0ul; i < 512; ++i)
	{
		const auto entry = table[i];
		if (callback) {
			callback(&table[i], context);
		}
		// Leaf tables (PT) have no children to descend into.
		if (table_level == 1) {
			continue;
		}
		// Only descend through entries that reference a child table.
		if (entry.fields.physial_address)
		{
			const auto sub_table = reinterpret_cast<EptCommonEntry *>(
				UtilVaFromPfn(entry.fields.physial_address));
			switch (table_level) {
			case 4: // table == PML4, sub_table == PDPT
			case 3: // table == PDPT, sub_table == PDT
			case 2: // table == PDT, sub_table == PT
				ret = EptpWalker(sub_table, table_level - 1, callback, context);
				break;
			case 1:
				break;
			default:
				HYPERPLATFORM_COMMON_DBG_BREAK();
				break;
			}
		}
	}
	return ret;
}
// Reads and stores all MTRRs to set a correct memory type for EPT
_Use_decl_annotations_ void EptInitializeMtrrEntries() {
PAGED_CODE();
int index = 0;
MtrrData *mtrr_entries = g_eptp_mtrr_entries;
// Get and store the default memory type
Ia32MtrrDefaultTypeMsr default_type = { UtilReadMsr64(Msr::kIa32MtrrDefType) };
g_eptp_mtrr_default_type = default_type.fields.default_mtemory_type;
// Read MTRR capability
Ia32MtrrCapabilitiesMsr mtrr_capabilities = {
UtilReadMsr64(Msr::kIa32MtrrCap) };
HYPERPLATFORM_LOG_DEBUG(
"MTRR Default=%lld, VariableCount=%lld, FixedSupported=%lld, FixedEnabled=%lld",
default_type.fields.default_mtemory_type,
mtrr_capabilities.fields.variable_range_count,
mtrr_capabilities.fields.fixed_range_supported,
default_type.fields.fixed_mtrrs_enabled);
// Read fixed range MTRRs if supported
if (mtrr_capabilities.fields.fixed_range_supported &&
default_type.fields.fixed_mtrrs_enabled) {
static const auto k64kBase = 0x0;
static const auto k64kManagedSize = 0x10000;
static const auto k16kBase = 0x80000;
static const auto k16kManagedSize = 0x4000;
static const auto k4kBase = 0xC0000;
static const auto k4kManagedSize = 0x1000;
// The kIa32MtrrFix64k00000 manages 8 ranges of memory. The first range
// starts at 0x0, and each range manages a 64k (0x10000) range. For example,
// entry[0]: 0x0 : 0x10000 - 1
// entry[1]: 0x10000 : 0x20000 - 1
// ...
// entry[7]: 0x70000 : 0x80000 - 1
ULONG64 offset = 0;
Ia32MtrrFixedRangeMsr fixed_range = {
UtilReadMsr64(Msr::kIa32MtrrFix64k00000) };
for (auto memory_type : fixed_range.fields.types) {
// Each entry manages 64k (0x10000) length.
ULONG64 base = k64kBase + offset;
offset += k64kManagedSize;
// Saves the MTRR
mtrr_entries[index].enabled = true;
mtrr_entries[index].fixedMtrr = true;
mtrr_entries[index].type = memory_type;
mtrr_entries[index].range_base = base;
mtrr_entries[index].range_end = base + k64kManagedSize - 1;
index++;
}
NT_ASSERT(k64kBase + offset == k16kBase);
// kIa32MtrrFix16k80000 manages 8 ranges of memory. The first range starts
// at 0x80000, and each range manages a 16k (0x4000) range. For example,
// entry[0]: 0x80000 : 0x84000 - 1
// entry[1]: 0x88000 : 0x8C000 - 1
// ...
// entry[7]: 0x9C000 : 0xA0000 - 1
// Also, subsequent memory ranges are managed by other MSR,
// kIa32MtrrFix16kA0000, which manages 8 ranges of memory starting at
// 0xA0000 in the same fashion. For example,
// entry[0]: 0xA0000 : 0xA4000 - 1
// entry[1]: 0xA8000 : 0xAC000 - 1
// ...
// entry[7]: 0xBC000 : 0xC0000 - 1
offset = 0;
for (auto msr = static_cast<ULONG>(Msr::kIa32MtrrFix16k80000);
msr <= static_cast<ULONG>(Msr::kIa32MtrrFix16kA0000); msr++) {
fixed_range.all = UtilReadMsr64(static_cast<Msr>(msr));
for (auto memory_type : fixed_range.fields.types) {
// Each entry manages 16k (0x4000) length.
ULONG64 base = k16kBase + offset;
offset += k16kManagedSize;
// Saves the MTRR
mtrr_entries[index].enabled = true;
mtrr_entries[index].fixedMtrr = true;
mtrr_entries[index].type = memory_type;
mtrr_entries[index].range_base = base;
mtrr_entries[index].range_end = base + k16kManagedSize - 1;
index++;
}
}
NT_ASSERT(k16kBase + offset == k4kBase);
// kIa32MtrrFix4kC0000 manages 8 ranges of memory. The first range starts
// at 0xC0000, and each range manages a 4k (0x1000) range. For example,
// entry[0]: 0xC0000 : 0xC1000 - 1
// entry[1]: 0xC1000 : 0xC2000 - 1
// ...
// entry[7]: 0xC7000 : 0xC8000 - 1
// Also, subsequent memory ranges are managed by other MSRs such as
// kIa32MtrrFix4kC8000, kIa32MtrrFix4kD0000, and kIa32MtrrFix4kF8000. Each
// MSR manages 8 ranges of memory in the same fashion up to 0x100000.
offset = 0;
for (auto msr = static_cast<ULONG>(Msr::kIa32MtrrFix4kC0000);
msr <= static_cast<ULONG>(Msr::kIa32MtrrFix4kF8000); msr++) {
fixed_range.all = UtilReadMsr64(static_cast<Msr>(msr));
for (auto memory_type : fixed_range.fields.types) {
// Each entry manages 4k (0x1000) length.
ULONG64 base = k4kBase + offset;
offset += k4kManagedSize;
// Saves the MTRR
mtrr_entries[index].enabled = true;
mtrr_entries[index].fixedMtrr = true;
mtrr_entries[index].type = memory_type;
mtrr_entries[index].range_base = base;
mtrr_entries[index].range_end = base + k4kManagedSize - 1;
index++;
}
}
NT_ASSERT(k4kBase + offset == 0x100000);
}
// Read all variable range MTRRs
for (auto i = 0; i < mtrr_capabilities.fields.variable_range_count; i++) {
// Read MTRR mask and check if it is in use
const auto phy_mask = static_cast<ULONG>(Msr::kIa32MtrrPhysMaskN) + i * 2;
Ia32MtrrPhysMaskMsr mtrr_mask = { UtilReadMsr64(static_cast<Msr>(phy_mask)) };
if (!mtrr_mask.fields.valid) {
continue;
}
// Get a length this MTRR manages
ULONG length;
BitScanForward64(&length, mtrr_mask.fields.phys_mask * PAGE_SIZE);
// Read MTRR base and calculate a range this MTRR manages
const auto phy_base = static_cast<ULONG>(Msr::kIa32MtrrPhysBaseN) + i * 2;
Ia32MtrrPhysBaseMsr mtrr_base = { UtilReadMsr64(static_cast<Msr>(phy_base)) };
ULONG64 base = mtrr_base.fields.phys_base * PAGE_SIZE;
ULONG64 end = base + (1ull << length) - 1;
// Save it
mtrr_entries[index].enabled = true;
mtrr_entries[index].fixedMtrr = false;
mtrr_entries[index].type = mtrr_base.fields.type;
mtrr_entries[index].range_base = base;
mtrr_entries[index].range_end = end;
index++;
}
}
// Returns a memory type based on MTRRs
// Returns a memory type based on MTRRs.
// Scans g_eptp_mtrr_entries in order and applies the MTRR precedence rules:
// a matching fixed-range MTRR wins immediately; otherwise UC wins immediately;
// WT beats WB on overlap; for any other overlap the last matching entry is
// used. If nothing matches, the default type from IA32_MTRR_DEF_TYPE applies.
_Use_decl_annotations_ static memory_type EptpGetMemoryType(
    ULONG64 physical_address) {
  // Indicate that MTRR is not defined (as a default)
  UCHAR result_type = MAXUCHAR;
  // Looks for MTRR that includes the specified physical_address
  for (const auto mtrr_entry : g_eptp_mtrr_entries) {
    if (!mtrr_entry.enabled) {
      // Reached out the end of stored MTRRs
      break;
    }
    if (!UtilIsInBounds(physical_address, mtrr_entry.range_base,
                        mtrr_entry.range_end)) {
      // This MTRR does not describe a memory type of the physical_address
      continue;
    }
    // See: MTRR Precedences
    if (mtrr_entry.fixedMtrr) {
      // If a fixed MTRR describes a memory type, it is priority
      result_type = mtrr_entry.type;
      break;
    }
    if (mtrr_entry.type == static_cast<UCHAR>(memory_type::kUncacheable)) {
      // If a memory type is UC, it is priority. Do not continue to search as
      // UC has the highest priority
      result_type = mtrr_entry.type;
      break;
    }
    if (result_type == static_cast<UCHAR>(memory_type::kWriteThrough) ||
        mtrr_entry.type == static_cast<UCHAR>(memory_type::kWriteThrough)) {
      if (result_type == static_cast<UCHAR>(memory_type::kWriteBack)) {
        // If two or more MTRRs describes an over-wrapped memory region, and
        // one is WT and the other one is WB, use WT. However, look for other
        // MTRRs, as the other MTRR specifies the memory address as UC, which is
        // priority.
        result_type = static_cast<UCHAR>(memory_type::kWriteThrough);
        continue;
      }
    }
    // Otherwise, processor behavior is undefined. We just use the last MTRR
    // describes the memory address.
    result_type = mtrr_entry.type;
  }
  // Use the default MTRR if no MTRR entry is found
  if (result_type == MAXUCHAR) {
    result_type = g_eptp_mtrr_default_type;
  }
  return static_cast<memory_type>(result_type);
}
// Checks if the system supports EPT technology sufficient enough
// Checks if the system supports EPT technology sufficient enough.
// Required capabilities (from IA32_VMX_EPT_VPID_CAP):
//  - 4-level page walk
//  - write-back memory type for EPT paging structures
//  - INVEPT, including single-context and all-context types
//  - INVVPID, including individual-address, single-context, all-context,
//    and single-context-retaining-globals types
_Use_decl_annotations_ bool EptIsEptAvailable() {
  PAGED_CODE();
  const Ia32VmxEptVpidCapMsr capability = {
      UtilReadMsr64(Msr::kIa32VmxEptVpidCap)};
  const auto &caps = capability.fields;
  return caps.support_page_walk_length4 &&
         caps.support_write_back_memory_type &&
         caps.support_invept &&
         caps.support_single_context_invept &&
         caps.support_all_context_invept &&
         caps.support_invvpid &&
         caps.support_individual_address_invvpid &&
         caps.support_single_context_invvpid &&
         caps.support_all_context_invvpid &&
         caps.support_single_context_retaining_globals_invvpid;
}
// Returns an EPT pointer from ept_data
// Returns an EPT pointer from ept_data: the packed 64-bit EPTP value (memory
// type, page-walk length and PML4 PFN) suitable for writing into the VMCS.
_Use_decl_annotations_ ULONG64 EptGetEptPointer(EptData *ept_data) {
  return ept_data->ept_pointer->all;
}
// Builds EPT, allocates pre-allocated enties, initializes and returns EptData
// Builds EPT, allocates pre-allocated enties, initializes and returns EptData.
// Identity-maps every physical page the OS reports, maps the APIC base, and
// fills a pool of pre-allocated table pages for later use by
// EptpAllocateEptEntryFromPreAllocated. Returns nullptr on any allocation
// failure, after unwinding everything allocated so far.
_Use_decl_annotations_ EptData *EptInitialization() {
  PAGED_CODE();
  static const auto kEptPageWalkLevel = 4ul;
  // Allocate ept_data
  const auto ept_data = reinterpret_cast<EptData *>(ExAllocatePoolWithTag(
      NonPagedPool, sizeof(EptData), kHyperPlatformCommonPoolTag));
  if (!ept_data) {
    return nullptr;
  }
  RtlZeroMemory(ept_data, sizeof(EptData));
  // Allocate EptPointer
  const auto ept_poiner = reinterpret_cast<EptPointer *>(ExAllocatePoolWithTag(
      NonPagedPool, PAGE_SIZE, kHyperPlatformCommonPoolTag));
  if (!ept_poiner) {
    ExFreePoolWithTag(ept_data, kHyperPlatformCommonPoolTag);
    return nullptr;
  }
  RtlZeroMemory(ept_poiner, PAGE_SIZE);
  // Allocate EPT_PML4 and initialize EptPointer
  const auto ept_pml4 =
      reinterpret_cast<EptCommonEntry *>(ExAllocatePoolWithTag(
          NonPagedPool, PAGE_SIZE, kHyperPlatformCommonPoolTag));
  if (!ept_pml4) {
    ExFreePoolWithTag(ept_poiner, kHyperPlatformCommonPoolTag);
    ExFreePoolWithTag(ept_data, kHyperPlatformCommonPoolTag);
    return nullptr;
  }
  RtlZeroMemory(ept_pml4, PAGE_SIZE);
  // The EPTP memory type comes from MTRRs for the PML4 page; the page-walk
  // length field is encoded as (number of levels - 1).
  ept_poiner->fields.memory_type = static_cast<ULONG64>(EptpGetMemoryType(UtilPaFromVa(ept_pml4)));
  ept_poiner->fields.page_walk_length = kEptPageWalkLevel - 1;
  ept_poiner->fields.pml4_address = UtilPfnFromPa(UtilPaFromVa(ept_pml4));
  // Initialize all EPT entries for all physical memory pages
  // (identity mapping of every run in the OS physical memory map).
  const auto pm_ranges = UtilGetPhysicalMemoryRanges();
  for (auto run_index = 0ul; run_index < pm_ranges->number_of_runs;
       ++run_index) {
    const auto run = &pm_ranges->run[run_index];
    const auto base_addr = run->base_page * PAGE_SIZE;
    for (auto page_index = 0ull; page_index < run->page_count; ++page_index) {
      const auto indexed_addr = base_addr + page_index * PAGE_SIZE;
      const auto ept_pt_entry =
          EptpConstructTables(ept_pml4, 4, indexed_addr, nullptr);
      if (!ept_pt_entry) {
        // Tables allocated so far (including ept_pml4) are torn down here.
        EptpDestructTables(ept_pml4, 4);
        ExFreePoolWithTag(ept_poiner, kHyperPlatformCommonPoolTag);
        ExFreePoolWithTag(ept_data, kHyperPlatformCommonPoolTag);
        return nullptr;
      }
    }
  }
  // Initialize an EPT entry for APIC_BASE. It is required to allocated it now
  // for some reasons, or else, system hangs.
  const Ia32ApicBaseMsr apic_msr = {UtilReadMsr64(Msr::kIa32ApicBase)};
  if (!EptpConstructTables(ept_pml4, 4, apic_msr.fields.apic_base * PAGE_SIZE,
                           nullptr)) {
    EptpDestructTables(ept_pml4, 4);
    ExFreePoolWithTag(ept_poiner, kHyperPlatformCommonPoolTag);
    ExFreePoolWithTag(ept_data, kHyperPlatformCommonPoolTag);
    return nullptr;
  }
  // Allocate preallocated_entries
  const auto preallocated_entries_size =
      sizeof(EptCommonEntry *) * kEptpNumberOfPreallocatedEntries;
  const auto preallocated_entries = reinterpret_cast<EptCommonEntry **>(
      ExAllocatePoolWithTag(NonPagedPool, preallocated_entries_size,
                            kHyperPlatformCommonPoolTag));
  if (!preallocated_entries) {
    EptpDestructTables(ept_pml4, 4);
    ExFreePoolWithTag(ept_poiner, kHyperPlatformCommonPoolTag);
    ExFreePoolWithTag(ept_data, kHyperPlatformCommonPoolTag);
    return nullptr;
  }
  RtlZeroMemory(preallocated_entries, preallocated_entries_size);
  // And fill preallocated_entries with newly created entries, later consumed
  // through EptpAllocateEptEntryFromPreAllocated.
  for (auto i = 0ul; i < kEptpNumberOfPreallocatedEntries; ++i) {
    const auto ept_entry = EptpAllocateEptEntry(nullptr);
    if (!ept_entry) {
      // used_count of 0 frees all non-null slots filled so far, plus the array.
      EptpFreeUnusedPreAllocatedEntries(preallocated_entries, 0);
      EptpDestructTables(ept_pml4, 4);
      ExFreePoolWithTag(ept_poiner, kHyperPlatformCommonPoolTag);
      ExFreePoolWithTag(ept_data, kHyperPlatformCommonPoolTag);
      return nullptr;
    }
    preallocated_entries[i] = ept_entry;
  }
  // Initialization completed
  ept_data->ept_pointer = ept_poiner;
  ept_data->ept_pml4 = ept_pml4;
  ept_data->preallocated_entries = preallocated_entries;
  ept_data->preallocated_entries_count = 0;
  return ept_data;
}
// Allocate and initialize all EPT entries associated with the physical_address
// Allocate and initialize all EPT entries associated with the physical_address.
//
// Thin wrapper over EptpConstructTablesEx with no "reserved" template
// hierarchy. Returns the leaf PT entry, or nullptr on allocation failure.
//
// BUG FIX: ept_data was previously dropped (a literal nullptr was passed to
// EptpConstructTablesEx), so when callers such as EptHandleEptViolationEx
// supplied an EptData, entries were still allocated from pool instead of the
// pre-allocated pool, and preallocated_entries_count never advanced. Forward
// the parameter as the signature promises.
_Use_decl_annotations_ static EptCommonEntry *EptpConstructTables(
    EptCommonEntry *table, ULONG table_level, ULONG64 physical_address, EptData* ept_data)
{
  return EptpConstructTablesEx(table, table_level, physical_address, ept_data, nullptr);
}
// Allocate and initialize all EPT entries associated with the physical_address
// Allocate and initialize all EPT entries associated with the physical_address.
//
// Recursively descends from PML4 (table_level == 4) down to PT (level 1).
// When an intermediate entry is empty:
//  - with reserved == nullptr, a fresh child table is obtained via
//    EptpAllocateEptEntry (pre-allocated pool when ept_data != nullptr,
//    otherwise pool) and the entry is initialized as pass-through;
//  - with reserved != nullptr, the matching entry of the "reserved" template
//    hierarchy is copied verbatim, and reserved is advanced to that
//    template's child table for the next level down.
// Returns the leaf PT entry, or nullptr on allocation failure.
_Use_decl_annotations_ EptCommonEntry *EptpConstructTablesEx(
	EptCommonEntry *table, ULONG table_level, ULONG64 physical_address,
	EptData *ept_data, EptCommonEntry* reserved) {
	switch (table_level) {
	case 4: {
		// table == PML4 (512 GB)
		const auto pxe_index = EptpAddressToPxeIndex(physical_address);
		const auto ept_pml4_entry = &table[pxe_index];
		if (!ept_pml4_entry->all) {
			if (!reserved)
			{
				const auto ept_pdpt = EptpAllocateEptEntry(ept_data);
				if (!ept_pdpt) {
					return nullptr;
				}
				EptpInitTableEntry(ept_pml4_entry, table_level, UtilPaFromVa(ept_pdpt));
			}
			else
			{
				// Copy the template's PML4 entry and follow its child table.
				const auto ept01_pml4_entry = &reserved[pxe_index];
				ept_pml4_entry->all = ept01_pml4_entry->all;
				reserved = reinterpret_cast<EptCommonEntry *>(UtilVaFromPfn(ept01_pml4_entry->fields.physial_address));
			}
		}
		return EptpConstructTablesEx(
			reinterpret_cast<EptCommonEntry *>(
				UtilVaFromPfn(ept_pml4_entry->fields.physial_address)),
			table_level - 1, physical_address, ept_data, reserved);
	}
	case 3: {
		// table == PDPT (1 GB)
		const auto ppe_index = EptpAddressToPpeIndex(physical_address);
		const auto ept_pdpt_entry = &table[ppe_index];
		if (!ept_pdpt_entry->all) {
			if (!reserved)
			{
				const auto ept_pdt = EptpAllocateEptEntry(ept_data);
				if (!ept_pdt) {
					return nullptr;
				}
				EptpInitTableEntry(ept_pdpt_entry, table_level, UtilPaFromVa(ept_pdt));
			}
			else
			{
				// Copy the template's PDPT entry and follow its child table.
				const auto ept01_pdpt_entry = &reserved[ppe_index];
				ept_pdpt_entry->all = ept01_pdpt_entry->all;
				reserved = reinterpret_cast<EptCommonEntry *>(UtilVaFromPfn(ept01_pdpt_entry->fields.physial_address));
			}
		}
		return EptpConstructTablesEx(
			reinterpret_cast<EptCommonEntry *>(
				UtilVaFromPfn(ept_pdpt_entry->fields.physial_address)),
			table_level - 1, physical_address, ept_data, reserved);
	}
	case 2: {
		// table == PDT (2 MB)
		const auto pde_index = EptpAddressToPdeIndex(physical_address);
		const auto ept_pdt_entry = &table[pde_index];
		if (!ept_pdt_entry->all)
		{
			if (!reserved)
			{
				const auto ept_pt = EptpAllocateEptEntry(ept_data);
				if (!ept_pt) {
					return nullptr;
				}
				EptpInitTableEntry(ept_pdt_entry, table_level, UtilPaFromVa(ept_pt));
			}
			else
			{
				// Copy the template's PDT entry and follow its child table.
				const auto ept01_pdt_entry = &reserved[pde_index];
				ept_pdt_entry->all = ept01_pdt_entry->all;
				reserved = reinterpret_cast<EptCommonEntry *>(UtilVaFromPfn(ept01_pdt_entry->fields.physial_address));
			}
		}
		return EptpConstructTablesEx(
			reinterpret_cast<EptCommonEntry *>(
				UtilVaFromPfn(ept_pdt_entry->fields.physial_address)),
			table_level - 1, physical_address, ept_data, reserved);
	}
	case 1: {
		// table == PT (4 KB)
		const auto pte_index = EptpAddressToPteIndex(physical_address);
		const auto ept_pt_entry = &table[pte_index];
		// NT_ASSERT(!ept_pt_entry->all);
		// Leaf level: initialize (or copy from the template) only when empty;
		// an already-populated PTE is returned untouched.
		if (!ept_pt_entry->all)
		{
			if (!reserved)
			{
				EptpInitTableEntry(ept_pt_entry, table_level, physical_address);
			}
			else
			{
				const auto ept01_pt_entry = &reserved[pte_index];
				ept_pt_entry->all = ept01_pt_entry->all;
			}
		}
		return ept_pt_entry;
	}
	default:
		HYPERPLATFORM_COMMON_DBG_BREAK();
		return nullptr;
	}
}
// Return a new EPT entry either by creating new one or from pre-allocated ones
// Return a new EPT entry either by creating new one or from pre-allocated
// ones. With an EptData context (e.g. during VM-exit handling) the entry is
// drawn from the pre-allocated pool; without one (initialization time) it is
// allocated from NonPagedPool.
_Use_decl_annotations_ static EptCommonEntry *EptpAllocateEptEntry(
    EptData *ept_data) {
  return ept_data ? EptpAllocateEptEntryFromPreAllocated(ept_data)
                  : EptpAllocateEptEntryFromPool();
}
// Return a new EPT entry from pre-allocated ones.
// Return a new EPT entry from pre-allocated ones.
_Use_decl_annotations_ static EptCommonEntry *
EptpAllocateEptEntryFromPreAllocated(EptData *ept_data) {
  // Claim the next slot atomically so concurrent claims from multiple
  // processors each get a distinct entry.
  const auto count =
      InterlockedIncrement(&ept_data->preallocated_entries_count);
  if (count > kEptpNumberOfPreallocatedEntries) {
    // Pool exhausted: treated as fatal.
    HYPERPLATFORM_COMMON_BUG_CHECK(
        HyperPlatformBugCheck::kExhaustedPreallocatedEntries, count,
        reinterpret_cast<ULONG_PTR>(ept_data), 0);
  }
  return ept_data->preallocated_entries[count - 1];
}
// Return a new EPT entry either by creating new one
// Return a new EPT entry either by creating new one.
// Allocates a zeroed page-sized table (512 eight-byte entries) from
// NonPagedPool; returns nullptr on allocation failure.
_Use_decl_annotations_ static EptCommonEntry *EptpAllocateEptEntryFromPool() {
  static const auto kAllocSize = 512 * sizeof(EptCommonEntry);
  static_assert(kAllocSize == PAGE_SIZE, "Size check");
  const auto table = reinterpret_cast<EptCommonEntry *>(ExAllocatePoolWithTag(
      NonPagedPool, kAllocSize, kHyperPlatformCommonPoolTag));
  if (table) {
    // All entries start out non-present.
    RtlZeroMemory(table, kAllocSize);
  }
  return table;
}
// Initialize an EPT entry with a "pass through" attribute
// Initialize an EPT entry with a "pass through" attribute: full RWX access
// pointing at the given physical page. Only leaf (PT, level 1) entries carry
// a memory type, which is set to write-back.
_Use_decl_annotations_ static void EptpInitTableEntry(
    EptCommonEntry *entry, ULONG table_level, ULONG64 physical_address) {
  const bool is_leaf = (table_level == 1);
  entry->fields.read_access = true;
  entry->fields.write_access = true;
  entry->fields.execute_access = true;
  entry->fields.physial_address = UtilPfnFromPa(physical_address);
  if (is_leaf) {
    entry->fields.memory_type = static_cast<ULONG64>(memory_type::kWriteBack);
  }
}
// Return an address of PXE
// Returns the PML4 (PXE) index for the physical address: bits 47:39.
_Use_decl_annotations_ static ULONG64 EptpAddressToPxeIndex(
    ULONG64 physical_address) {
  return (physical_address >> kEptpPxiShift) & kEptpPtxMask;
}
// Return an address of PPE
// Returns the PDPT (PPE) index for the physical address: bits 38:30.
_Use_decl_annotations_ static ULONG64 EptpAddressToPpeIndex(
    ULONG64 physical_address) {
  return (physical_address >> kEptpPpiShift) & kEptpPtxMask;
}
// Return an address of PDE
// Returns the PDT (PDE) index for the physical address: bits 29:21.
_Use_decl_annotations_ static ULONG64 EptpAddressToPdeIndex(
    ULONG64 physical_address) {
  return (physical_address >> kEptpPdiShift) & kEptpPtxMask;
}
// Set MTF on the current processor
// Set MTF on the current processor.
// Read-modify-writes the primary processor-based VM-execution controls so
// that only the monitor-trap-flag bit changes.
_Use_decl_annotations_ void ShpSetMonitorTrapFlag(bool enable) {
  VmxProcessorBasedControls procctl = {static_cast<unsigned int>(
      UtilVmRead(VmcsField::kCpuBasedVmExecControl))};
  procctl.fields.monitor_trap_flag = enable;
  UtilVmWrite(VmcsField::kCpuBasedVmExecControl, procctl.all);
}
// Return an address of PTE
// Returns the PT (PTE) index for the physical address: bits 20:12.
_Use_decl_annotations_ static ULONG64 EptpAddressToPteIndex(
    ULONG64 physical_address) {
  return (physical_address >> kEptpPtiShift) & kEptpPtxMask;
}
// Deal with L2 EPT violation VM-exit.
// Deal with L2 EPT violation VM-exit.
//
// Resolves the faulting guest-physical address (PhysAddr when supplied,
// otherwise read from the VMCS) and either lazily builds the missing EPT
// mapping, restores access bits on an access-protected entry, or bug-checks
// on exit qualifications this handler does not expect.
// NOTE(review): is_range_of_ept12 is not used in this function — confirm
// whether callers still need to pass it.
_Use_decl_annotations_ void EptHandleEptViolationEx(EptData *ept_data, ULONG64 PhysAddr, bool is_range_of_ept12) {
	const EptViolationQualification exit_qualification = {
		UtilVmRead(VmcsField::kExitQualification) };
	ULONG_PTR fault_pa = 0;
	if (PhysAddr) {
		fault_pa = PhysAddr;
	}
	else {
		fault_pa = UtilVmRead64(VmcsField::kGuestPhysicalAddress);
	}
	// Case 1: the entry grants no access at all — either the page was never
	// mapped (construct it now) or its access bits were cleared (restore them).
	if (!exit_qualification.fields.ept_readable &&
		!exit_qualification.fields.ept_writeable &&
		!exit_qualification.fields.ept_executable)
	{
		const auto ept_entry = EptGetEptPtEntry(ept_data, fault_pa);
		if (!ept_entry || !ept_entry->all)
		{
			EptpConstructTables(ept_data->ept_pml4, 4, fault_pa, ept_data);
			UtilInveptGlobal();
			return;
		}
		else {
			ept_entry->fields.read_access = true;
			ept_entry->fields.execute_access = true;
			ept_entry->fields.write_access = true;
			// Flush cached EPT translations so the restored bits take effect.
			UtilInveptGlobal();
			return;
		}
	}
	// Case 2: violation during guest-physical address translation — grant
	// full access on a partial-permission mismatch, otherwise bugcheck.
	else if (exit_qualification.fields.caused_by_translation)
	{
		if ((!exit_qualification.fields.ept_writeable && exit_qualification.fields.write_access) ||
			(!exit_qualification.fields.ept_readable && exit_qualification.fields.read_access) ||
			(!exit_qualification.fields.ept_executable && exit_qualification.fields.execute_access) )
		{
			const auto Ept01Pte = EptGetEptPtEntry(ept_data, fault_pa);
			if (!Ept01Pte)
			{
				HYPERPLATFORM_COMMON_BUG_CHECK(HyperPlatformBugCheck::kUnexpectedVmEptExit3,
					UtilVmRead(VmcsField::kGuestRip), exit_qualification.all, fault_pa);
			}
			Ept01Pte->fields.read_access = true;
			Ept01Pte->fields.execute_access = true;
			Ept01Pte->fields.write_access = true;
			return;
		}
		else
		{
			HYPERPLATFORM_COMMON_BUG_CHECK(HyperPlatformBugCheck::kUnexpectedVmEptExit2,
				UtilVmRead(VmcsField::kGuestRip), exit_qualification.all, fault_pa);
		}
	}
	// Case 3: any other qualification is unexpected.
	else {
		HYPERPLATFORM_COMMON_BUG_CHECK(HyperPlatformBugCheck::kUnexpectedVmEptExit,
			UtilVmRead(VmcsField::kGuestRip), exit_qualification.all, fault_pa);
	}
}
// Deal with EPT violation VM-exit.
// Deal with EPT violation VM-exit.
// Thin forwarder; the actual handling lives in EptHandleEptViolationEx.
_Use_decl_annotations_ void EptHandleEptViolation(EptData* ept_data, ULONG64 PhysAddr, bool is_range_of_ept12) {
	EptHandleEptViolationEx(ept_data, PhysAddr, is_range_of_ept12);
}
// Returns if the physical_address is device memory (which could not have a
// corresponding PFN entry)
// Returns if the physical_address is device memory (which could not have a
// corresponding PFN entry). An address inside any run of the OS physical
// memory map is RAM; anything outside every run is treated as device memory.
_Use_decl_annotations_ static bool EptpIsDeviceMemory(
    ULONG64 physical_address) {
  const auto ranges = UtilGetPhysicalMemoryRanges();
  for (auto run_index = 0ul; run_index < ranges->number_of_runs; ++run_index) {
    const auto &run = ranges->run[run_index];
    const auto first = static_cast<ULONG64>(run.base_page) * PAGE_SIZE;
    const auto last = first + run.page_count * PAGE_SIZE - 1;
    if (UtilIsInBounds(physical_address, first, last)) {
      return false;  // backed by RAM
    }
  }
  return true;
}
// Returns an EPT entry corresponds to the physical_address
// Returns an EPT entry corresponds to the physical_address.
// Walks from the PML4 (level 4) down to the PT entry; yields nullptr when the
// address is not mapped by this EPT.
_Use_decl_annotations_ EptCommonEntry *EptGetEptPtEntry(
    EptData *ept_data, ULONG64 physical_address) {
  return EptpGetEptPtEntry(ept_data->ept_pml4, 4, physical_address);
}
// Returns an EPT entry corresponds to the physical_address
// Returns an EPT entry corresponds to the physical_address.
// Walks down from the given table_level (4 = PML4) to the PT iteratively.
// A zero entry at any non-leaf level, or a null table pointer, means the
// address is not mapped and nullptr is returned. At the leaf the PT entry is
// returned regardless of its contents.
_Use_decl_annotations_ static EptCommonEntry *EptpGetEptPtEntry(
    EptCommonEntry *table, ULONG table_level, ULONG64 physical_address) {
  auto current_table = table;
  for (auto level = table_level; current_table; --level) {
    switch (level) {
      case 4: {
        // current_table == PML4
        const auto pxe =
            &current_table[EptpAddressToPxeIndex(physical_address)];
        if (!pxe->all) {
          return nullptr;
        }
        current_table = reinterpret_cast<EptCommonEntry *>(
            UtilVaFromPfn(pxe->fields.physial_address));
        break;
      }
      case 3: {
        // current_table == PDPT
        const auto ppe =
            &current_table[EptpAddressToPpeIndex(physical_address)];
        if (!ppe->all) {
          return nullptr;
        }
        current_table = reinterpret_cast<EptCommonEntry *>(
            UtilVaFromPfn(ppe->fields.physial_address));
        break;
      }
      case 2: {
        // current_table == PDT
        const auto pde =
            &current_table[EptpAddressToPdeIndex(physical_address)];
        if (!pde->all) {
          return nullptr;
        }
        current_table = reinterpret_cast<EptCommonEntry *>(
            UtilVaFromPfn(pde->fields.physial_address));
        break;
      }
      case 1:
        // current_table == PT; hand back the leaf entry as-is.
        return &current_table[EptpAddressToPteIndex(physical_address)];
      default:
        HYPERPLATFORM_COMMON_DBG_BREAK();
        return nullptr;
    }
  }
  return nullptr;  // a table pointer resolved to null
}
// Frees all EPT stuff
// Frees all EPT stuff.
// Order matters: unused pre-allocated pages are freed first (the used ones
// are linked into the tables and are freed by EptpDestructTables), then the
// table hierarchy, then the EPTP page and the EptData itself.
_Use_decl_annotations_ void EptTermination(EptData *ept_data) {
  HYPERPLATFORM_LOG_DEBUG("Used pre-allocated entries  = %2d / %2d",
                          ept_data->preallocated_entries_count,
                          kEptpNumberOfPreallocatedEntries);
  EptpFreeUnusedPreAllocatedEntries(ept_data->preallocated_entries,
                                    ept_data->preallocated_entries_count);
  EptpDestructTables(ept_data->ept_pml4, 4);
  ExFreePoolWithTag(ept_data->ept_pointer, kHyperPlatformCommonPoolTag);
  ExFreePoolWithTag(ept_data, kHyperPlatformCommonPoolTag);
}
// Frees all unused pre-allocated EPT entries. Other used entries should be
// freed with EptpDestructTables().
// Frees all unused pre-allocated EPT entries. Other used entries should be
// freed with EptpDestructTables().
_Use_decl_annotations_ static void EptpFreeUnusedPreAllocatedEntries(
    EptCommonEntry **preallocated_entries, long used_count) {
  for (auto i = used_count; i < kEptpNumberOfPreallocatedEntries; ++i) {
    if (!preallocated_entries[i]) {
      // The array is filled front-to-back, so the first null marks the end
      // (relevant when initialization failed partway through filling it).
      break;
    }
#pragma warning(push)
#pragma warning(disable : 6001)
    ExFreePoolWithTag(preallocated_entries[i], kHyperPlatformCommonPoolTag);
#pragma warning(pop)
  }
  // Finally release the pointer array itself.
  ExFreePoolWithTag(preallocated_entries, kHyperPlatformCommonPoolTag);
}
// Returns the virtual address of the child table referenced by this entry,
// or nullptr when the entry is absent or references no page frame.
_Use_decl_annotations_ EptCommonEntry* EptpGetNextLevelTableBase(EptCommonEntry *table)
{
	if (!table || !table->fields.physial_address)
	{
		return nullptr;
	}
	return reinterpret_cast<EptCommonEntry *>(
		UtilVaFromPfn(table->fields.physial_address));
}
// Returns the physical address of the child table referenced by this entry,
// or 0 when the entry is absent or references no page frame.
_Use_decl_annotations_ ULONG64 EptpGetNextLevelTablePhysicalBase(EptCommonEntry *table)
{
	if (!table || !table->fields.physial_address)
	{
		return 0;
	}
	return UtilPaFromPfn(table->fields.physial_address);
}
// Re-synchronizes the shadow EPT (EPT02) with the guest hypervisor's EPT
// (EPT12) by copying every entry, level by level (PML4 -> PDPT -> PDT -> PT),
// from the EPT12 hierarchy into the corresponding EPT02 tables. Branches that
// are not present on both sides are skipped. When LookupEntryPa is non-null
// and equals the physical address of the EPT12 PT entry just copied, the
// refresh stops early.
// NOTE(review): EptData01 is only null-checked here, never otherwise read —
// confirm the parameter is still required.
_Use_decl_annotations_ void EptpRefreshEpt02(EptData* EptData02, EptData* EptData12, EptData* EptData01, void* LookupEntryPa)
{
	if (!EptData01 || !EptData02 || !EptData12) {
		return;
	}
	EptCommonEntry* pml4_table = EptData02->ept_pml4;
	EptCommonEntry* pml4_table2 = EptData12->ept_pml4;
	EptCommonEntry* pdptr_table = NULL;
	EptCommonEntry* pdptr_table2 = NULL;
	EptCommonEntry* pdt_table = NULL;
	EptCommonEntry* pdt_table2 = NULL;
	EptCommonEntry* pt_table = NULL;
	EptCommonEntry* pt_table2 = NULL;
	for (int i = 0; i < 512 && pml4_table && pml4_table2; i++, pml4_table++, pml4_table2++) //PML4
	{
		// Copy the EPT12 entry wholesale, then descend only if both sides
		// reference a child table.
		pml4_table->all = pml4_table2->all;
		ULONG_PTR pdptr_entry_pa02 = EptpGetNextLevelTablePhysicalBase(pml4_table);
		ULONG_PTR pdptr_entry_pa12 = EptpGetNextLevelTablePhysicalBase(pml4_table2);
		if (!pdptr_entry_pa02 || !pdptr_entry_pa12) {
			continue;
		}
		pdptr_table = (EptCommonEntry*)UtilVaFromPa(pdptr_entry_pa02);
		pdptr_table2 = (EptCommonEntry*)UtilVaFromPa(pdptr_entry_pa12);
		for (int j = 0; j < 512 && pdptr_table && pdptr_table2; j++, pdptr_table++, pdptr_table2++) //PDPTR
		{
			pdptr_table->all = pdptr_table2->all;
			ULONG_PTR pdt_entry_pa02 = EptpGetNextLevelTablePhysicalBase(pdptr_table);
			ULONG_PTR pdt_entry_pa12 = EptpGetNextLevelTablePhysicalBase(pdptr_table2);
			if (!pdt_entry_pa02 || !pdt_entry_pa12) {
				continue;
			}
			pdt_table = (EptCommonEntry*)UtilVaFromPa(pdt_entry_pa02);
			pdt_table2 = (EptCommonEntry*)UtilVaFromPa(pdt_entry_pa12);
			for (int k = 0; k < 512 && pdt_table && pdt_table2; k++, pdt_table++, pdt_table2++) // PDT
			{
				pdt_table->all = pdt_table2->all;
				ULONG_PTR pt_table_pa02 = EptpGetNextLevelTablePhysicalBase(pdt_table);
				ULONG_PTR pt_table_pa12 = EptpGetNextLevelTablePhysicalBase(pdt_table2);
				if (!pt_table_pa02 || !pt_table_pa12) {
					continue;
				}
				pt_table = (EptCommonEntry*)UtilVaFromPa(pt_table_pa02);
				pt_table2 = (EptCommonEntry*)UtilVaFromPa(pt_table_pa12);
				for (int p = 0; p < 512 && pt_table && pt_table2; p++, pt_table++, pt_table2++) // PT
				{
					pt_table->all = pt_table2->all;
					// Early exit once the requested EPT12 PT entry was copied.
					if (LookupEntryPa == (void*)UtilPaFromVa(pt_table2) && LookupEntryPa != nullptr)
					{
						return;
					}
				}
			}
		}
	}
}
// Returns true when PhysicalAddres falls on one of the paging structures of
// the EPT hierarchy rooted at pml4_table: the PML4 page itself, any PDPT or
// PDT entry address, or any PT page (leaf tables are matched as whole pages
// rather than per entry).
// NOTE(review): pdptr_entry_pa / pdt_entry_pa are not checked for zero before
// UtilVaFromPa and the subsequent walk — confirm UtilVaFromPa(0) and iterating
// such a table are safe for non-present entries.
_Use_decl_annotations_ bool EptpIsInRangesOfEpt(ULONG_PTR PhysicalAddres, EptCommonEntry *pml4_table)
{
	EptCommonEntry* pdptr_table = NULL;
	EptCommonEntry* pdt_table = NULL;
	EptCommonEntry* pt_table = NULL;
	bool IsMatch = false;
	for (int i = 0; i < 512 && pml4_table; i++, pml4_table++) //PML4
	{
		// Address of a PML4 entry itself?
		if ((void*)PhysicalAddres == (void*)UtilPaFromVa(pml4_table))
		{
			IsMatch = true;
			return IsMatch;
		}
		ULONG_PTR pdptr_entry_pa = EptpGetNextLevelTablePhysicalBase(pml4_table);
		pdptr_table = (EptCommonEntry*)UtilVaFromPa(pdptr_entry_pa);
		for (int j = 0; j < 512 && pdptr_table; j++, pdptr_table++) //PDPTR
		{
			// Address of a PDPT entry?
			if ((void*)PhysicalAddres == (void*)(pdptr_entry_pa + j * sizeof(EptCommonEntry)))
			{
				IsMatch = true;
				return IsMatch;
			}
			ULONG_PTR pdt_entry_pa = EptpGetNextLevelTablePhysicalBase(pdptr_table);
			pdt_table = (EptCommonEntry*)UtilVaFromPa(pdt_entry_pa);
			for (int k = 0; k < 512 && pdt_table; k++, pdt_table++) // PDT
			{
				// Address of a PDT entry?
				if ((void*)PhysicalAddres == (void*)(pdt_entry_pa + k * sizeof(EptCommonEntry)))
				{
					IsMatch = true;
					return IsMatch;
				}
				ULONG_PTR pt_table_pa = EptpGetNextLevelTablePhysicalBase(pdt_table);
				//Last Level, we dun need get each of them.
				if (PAGE_ALIGN(PhysicalAddres) == (void*)pt_table_pa)
				{
					IsMatch = true;
					return IsMatch;
				}
			}
		}
	}
	return IsMatch;
}
_Use_decl_annotations_ void EptpSetEntryAccess(
	EptData* ept_data, ULONG_PTR physical_address, bool readable, bool writable, bool executable)
{
	// Updates the R/W/X permission bits of the EPT PT entry mapping
	// physical_address. Entries that are missing or not yet backed by a
	// physical page (fields.physial_address == 0) are left untouched.
	EptCommonEntry* const entry = EptGetEptPtEntry(ept_data, physical_address);
	if (entry && entry->fields.physial_address)  // field name is (sic) in EptCommonEntry
	{
		entry->fields.read_access = readable;
		entry->fields.write_access = writable;
		entry->fields.execute_access = executable;
	}
}
_Use_decl_annotations_ void EptpValidateEptCallback(EptCommonEntry* EptEntry, void* Context)
{
	// EptpWalker callback: grants full R/W/X access in the EPT01 hierarchy
	// to the physical page backing this (EPT12) paging-structure entry.
	EptData* const ept01 = (EptData*)Context;
	EptpSetEntryAccess(ept01, (ULONG64)UtilPaFromVa(EptEntry), true, true, true);
}
_Use_decl_annotations_ void EptpValidateEpt(EptData* EptData12, EptData* EptData01)
{
	// Walks the EPT12 tables (depth 4) and, via EptpValidateEptCallback,
	// restores full R/W/X access in EPT01 for each page backing them.
	EptpWalker(EptData12->ept_pml4, 4, EptpValidateEptCallback, EptData01);
}
_Use_decl_annotations_ void EptpInvalidateEptCallback(EptCommonEntry* EptEntry, void* Context)
{
	// EptpWalker callback: sets R=1, W=0, X=1 in EPT01 for the physical page
	// backing this EPT12 entry — presumably so guest writes to its own EPT
	// tables trigger EPT violations (TODO confirm against the VM-exit handler).
	EptData* const ept01 = (EptData*)Context;
	EptpSetEntryAccess(ept01, (ULONG64)UtilPaFromVa(EptEntry), true, false, true);
}
_Use_decl_annotations_ void EptpInvalidateEpt(EptData* EptData12, EptData* EptData01)
{
	// Walks the EPT12 tables (depth 4) and, via EptpInvalidateEptCallback,
	// write-protects in EPT01 each page backing them (R=1, W=0, X=1).
	EptpWalker(EptData12->ept_pml4, 4, EptpInvalidateEptCallback, EptData01);
}
// Frees all used EPT entries by walking through whole EPT
_Use_decl_annotations_ static void EptpDestructTables(EptCommonEntry *table,
                                                      ULONG table_level) {
  // Recursively releases an EPT paging hierarchy: PML4/PDPT levels recurse
  // into their sub-tables, a PDT frees its leaf page tables directly, and
  // finally the table itself is released.
  for (auto index = 0ul; index < 512; ++index) {
    const auto current = table[index];
    if (!current.fields.physial_address) {
      continue;  // entry not populated; nothing to free
    }
    const auto sub_table = reinterpret_cast<EptCommonEntry *>(
        UtilVaFromPfn(current.fields.physial_address));
    switch (table_level) {
      case 4:  // table == PML4, sub_table == PDPT
      case 3:  // table == PDPT, sub_table == PDT
        EptpDestructTables(sub_table, table_level - 1);
        break;
      case 2:  // table == PDT, sub_table == PT
        ExFreePoolWithTag(sub_table, kHyperPlatformCommonPoolTag);
        break;
      default:
        HYPERPLATFORM_COMMON_DBG_BREAK();
        break;
    }
  }
  ExFreePoolWithTag(table, kHyperPlatformCommonPoolTag);
}
NTSTATUS EptpBuildNestedEpt(
	ULONG_PTR vmcs12_va,
	EptData* ept_data12,
	EptData* ept_data02)
{
	// Builds the nested-virtualization EPT state:
	// - Ept12Ptr mirrors the guest's EPT pointer value (vmcs12_va).
	// - Ept02Ptr/Pml4Entry form a fresh shadow hierarchy covering every
	//   physical memory run reported by UtilGetPhysicalMemoryRanges().
	//
	// Fix: the original returned STATUS_SUCCESS on every path, including
	// parameter-validation and allocation failures, so callers could not
	// detect that ept_data02/ept_data12 were left uninitialized.
	NTSTATUS status = STATUS_INSUFFICIENT_RESOURCES;
	do {
		EptCommonEntry* Pml4Entry = NULL;
		EptPointer* Ept02Ptr = NULL;
		EptPointer* Ept12Ptr = NULL;
		ULONG64 _Ept12Ptr = vmcs12_va;
		if (!vmcs12_va || !ept_data12 || !ept_data02)
		{
			status = STATUS_INVALID_PARAMETER;
			break;
		}
		Ept12Ptr = (EptPointer*)ExAllocatePoolWithTag(NonPagedPoolMustSucceed, PAGE_SIZE, 'eptp');
		if (!Ept12Ptr)
		{
			break;  // status stays STATUS_INSUFFICIENT_RESOURCES
		}
		RtlZeroMemory(Ept12Ptr, PAGE_SIZE);
		Ept02Ptr = (EptPointer*)ExAllocatePoolWithTag(NonPagedPoolMustSucceed, PAGE_SIZE, 'eptp');
		if (!Ept02Ptr)
		{
			ExFreePool(Ept12Ptr);
			break;
		}
		RtlZeroMemory(Ept02Ptr, PAGE_SIZE);
		Pml4Entry = (EptCommonEntry*)ExAllocatePoolWithTag(NonPagedPoolMustSucceed, PAGE_SIZE, 'pml4');
		if (!Pml4Entry)
		{
			ExFreePool(Ept12Ptr);
			ExFreePool(Ept02Ptr);
			break;
		}
		RtlZeroMemory(Pml4Entry, PAGE_SIZE);
		// Preserve the guest's EPTP value verbatim.
		Ept12Ptr->all = _Ept12Ptr;
		// The shadow PML4 entry starts with all access bits cleared.
		Pml4Entry->fields.read_access = false;
		Pml4Entry->fields.execute_access = false;
		Pml4Entry->fields.memory_type = 0;
		Pml4Entry->fields.write_access = false;
		Ept02Ptr->fields.memory_type = static_cast<ULONG>(memory_type::kWriteBack);
		Ept02Ptr->fields.pml4_address = UtilPfnFromPa(UtilPaFromVa(Pml4Entry));
		Ept02Ptr->fields.page_walk_length = 4 - 1;  // 4-level walk, encoded as length-1
		Ept02Ptr->fields.enable_accessed_and_dirty_flags = false;
		// Pre-populate the shadow tables for every physical memory run.
		const auto pm_ranges = UtilGetPhysicalMemoryRanges();
		for (auto run_index = 0ul; run_index < pm_ranges->number_of_runs;
			++run_index) {
			const auto run = &pm_ranges->run[run_index];
			const auto base_addr = run->base_page * PAGE_SIZE;
			for (auto page_index = 0ull; page_index < run->page_count; ++page_index) {
				const auto indexed_addr = base_addr + page_index * PAGE_SIZE;
				EptpConstructTables(Pml4Entry, 4, indexed_addr, nullptr);
				EptpConstructTablesEx(Pml4Entry, 4, indexed_addr, nullptr, ept_data12->ept_pml4);
			}
		}
		ept_data02->ept_pointer = Ept02Ptr;
		ept_data02->ept_pml4 = Pml4Entry;
		ept_data12->ept_pointer = Ept12Ptr;
		ept_data12->ept_pml4 = (EptCommonEntry*)UtilVaFromPfn(Ept12Ptr->fields.pml4_address);
		status = STATUS_SUCCESS;
	} while (FALSE);
	return status;
}
// Allocates and zero-initializes an empty EptData container.
// Returns NULL on allocation failure.
//
// Fix: the original passed the pointer straight to RtlZeroMemory after only
// an NT_ASSERT, which is compiled out in free builds — an allocation failure
// would dereference NULL. NOTE(review): the allocation is PAGE_SIZE although
// only sizeof(EptData) is zeroed/used — confirm whether the full page is
// intentional.
EptData* EptBuildEptDataByEptp()
{
	EptData* EptDataPtr = (EptData*)ExAllocatePoolWithTag(NonPagedPoolMustSucceed, PAGE_SIZE, 'eptd');
	NT_ASSERT(EptDataPtr);
	if (!EptDataPtr)
	{
		return NULL;
	}
	RtlZeroMemory(EptDataPtr, sizeof(EptData));
	return EptDataPtr;
}
} // extern "C"
| 16,653 |
320 | <filename>matconvnet-1.0-beta16/matlab/src/bits/data.hpp
// @file data.hpp
// @brief Basic data structures
// @author <NAME>
/*
Copyright (C) 2015 <NAME>.
All rights reserved.
This file is part of the VLFeat library and is made available under
the terms of the BSD license (see the COPYING file).
*/
#ifndef __vl__data_hpp__
#define __vl__data_hpp__
#include <cstddef>
#include <string>
#define STRINGIZE(x) STRINGIZE_HELPER(x)
#define STRINGIZE_HELPER(x) #x
#define FILELINE STRINGIZE(__FILE__) ":" STRINGIZE(__LINE__)
namespace vl {
// Index type used for tensor dimensions and element counts.
typedef int index_t ;
// Where data lives: host memory (CPU) or CUDA device memory (GPU).
enum Device { CPU = 0, GPU } ;
// Supported element types.
enum Type {
  vlTypeChar,
  vlTypeFloat,
  vlTypeDouble
} ;
// Error codes returned by library operations; see getErrorMessage().
enum Error {
  vlSuccess = 0,
  vlErrorUnsupported,
  vlErrorCuda,
  vlErrorCudnn,
  vlErrorCublas,
  vlErrorOutOfMemory,
  vlErrorOutOfGPUMemeory, // (sic) misspelling kept: renaming would break existing users
  vlErrorUnknown
} ;
// Returns a human-readable message for the given error code.
const char * getErrorMessage(Error error) ;
// Forward declaration; CUDA handle/state helper defined elsewhere.
class CudaHelper ;
/* -----------------------------------------------------------------
* Helpers
* -------------------------------------------------------------- */
// Integer division rounding the quotient up instead of down
// (intended for non-negative numerator and positive divisor).
inline int divideUpwards(int numerator, int divisor)
{
  return (numerator + divisor - 1) / divisor ;
}
namespace impl {
  // A chunk of memory on either the CPU or a GPU device; the allocation
  // logic is implemented elsewhere.
  class Buffer
  {
  public:
    Buffer() ;
    // Makes the buffer hold `size` elements of `dataType` on `deviceType`;
    // exact reuse/growth policy is defined by the implementation.
    vl::Error init(Device deviceType, Type dataType, size_t size) ;
    // Raw pointer to the underlying storage.
    void * getMemory() ;
    // How many times the underlying storage has been reallocated.
    int getNumReallocations() const ;
    void clear() ;
    // Drops GPU-side storage — presumably used when the CUDA context is
    // reset (TODO confirm in the implementation).
    void invalidateGpu() ;
  protected:
    Device deviceType ;
    Type dataType ;
    size_t size ;
    void * memory ;
    int numReallocations ;
  } ;
}
/* -----------------------------------------------------------------
* Context
* -------------------------------------------------------------- */
// Computation context: owns scratch workspace and an "all ones" buffer
// per device, tracks the last error and message, and holds CUDA state.
// NOTE(review): thread-safety is not visible from this declaration —
// confirm before sharing a Context across threads.
class Context
{
public:
  Context() ;
  ~Context() ;
  // Scratch memory on `device`; presumably at least `size` bytes — confirm.
  void * getWorkspace(Device device, size_t size) ;
  void clearWorkspace(Device device) ;
  // Buffer filled with ones of `type` on `device`.
  void * getAllOnes(Device device, Type type, size_t size) ;
  void clearAllOnes(Device device) ;
  CudaHelper& getCudaHelper() ;
  void clear() ; // do a reset
  void invalidateGpu() ; // drop CUDA memory and handles
  // Error bookkeeping; passError/setError both return the given error so
  // they can be used in return statements (semantic difference between the
  // two is defined in the implementation).
  vl::Error passError(vl::Error error, char const * message = NULL) ;
  vl::Error setError(vl::Error error, char const * message = NULL) ;
  void resetLastError() ;
  vl::Error getLastError() const ;
  std::string const& getLastErrorMessage() const ;
private:
  impl::Buffer workspace[2] ;  // presumably indexed by Device (CPU/GPU)
  impl::Buffer allOnes[2] ;    // presumably indexed by Device (CPU/GPU)
  Error lastError ;
  std::string lastErrorMessage ;
  CudaHelper * cudaHelper ;
} ;
/* -----------------------------------------------------------------
* TensorGeometry
* -------------------------------------------------------------- */
// Describes the dimensions of a tensor (height, width, depth, size)
// without owning any storage.
class TensorGeometry
{
public:
  TensorGeometry() ;
  TensorGeometry(TensorGeometry const& t) ;
  TensorGeometry(index_t height, index_t width, index_t depth, index_t size) ;
  index_t getHeight() const ;
  index_t getWidth() const ;
  index_t getDepth() const ;
  index_t getSize() const ;
  // Total element count — presumably the product of the four dimensions
  // (implementation not visible here).
  index_t getNumElements() const ;
  bool isEmpty() const ;
  void setHeight(index_t x) ;
  void setWidth(index_t x) ;
  void setDepth(index_t x) ;
  void setSize(index_t x) ;
protected:
  index_t height ;
  index_t width ;
  index_t depth ;
  index_t size ;
} ;
// Copy constructor: memberwise copy of the four dimensions.
inline TensorGeometry::TensorGeometry(TensorGeometry const & t)
: height(t.height), width(t.width), depth(t.depth), size(t.size)
{ }
// Two geometries are equal iff all four dimensions match.
inline bool operator == (TensorGeometry const & a, TensorGeometry const & b)
{
  // Use logical && instead of the original bitwise & on bool operands:
  // same result here, but && is the idiomatic form and short-circuits.
  return
  (a.getHeight() == b.getHeight()) &&
  (a.getWidth() == b.getWidth()) &&
  (a.getDepth() == b.getDepth()) &&
  (a.getSize() == b.getSize()) ;
}
// Negation of operator ==.
inline bool operator != (TensorGeometry const & a, TensorGeometry const & b)
{
  return ! (a == b) ;
}
/* -----------------------------------------------------------------
* Tensor
* -------------------------------------------------------------- */
// A dense float tensor: a TensorGeometry plus a pointer to CPU or GPU
// storage. NOTE(review): no destructor is declared, so the tensor
// presumably does not own `memory` — confirm in the implementation.
class Tensor : public TensorGeometry
{
public:
  Tensor() ;
  Tensor(Tensor const &) ;
  Tensor(float * memory, size_t memorySize, Device memoryType,
         TensorGeometry const & geom) ;
  float * getMemory() ;
  Device getMemoryType() const ;
  TensorGeometry getGeometry() const ;
  // Truthiness — presumably the negation of isNull(); confirm.
  operator bool() const ;
  bool isNull() const ;
  void setMemory(float * x) ;
protected:
  float * memory ;
  size_t memorySize ;
  Device memoryType ;
} ;
// Copy constructor: copies the geometry and the storage pointer (shallow).
inline Tensor::Tensor(Tensor const& t)
: TensorGeometry(t), memory(t.memory), memorySize(t.memorySize), memoryType(t.memoryType)
{ }
// Two tensors are compatible when they may appear in the same operation:
// an empty or null tensor is compatible with anything, and two concrete
// tensors must live on the same device.
inline bool areCompatible(Tensor const & a, Tensor const & b)
{
  if (a.isEmpty() || a.isNull()) { return true ; }
  if (b.isEmpty() || b.isNull()) { return true ; }
  return a.getMemoryType() == b.getMemoryType() ;
}
}
#endif
| 1,718 |
1,144 | // SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2015 Google, Inc
*/
#include <common.h>
#include <dm.h>
#include <mmc.h>
#include <dm/test.h>
#include <test/ut.h>
/*
* Basic test of the mmc uclass. We could expand this by implementing an MMC
* stack for sandbox, or at least implementing the basic operation.
*/
/* Check that MMC device 0 can be obtained (bound and probed) via its uclass. */
static int dm_test_mmc_base(struct unit_test_state *uts)
{
	struct udevice *dev;
	ut_assertok(uclass_get_device(UCLASS_MMC, 0, &dev));
	return 0;
}
DM_TEST(dm_test_mmc_base, DM_TESTF_SCAN_PDATA | DM_TESTF_SCAN_FDT);
/*
 * Check block-device access through the MMC stack: locate the block
 * descriptor for mmc 0, read the first two 512-byte blocks and verify
 * they start with the expected sandbox test string.
 */
static int dm_test_mmc_blk(struct unit_test_state *uts)
{
	struct udevice *dev;
	struct blk_desc *dev_desc;
	char cmp[1024];
	ut_assertok(uclass_get_device(UCLASS_MMC, 0, &dev));
	ut_assertok(blk_get_device_by_str("mmc", "0", &dev_desc));
	/* Read a few blocks and look for the string we expect */
	ut_asserteq(512, dev_desc->blksz);
	memset(cmp, '\0', sizeof(cmp));
	ut_asserteq(2, blk_dread(dev_desc, 0, 2, cmp));
	ut_assertok(strcmp(cmp, "this is a test"));
	return 0;
}
DM_TEST(dm_test_mmc_blk, DM_TESTF_SCAN_PDATA | DM_TESTF_SCAN_FDT);
| 467 |
6,300 | //
// Created by <NAME> on 6/1/21.
//
#ifndef MENU_DARWIN_H
#define MENU_DARWIN_H
#include "common.h"
#include "ffenestri_darwin.h"
// Kinds of entries a menu may contain.
enum MenuItemType {Text = 0, Checkbox = 1, Radio = 2};
// Which kind of top-level menu a Menu struct represents.
enum MenuType {ApplicationMenuType = 0, ContextMenuType = 1, TrayMenuType = 2};
// Printable names, indexed by MenuType.
static const char *MenuTypeAsString[] = {
    "ApplicationMenu", "ContextMenu", "TrayMenu",
};
// Mirror of Foundation's NSRange, declared here so the Objective-C
// headers do not need to be imported.
typedef struct _NSRange {
    unsigned long location;
    unsigned long length;
} NSRange;
// Values of AppKit's NSFontWeight* constants, duplicated for the same reason.
#define NSFontWeightUltraLight -0.8
#define NSFontWeightThin -0.6
#define NSFontWeightLight -0.4
#define NSFontWeightRegular 0.0
#define NSFontWeightMedium 0.23
#define NSFontWeightSemibold 0.3
#define NSFontWeightBold 0.4
#define NSFontWeightHeavy 0.56
#define NSFontWeightBlack 0.62
// Implemented by the window layer; receives serialized menu event messages.
extern void messageFromWindowCallback(const char *);
// State for one native menu (application, context or tray menu).
typedef struct {
    // Menu title shown in the UI.
    const char *title;
    /*** Internal ***/
    // The decoded version of the Menu JSON
    JsonNode *processedMenu;
    // Lookup tables built from the JSON: menu items by ID and radio groups.
    struct hashmap_s menuItemMap;
    struct hashmap_s radioGroupMap;
    // Vector to keep track of callback data memory
    vec_void_t callbackDataCache;
    // The NSMenu for this menu
    id menu;
    // The parent data, eg ContextMenuStore or Tray
    void *parentData;
    // The commands for the menu callbacks
    const char *callbackCommand;
    // This indicates if we are an Application Menu, tray menu or context menu
    enum MenuType menuType;
} Menu;
// Context attached to each NSMenuItem so its callback can locate the
// owning menu, the item's ID and its kind.
typedef struct {
    id menuItem;
    Menu *menu;
    const char *menuID;
    enum MenuItemType menuItemType;
} MenuItemCallbackData;
// NewMenu creates a new Menu struct, saving the given menu structure as JSON
Menu* NewMenu(JsonNode *menuData);
Menu* NewApplicationMenu(const char *menuAsJSON);
// Allocates the per-item callback context (cached in callbackDataCache).
MenuItemCallbackData* CreateMenuItemCallbackData(Menu *menu, id menuItem, const char *menuID, enum MenuItemType menuItemType);
// Releases the menu, its maps and cached callback data.
void DeleteMenu(Menu *menu);
// Creates a JSON message for the given menuItemID and data
const char* createMenuClickedMessage(const char *menuItemID, const char *data, enum MenuType menuType, const char *parentID);
// Callback for text menu items
void menuItemCallback(id self, SEL cmd, id sender);
// Translates an accelerator key name into its native key equivalent.
id processAcceleratorKey(const char *key);
void addSeparator(id menu);
id createMenuItemNoAutorelease( id title, const char *action, const char *key);
id createMenuItem(id title, const char *action, const char *key);
id addMenuItem(id menu, const char *title, const char *action, const char *key, bool disabled);
id createMenu(id title);
// Builders for the standard macOS app/edit menus.
void createDefaultAppMenu(id parentMenu);
void createDefaultEditMenu(id parentMenu);
void processMenuRole(Menu *menu, id parentMenu, JsonNode *item);
// This converts a string array of modifiers into the
// equivalent MacOS Modifier Flags
unsigned long parseModifiers(const char **modifiers);
id processRadioMenuItem(Menu *menu, id parentmenu, const char *title, const char *menuid, bool disabled, bool checked, const char *acceleratorkey);
id processCheckboxMenuItem(Menu *menu, id parentmenu, const char *title, const char *menuid, bool disabled, bool checked, const char *key);
id processTextMenuItem(Menu *menu, id parentMenu, const char *title, const char *menuid, bool disabled, const char *acceleratorkey, const char **modifiers, const char* tooltip, const char* image, const char* fontName, int fontSize, const char* RGBA, bool templateImage, bool alternate, JsonNode* styledLabel);
void processMenuItem(Menu *menu, id parentMenu, JsonNode *item);
// Walks the decoded menu JSON and builds the native menu tree.
void processMenuData(Menu *menu, JsonNode *menuData);
void processRadioGroupJSON(Menu *menu, JsonNode *radioGroup);
id GetMenu(Menu *menu);
// Styled-text helpers for menu item labels.
id createAttributedString(const char* title, const char* fontName, int fontSize, const char* RGBA);
id createAttributedStringFromStyledLabel(JsonNode *styledLabel, const char* fontName, int fontSize);
| 1,186 |
488 | // An example ROSE plugin
#ifndef CLASS_HIERARCHY_WRITER_H
#define CLASS_HIERARCHY_WRITER_H 1
#include <ostream>
#include "ClassHierarchyAnalysis.h"
#include "ObjectLayoutAnalysis.h"
namespace CodeThorn
{
  /// filter type for class hierarchy writer
  /// returns true, if a class should be included in the graph
  using ClassFilterFn = std::function<bool (ClassKeyType)>;
  /// writes out a class analysis graph for all classes and casts
  /// \param os      receives Graphviz dot output
  /// \param nameOf  maps a class key to its display name
  /// \param include filter selecting which classes appear in the graph
  void classHierarchyDot( std::ostream& os,
                          ClassNameFn& nameOf,
                          ClassFilterFn include,
                          const ClassAnalysis& classes,
                          const CastAnalysis& casts
                        );
  /// writes out the virtual functions relationships as text
  /// \param withOverridden if true, also lists overridden base-class functions
  void virtualFunctionsTxt( std::ostream& os,
                            ClassNameFn& className,
                            FuncNameFn& funcName,
                            ClassFilterFn include,
                            const ClassAnalysis& classes,
                            const VirtualFunctionAnalysis& vfuns,
                            bool withOverridden = false
                          );
  /// writes out the virtual base class initialization order as text for
  /// classes that have virtual base classes.
  void virtualBaseClassInitOrderTxt( std::ostream& os,
                                     ClassNameFn& className,
                                     ClassFilterFn include,
                                     const ClassAnalysis& classes
                                   );
  /// writes out the class layout as text
  void classLayoutTxt( std::ostream& os,
                       ClassNameFn& className,
                       VarNameFn& varName,
                       ClassFilterFn include,
                       const ObjectLayoutContainer& classLayout
                     );
  /// writes out the class layout as dot graph
  void classLayoutDot( std::ostream& os,
                       ClassNameFn& className,
                       VarNameFn& varName,
                       ClassFilterFn include,
                       const ObjectLayoutContainer& classLayout
                     );
}
| 1,084 |
9,136 | <filename>examples/ThirdPartyLibs/BussIK/LinearR2.cpp
/*
*
* Mathematics Subpackage (VrMath)
*
*
* Author: <NAME>, <EMAIL>.
* Web page: http://math.ucsd.edu/~sbuss/MathCG
*
*
This software is provided 'as-is', without any express or implied warranty.
In no event will the authors be held liable for any damages arising from the use of this software.
Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it freely,
subject to the following restrictions:
1. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution.
*
*
*/
#include "LinearR2.h"
#include <assert.h>
// ******************************************************
// * VectorR2 class - math library functions *
// * * * * * * * * * * * * * * * * * * * * * * * * * * **
// Canonical constant vectors: origin, axis unit vectors and their negations.
const VectorR2 VectorR2::Zero(0.0, 0.0);
const VectorR2 VectorR2::UnitX(1.0, 0.0);
const VectorR2 VectorR2::UnitY(0.0, 1.0);
const VectorR2 VectorR2::NegUnitX(-1.0, 0.0);
const VectorR2 VectorR2::NegUnitY(0.0, -1.0);
// 2x2 identity matrix.
const Matrix2x2 Matrix2x2::Identity(1.0, 0.0, 0.0, 1.0);
// ******************************************************
// * Matrix2x2 class - math library functions *
// * * * * * * * * * * * * * * * * * * * * * * * * * * **
// ******************************************************
// * LinearMapR2 class - math library functions *
// * * * * * * * * * * * * * * * * * * * * * * * * * * **
LinearMapR2 LinearMapR2::Inverse() const  // Returns inverse
{
	// inv = (1/det) * adjugate: diagonal entries swap, off-diagonal
	// entries are negated; everything scaled by 1/det.
	const double scale = 1.0 / (m11 * m22 - m12 * m21);
	return LinearMapR2(m22 * scale, -m21 * scale, -m12 * scale, m11 * scale);
}
LinearMapR2& LinearMapR2::Invert()  // Converts into inverse.
{
	// In-place 2x2 inversion via the adjugate:
	// inv = (1/det) * [ m22  -m12 ; -m21  m11 ].
	double detInv = 1.0 / (m11 * m22 - m12 * m21);
	double temp;
	temp = m11 * detInv;
	m11 = m22 * detInv;
	m22 = temp;
	m12 = -m12 * detInv;
	// Fix: the original computed `m21 = -m22 * detInv`, but m22 had already
	// been overwritten with the new diagonal value above, yielding an
	// incorrect inverse (and disagreeing with Inverse()). The adjugate
	// needs the still-unmodified m21 here.
	m21 = -m21 * detInv;
	return (*this);
}
// Solves M * x = u for x, i.e. returns M^{-1} * u.
VectorR2 LinearMapR2::Solve(const VectorR2& u) const  // Returns solution
{
	// Just uses Inverse() for now.
	return (Inverse() * u);
}
// ******************************************************
// * RotationMapR2 class - math library functions *
// * * * * * * * * * * * * * * * * * * * * * * * * * * **
// ***************************************************************
// * 2-space vector and matrix utilities *
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
// ***************************************************************
// Stream Output Routines *
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
// Streams a vector in the form "<x,y>".
ostream& operator<<(ostream& os, const VectorR2& u)
{
	return (os << "<" << u.x << "," << u.y << ">");
}
| 1,018 |
575 | // Copyright 2020 Proyectos y Sistemas de Mantenimiento SL (eProsima).
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/**
* @file GuidUtils.hpp
*/
#ifndef RTPS_COMMON_GUIDUTILS_HPP_
#define RTPS_COMMON_GUIDUTILS_HPP_
#include <cstdint>
#include <limits>
#include <random>
#include <fastdds/rtps/common/GuidPrefix_t.hpp>
#include <fastdds/rtps/common/VendorId_t.hpp>
#include <utils/SystemInfo.hpp>
namespace eprosima {
namespace fastdds {
namespace rtps {
/**
* This singleton handles the generation of GUID prefix
*/
class GuidUtils
{
    using GuidPrefix_t = eprosima::fastrtps::rtps::GuidPrefix_t;
    using octet = eprosima::fastrtps::rtps::octet;

public:

    /**
     * Create a GUID prefix based on a participant id.
     *
     * Bytes 0-7 come from the precomputed vendor/host/process prefix_;
     * bytes 8-11 are the little-endian serialization of participant_id.
     *
     * @param [in] participant_id Identifier of the participant for which to generate the GUID prefix.
     * @param [out] guid_prefix Generated GUID prefix.
     */
    void guid_prefix_create(
            uint32_t participant_id,
            GuidPrefix_t& guid_prefix) const
    {
        // Use precalculated vendor-host-process part of the prefix
        std::copy(prefix_.value, prefix_.value + 8, guid_prefix.value);

        // Add little endian serialization of participant_id
        guid_prefix.value[8] = static_cast<octet>(participant_id & 0xFF);
        guid_prefix.value[9] = static_cast<octet>((participant_id >> 8) & 0xFF);
        guid_prefix.value[10] = static_cast<octet>((participant_id >> 16) & 0xFF);
        guid_prefix.value[11] = static_cast<octet>((participant_id >> 24) & 0xFF);
    }

    /**
     * Get a reference to the singleton instance.
     *
     * @return reference to the singleton instance.
     */
    static const GuidUtils& instance()
    {
        static GuidUtils singleton;
        return singleton;
    }

private:

    // Computes the vendor/host/process portion (bytes 0-7) once at startup.
    GuidUtils()
    {
        // This is to comply with RTPS section 9.3.1.5 - Mapping of the GUID_t
        prefix_.value[0] = c_VendorId_eProsima[0];
        prefix_.value[1] = c_VendorId_eProsima[1];

        // On Fast DDS, next two bytes should be the same across all processes on the same host
        uint16_t host_id = SystemInfo::instance().host_id();
        prefix_.value[2] = static_cast<octet>(host_id & 0xFF);
        prefix_.value[3] = static_cast<octet>((host_id >> 8) & 0xFF);

        // On Fast DDS, next four bytes would be the same across all participants on the same process.
        // Even though using the process id here might seem a nice idea, there are cases where it might not serve as
        // unique identifier of the process:
        // - One of them is when using a Kubernetes pod on which several containers with their own PID namespace are
        //   created.
        // - Another one is when a system in which a Fast DDS application is started during boot time. If the system
        //   crashes and is then re-started, it may happen that the participant may be considered an old one if the
        //   announcement lease duration did not expire.
        // In order to behave correctly in those situations, we will use the 16 least-significant bits of the PID,
        // along with a random 16 bits value. This should not be a problem, as the PID is known to be 16 bits long on
        // several systems. On those where it is longer, using the 16 least-significant ones along with a random value
        // should still give enough uniqueness for our use cases.
        int pid = SystemInfo::instance().process_id();
        prefix_.value[4] = static_cast<octet>(pid & 0xFF);
        prefix_.value[5] = static_cast<octet>((pid >> 8) & 0xFF);

        std::random_device generator;
        std::uniform_int_distribution<uint16_t> distribution(0, std::numeric_limits<uint16_t>::max());
        uint16_t rand_value = distribution(generator);
        prefix_.value[6] = static_cast<octet>(rand_value & 0xFF);
        prefix_.value[7] = static_cast<octet>((rand_value >> 8) & 0xFF);
    }

    // First 8 bytes (vendor + host + process/random) shared by all GUIDs
    // generated in this process.
    GuidPrefix_t prefix_;
};
} // namespace rtps
} // namespace fastdds
} // namespace eprosima
#endif // RTPS_COMMON_GUIDUTILS_HPP_
| 1,683 |
979 | # This test generates the 'precaculatedHashes' array in 1.sys.test.ts
import hashlib
import math
def calculate_hash(total_size: int) -> str:
    """Return the hex digest for a virtual file of ``total_size`` dashes.

    Small files (< 20 MB) are hashed directly with SHA-256. Larger files use
    the chunked scheme: split into 100 MB blocks, hash each block, and fold
    the block digests' hex strings into an outer SHA-256.

    Improvement over the original: the chunked path no longer materializes
    the whole ``total_size``-byte buffer (up to 200 MB); each block's bytes
    are generated independently, which is byte-for-byte equivalent for a
    constant b'-' payload.
    """
    if total_size >= 20000000:
        divider = math.ceil(total_size / 100000000)
        r = hashlib.sha256()
        for x in range(divider):
            start = x * 100000000
            # Last block is truncated to the end of the file.
            end = min((x + 1) * 100000000, total_size) - 1
            h = hashlib.sha256()
            h.update(b'-' * (end - start + 1))
            # Outer hash consumes the ASCII hex digest, not the raw digest.
            r.update(h.hexdigest().encode('ascii'))
        return r.hexdigest()
    h = hashlib.sha256()
    h.update(b"-" * total_size)
    return h.hexdigest()
def print_hash(i: int):
    # Print one " [size, 'hash']," row of the TypeScript array literal.
    print(f" [{i}, '{calculate_hash(i)}'],")
def calculate_and_print_hash(i: int):
    """Print the precalculated-hash row for size ``i``.

    Fix: the original called ``print_hash(i, calculate_hash(i))``, but
    ``print_hash`` takes a single argument (it computes the hash itself),
    so every invocation raised ``TypeError``.
    """
    print_hash(i)
# Emit the TypeScript array literal consumed by 1.sys.test.ts.
# ('precaculatedHashes' spelling matches the identifier in that test file.)
print(' const precaculatedHashes = [')
# Edge-case sizes first.
calculate_and_print_hash(0)
calculate_and_print_hash(1)
calculate_and_print_hash(2)
calculate_and_print_hash(10)
calculate_and_print_hash(100)
# Sweep across the direct-hash/chunked-hash boundary up to ~200 MB.
# NOTE(review): each call hashes up to 200 MB of data, so a full run takes
# significant time and memory.
for x in range(5000, 200000000, 9999999):
    calculate_and_print_hash(x)
calculate_and_print_hash(200000000)
calculate_and_print_hash(200000001)
print(' ]')
| 581 |
585 | /**
* Copyright (c) 2016-present, Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <caffe2/distributed/store_handler.h>
namespace caffe2 {
// StoreHandler backed by files under a base path, allowing simple
// cross-process rendezvous through a shared filesystem. Key names are
// mapped to file paths via objectPath()/tmpPath().
class FileStoreHandler : public StoreHandler {
 public:
  explicit FileStoreHandler(const std::string& path, const std::string& prefix);
  virtual ~FileStoreHandler();
  // Stores `data` under key `name`.
  virtual void set(const std::string& name, const std::string& data) override;
  // Retrieves the value previously stored under `name`.
  virtual std::string get(const std::string& name) override;
  // Atomically adds `value` to the counter `name`; returns the new total.
  virtual int64_t add(const std::string& name, int64_t value) override;
  // True if all given keys currently exist.
  virtual bool check(const std::vector<std::string>& names) override;
  // Blocks until all given keys exist or `timeout` elapses.
  virtual void wait(
      const std::vector<std::string>& names,
      const std::chrono::milliseconds& timeout = kDefaultTimeout) override;
 protected:
  std::string basePath_;
  std::string realPath(const std::string& path);
  std::string tmpPath(const std::string& name);
  std::string objectPath(const std::string& name);
};
} // namespace caffe2
| 432 |
1,444 | <reponame>GabrielSturtevant/mage
package mage.cards.t;
import java.util.UUID;
import mage.MageInt;
import mage.MageObject;
import mage.abilities.Abilities;
import mage.abilities.Ability;
import mage.abilities.common.SpellCastControllerTriggeredAbility;
import mage.abilities.effects.common.search.SearchLibraryPutInHandEffect;
import mage.abilities.keyword.EnchantAbility;
import mage.cards.CardImpl;
import mage.cards.CardSetInfo;
import mage.constants.CardType;
import mage.constants.SubType;
import mage.filter.FilterCard;
import mage.filter.StaticFilters;
import mage.filter.predicate.Predicate;
import mage.game.Game;
import mage.target.common.TargetCardInLibrary;
/**
*
* @author LevelX2
*/
public final class Tallowisp extends CardImpl {

    // Restricts the library search to Aura cards with "Enchant creature".
    private static final FilterCard filterAura = new FilterCard("Aura card with enchant creature");

    static {
        filterAura.add(CardType.ENCHANTMENT.getPredicate());
        filterAura.add(SubType.AURA.getPredicate());
        filterAura.add(new TallowispAbilityPredicate());
    }

    public Tallowisp(UUID ownerId, CardSetInfo setInfo) {
        super(ownerId,setInfo,new CardType[]{CardType.CREATURE},"{1}{W}");
        this.subtype.add(SubType.SPIRIT);
        this.power = new MageInt(1);
        this.toughness = new MageInt(3);

        // Whenever you cast a Spirit or Arcane spell, you may search your library for an Aura card with enchant creature, reveal it, and put it into your hand. If you do, shuffle your library.
        this.addAbility(new SpellCastControllerTriggeredAbility(new SearchLibraryPutInHandEffect(new TargetCardInLibrary(filterAura), true, true), StaticFilters.SPIRIT_OR_ARCANE_CARD, true));
    }

    // Copy constructor used by the game engine when duplicating cards.
    private Tallowisp(final Tallowisp card) {
        super(card);
    }

    @Override
    public Tallowisp copy() {
        return new Tallowisp(this);
    }
}
/**
 * Matches objects that have an {@link EnchantAbility} whose rule text is
 * exactly "Enchant creature", i.e. Auras that can be attached to a creature.
 */
class TallowispAbilityPredicate implements Predicate<MageObject> {

    public TallowispAbilityPredicate() {
    }

    @Override
    public boolean apply(MageObject input, Game game) {
        // Idiom: enhanced for-loop instead of index-based iteration, and
        // String.equals instead of contentEquals (both operands are Strings).
        for (Ability ability : input.getAbilities()) {
            if (ability instanceof EnchantAbility
                    && "Enchant creature".equals(ability.getRule())) {
                return true;
            }
        }
        return false;
    }

    @Override
    public String toString() {
        return "Aura card with enchant creature";
    }
}
| 947 |
530 | <filename>application/org.openjdk.jmc.joverflow/src/main/java/org/openjdk/jmc/joverflow/support/ProblemRecorder.java
/*
* Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* The contents of this file are subject to the terms of either the Universal Permissive License
* v 1.0 as shown at http://oss.oracle.com/licenses/upl
*
* or the following license:
*
* Redistribution and use in source and binary forms, with or without modification, are permitted
* provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this list of conditions
* and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice, this list of
* conditions and the following disclaimer in the documentation and/or other materials provided with
* the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its contributors may be used to
* endorse or promote products derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
* FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY
* WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package org.openjdk.jmc.joverflow.support;
import org.openjdk.jmc.joverflow.descriptors.CollectionInstanceDescriptor;
import org.openjdk.jmc.joverflow.heap.model.JavaLazyReadObject;
import org.openjdk.jmc.joverflow.heap.model.JavaObject;
import org.openjdk.jmc.joverflow.heap.model.JavaValueArray;
import org.openjdk.jmc.joverflow.heap.model.Snapshot;
/**
* JOverflow core heap scanning code invokes methods of this interface when it comes across
* problematic objects such as empty collections or duplicate strings. The implementation is
* supposed to record these problems, likely in an aggregated form, and then provide the resulting
* data to the user. There is an additional method that can be called by the heap scanner to report
* a Java instance that does not have any problems. But since reporting non-problematic objects
* requires additional time and memory and may not be needed in every scenario, that method is
* called by the scanner only when it is configured to do so.
*/
public interface ProblemRecorder {

    /**
     * Implementation of this method may initialize some internal data using information from the
     * passed objects.
     *
     * @param snapshot the heap dump model that is about to be scanned
     * @param hs pre-computed overall statistics for that heap
     */
    public void initialize(Snapshot snapshot, HeapStats hs);

    /**
     * Reports a problematic collection or object array class with the specified problem and
     * overhead of specified kind, and the reference chain leading to that object from some GC root.
     * An instance of CollectionClassDescriptor that is also passed to the method allows its
     * implementation to find more information about the problematic collection, if needed, e.g. its
     * implementation-inclusive size, number of elements, etc.
     *
     * @param col the problematic collection or object array instance
     * @param colDesc descriptor giving access to further details about {@code col}
     * @param ovhdKind the kind of problem that was detected
     * @param ovhd the overhead attributed to this problem
     * @param referer the reference chain element through which {@code col} is reached from a GC root
     */
    public void recordProblematicCollection(
            JavaLazyReadObject col, CollectionInstanceDescriptor colDesc, Constants.ProblemKind ovhdKind, int ovhd,
            RefChainElement referer);

    /**
     * Reports a good collection or object array, that does not have any problems, and a reference
     * chain leading to it.
     *
     * @param col the non-problematic collection or object array instance
     * @param colDesc descriptor giving access to further details about {@code col}
     * @param referer the reference chain element through which {@code col} is reached from a GC root
     */
    public void recordGoodCollection(
            JavaLazyReadObject col, CollectionInstanceDescriptor colDesc, RefChainElement referer);

    /**
     * Reports a duplicate string with the associated overhead and reference chain leading to it
     * from some GC root. If hasDupBackingCharArray is true, the backing char array is duplicated;
     * otherwise there are two or more String objects pointing at the same backing char array.
     *
     * @param strObj the duplicated String instance
     * @param stringValue the character contents of the string
     * @param implInclusiveSize the size of the string including its backing array
     * @param ovhd the overhead attributed to the duplication
     * @param hasDupBackingCharArray whether the backing char array itself is duplicated
     * @param referer the reference chain element through which {@code strObj} is reached from a GC root
     */
    public void recordDuplicateString(
            JavaObject strObj, String stringValue, int implInclusiveSize, int ovhd, boolean hasDupBackingCharArray,
            RefChainElement referer);

    /**
     * Reports a good string, that does not have any duplicates, and a reference chain leading to
     * it.
     *
     * @param strObj the non-duplicated String instance
     * @param implInclusiveSize the size of the string including its backing array
     * @param referer the reference chain element through which {@code strObj} is reached from a GC root
     */
    public void recordNonDuplicateString(JavaObject strObj, int implInclusiveSize, RefChainElement referer);

    /**
     * Reports a duplicate primitive array with the associated overhead and reference chain leading
     * to it from some GC root.
     *
     * @param ar the duplicated primitive array
     * @param ovhd the overhead attributed to the duplication
     * @param referer the reference chain element through which {@code ar} is reached from a GC root
     */
    public void recordDuplicateArray(JavaValueArray ar, int ovhd, RefChainElement referer);

    /**
     * Reports a good primitive array, that does not have any duplicates, and a reference chain
     * leading to it from some GC root.
     *
     * @param ar the non-duplicated primitive array
     * @param referer the reference chain element through which {@code ar} is reached from a GC root
     */
    public void recordNonDuplicateArray(JavaValueArray ar, RefChainElement referer);

    /**
     * Reports a problematic instance of WeakHashMap or its subclass, that incurs the specified
     * minimum overhead due to references from values pointing back to keys.
     *
     * @param col the WeakHashMap (or subclass) instance
     * @param colDesc descriptor giving access to further details about {@code col}
     * @param ovhd the minimum overhead attributed to the back references
     * @param valueTypeAndFieldSample a sample description of the value type/field holding a back reference
     * @param referer the reference chain element through which {@code col} is reached from a GC root
     */
    public void recordWeakHashMapWithBackRefs(
            JavaObject col, CollectionInstanceDescriptor colDesc, int ovhd, String valueTypeAndFieldSample,
            RefChainElement referer);

    /**
     * If this method returns true for the given object,
     * {@link #recordGoodInstance(JavaObject, RefChainElement)} will be called for it next.
     *
     * @param obj the instance the scanner is about to report
     * @return true when the recorder wants {@code obj} reported as a good instance
     */
    public boolean shouldRecordGoodInstance(JavaObject obj);

    /**
     * Reports a good Java instance, that does not have any problems, with the reference chain
     * leading to it from some GC root. Will be called by the heap scanner only if previously
     * {@link #shouldRecordGoodInstance(JavaObject)} returned true for it.
     * <p>
     * NOTE: currently objects that are good in principle, but belong to the implementation of some
     * collection, such as HashMap$Entry, are not reported here. That is done in part to keep
     * uniform our view of the heap, as it is generated by the core heap scanner, where
     * implementation details of collections are not exposed in any way.
     *
     * @param obj the non-problematic instance
     * @param referer the reference chain element through which {@code obj} is reached from a GC root
     */
    public void recordGoodInstance(JavaObject obj, RefChainElement referer);
}
| 1,698 |
1,663 | <gh_stars>1000+
/*
Copyright (c) 2015 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom
the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
IN THE SOFTWARE.
*/
#include <assert.h>
#include <stdint.h>
#include <stdlib.h>
#include <stdio.h>
#include <sys/time.h>
#include "../libdill.h"
/* One link of the whisper chain: take an int from the channel on the
   right, add one to it and hand it on to the channel on the left. */
static coroutine void whisper(int left, int right) {
    int n;
    chrecv(right, &n, sizeof(n), -1);
    n = n + 1;
    chsend(left, &n, sizeof(n), -1);
}
/*
 * Benchmark driver: builds a chain of <count> whisper coroutines connected
 * by channels, injects the value 1 at one end, waits for it to come out the
 * other end incremented once per hop, and reports timing statistics.
 */
int main(int argc, char *argv[]) {
    if(argc != 2) {
        printf("usage: whispers <number-of-whispers>\n");
        return 1;
    }
    long count = atol(argv[1]);
    /* atol() returns 0 for non-numeric input; a non-positive count would
       later divide by zero when computing the per-whisper cost. */
    if(count <= 0) {
        printf("usage: whispers <number-of-whispers>\n");
        return 1;
    }
    int64_t start = now();
    int left[2];
    int right[2];
    chmake(left);
    /* Initially "right" aliases the first channel; each iteration below
       creates a fresh channel and inserts one whisper coroutine between
       the previous channel and the new one. */
    right[0] = left[0];
    right[1] = left[1];
    int leftmost = left[0];
    long i;
    for (i = 0; i < count; ++i) {
        chmake(right);
        go(whisper(left[1], right[0]));
        left[0] = right[0];
        left[1] = right[1];
    }
    /* Kick the chain off at the rightmost end and wait for the value to
       travel through every coroutine back to the leftmost channel. */
    int val = 1;
    chsend(right[1], &val, sizeof(val), -1);
    int res;
    chrecv(leftmost, &res, sizeof(res), -1);
    assert(res == count + 1);
    int64_t stop = now();
    long duration = (long)(stop - start);     /* elapsed time in milliseconds */
    long ns = (duration * 1000000) / count;   /* per-whisper cost in nanoseconds */
    printf("took %f seconds\n", (float)duration / 1000);
    printf("performed %ld whispers in %f seconds\n", count, ((float)duration) / 1000);
    printf("duration of one whisper: %ld ns\n", ns);
    /* Guard the final ratio: a very fast run (or huge count) can round the
       per-whisper cost down to 0 ns, which would divide by zero here. */
    if (ns > 0) {
        printf("whispers per second: %fM\n",
            (float)(1000000000 / ns) / 1000000);
    }
    return 0;
}
| 903 |
1,874 | # Copyright 2012 Nebula, Inc.
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from nova import exception
from nova.tests import fixtures as nova_fixtures
from nova.tests.functional.api_sample_tests import api_sample_base
class NetworksJsonTests(api_sample_base.ApiSampleTestBaseV21):
    """API sample tests for the deprecated ``os-networks`` endpoints."""

    ADMIN_API = True
    sample_dir = 'os-networks'

    def test_network_list(self):
        """Listing networks matches the recorded sample response."""
        resp = self._do_get('os-networks')
        self._verify_response('networks-list-resp', {}, resp, 200)

    def test_network_show(self):
        """Showing one network by its Neutron UUID matches the sample."""
        net_id = nova_fixtures.NeutronFixture.network_1['id']
        resp = self._do_get('os-networks/%s' % net_id)
        self._verify_response('network-show-resp', {}, resp, 200)

    @mock.patch('nova.network.neutron.API.get',
                side_effect=exception.Unauthorized)
    def test_network_show_token_expired(self, mock_get):
        """An expired/unauthorized token surfaces as HTTP 401."""
        net_id = nova_fixtures.NeutronFixture.network_1['id']
        resp = self._do_get('os-networks/%s' % net_id)
        self.assertEqual(401, resp.status_code)

    def test_network_create(self):
        """Network creation was removed; the API answers 410 Gone."""
        self.api.api_post(
            'os-networks', {}, check_response_status=[410])

    def test_network_add(self):
        """The network 'add' action was removed; the API answers 410 Gone."""
        self.api.api_post(
            'os-networks/add', {}, check_response_status=[410])

    def test_network_delete(self):
        """Network deletion was removed; the API answers 410 Gone."""
        self.api.api_delete(
            'os-networks/always-delete', check_response_status=[410])
| 803 |
2,805 | <reponame>robin-NEC/ckan
{
"processing": "Bezig...",
"lengthMenu": "_MENU_ resultaten weergeven",
"zeroRecords": "Geen resultaten gevonden",
"info": "_START_ tot _END_ van _TOTAL_ resultaten",
"infoEmpty": "Geen resultaten om weer te geven",
"infoFiltered": " (gefilterd uit _MAX_ resultaten)",
"search": "Zoeken:",
"emptyTable": "Geen resultaten aanwezig in de tabel",
"infoThousands": ".",
"loadingRecords": "Een moment geduld aub - bezig met laden...",
"paginate": {
"first": "Eerste",
"last": "Laatste",
"next": "Volgende",
"previous": "Vorige"
},
"aria": {
"sortAscending": ": activeer om kolom oplopend te sorteren",
"sortDescending": ": activeer om kolom aflopend te sorteren"
}
} | 356 |
3,372 | <reponame>MC-JY/aws-sdk-java<gh_stars>1000+
/*
* Copyright 2016-2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
* the License. A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
* CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
* and limitations under the License.
*/
package com.amazonaws.services.wafv2.model;
import java.io.Serializable;
import javax.annotation.Generated;
import com.amazonaws.protocol.StructuredPojo;
import com.amazonaws.protocol.ProtocolMarshaller;
/**
* <p>
* A version of the named managed rule group, that the rule group's vendor publishes for use by customers.
* </p>
* <note>
* <p>
* This is intended for use only by vendors of managed rule sets. Vendors are Amazon Web Services and Amazon Web
* Services Marketplace sellers.
* </p>
* <p>
* Vendors, you can use the managed rule set APIs to provide controlled rollout of your versioned managed rule group
* offerings for your customers. The APIs are <code>ListManagedRuleSets</code>, <code>GetManagedRuleSet</code>,
* <code>PutManagedRuleSetVersions</code>, and <code>UpdateManagedRuleSetVersionExpiryDate</code>.
* </p>
* </note>
*
* @see <a href="http://docs.aws.amazon.com/goto/WebAPI/wafv2-2019-07-29/VersionToPublish" target="_top">AWS API
* Documentation</a>
*/
@Generated("com.amazonaws:aws-java-sdk-code-generator")
public class VersionToPublish implements Serializable, Cloneable, StructuredPojo {

    /**
     * The Amazon Resource Name (ARN) of the vendor's rule group that's used in the published
     * managed rule group version.
     */
    private String associatedRuleGroupArn;

    /**
     * The amount of time the vendor expects this version of the managed rule group to last, in
     * days.
     */
    private Integer forecastedLifetime;

    /**
     * Sets the Amazon Resource Name (ARN) of the vendor's rule group that's used in the published
     * managed rule group version.
     *
     * @param associatedRuleGroupArn
     *        the vendor rule group ARN
     */
    public void setAssociatedRuleGroupArn(String associatedRuleGroupArn) {
        this.associatedRuleGroupArn = associatedRuleGroupArn;
    }

    /**
     * Returns the Amazon Resource Name (ARN) of the vendor's rule group that's used in the
     * published managed rule group version.
     *
     * @return the vendor rule group ARN
     */
    public String getAssociatedRuleGroupArn() {
        return this.associatedRuleGroupArn;
    }

    /**
     * Fluent variant of {@link #setAssociatedRuleGroupArn(String)}.
     *
     * @param associatedRuleGroupArn
     *        the vendor rule group ARN
     * @return this object, so that method calls can be chained together
     */
    public VersionToPublish withAssociatedRuleGroupArn(String associatedRuleGroupArn) {
        setAssociatedRuleGroupArn(associatedRuleGroupArn);
        return this;
    }

    /**
     * Sets the amount of time the vendor expects this version of the managed rule group to last,
     * in days.
     *
     * @param forecastedLifetime
     *        expected lifetime, in days
     */
    public void setForecastedLifetime(Integer forecastedLifetime) {
        this.forecastedLifetime = forecastedLifetime;
    }

    /**
     * Returns the amount of time the vendor expects this version of the managed rule group to
     * last, in days.
     *
     * @return expected lifetime, in days
     */
    public Integer getForecastedLifetime() {
        return this.forecastedLifetime;
    }

    /**
     * Fluent variant of {@link #setForecastedLifetime(Integer)}.
     *
     * @param forecastedLifetime
     *        expected lifetime, in days
     * @return this object, so that method calls can be chained together
     */
    public VersionToPublish withForecastedLifetime(Integer forecastedLifetime) {
        setForecastedLifetime(forecastedLifetime);
        return this;
    }

    /**
     * Returns a string representation of this object. This is useful for testing and debugging.
     * Sensitive data will be redacted from this string using a placeholder value.
     *
     * @return A string representation of this object.
     *
     * @see java.lang.Object#toString()
     */
    @Override
    public String toString() {
        // Builds the exact "{Field: value,...}" shape the generated code produced,
        // including the trailing comma after the first field when it is present.
        String body = "";
        if (getAssociatedRuleGroupArn() != null) {
            body += "AssociatedRuleGroupArn: " + getAssociatedRuleGroupArn() + ",";
        }
        if (getForecastedLifetime() != null) {
            body += "ForecastedLifetime: " + getForecastedLifetime();
        }
        return "{" + body + "}";
    }

    @Override
    public boolean equals(Object obj) {
        if (this == obj) {
            return true;
        }
        // instanceof is false for null, covering the original explicit null check.
        if (!(obj instanceof VersionToPublish)) {
            return false;
        }
        VersionToPublish that = (VersionToPublish) obj;
        return java.util.Objects.equals(this.associatedRuleGroupArn, that.getAssociatedRuleGroupArn())
                && java.util.Objects.equals(this.forecastedLifetime, that.getForecastedLifetime());
    }

    @Override
    public int hashCode() {
        // Objects.hash applies the same 31-based, null-as-zero combination the
        // hand-rolled generated code used, so hash values are unchanged.
        return java.util.Objects.hash(getAssociatedRuleGroupArn(), getForecastedLifetime());
    }

    @Override
    public VersionToPublish clone() {
        try {
            return (VersionToPublish) super.clone();
        } catch (CloneNotSupportedException e) {
            throw new IllegalStateException("Got a CloneNotSupportedException from Object.clone() " + "even though we're Cloneable!", e);
        }
    }

    @com.amazonaws.annotation.SdkInternalApi
    @Override
    public void marshall(ProtocolMarshaller protocolMarshaller) {
        com.amazonaws.services.wafv2.model.transform.VersionToPublishMarshaller.getInstance().marshall(this, protocolMarshaller);
    }
}
| 2,755 |
458 | <reponame>timmattison/gwt-material<filename>gwt-material/src/main/java/gwt/material/design/client/ui/MaterialRange.java<gh_stars>100-1000
/*
* #%L
* GwtMaterial
* %%
* Copyright (C) 2015 - 2017 GwtMaterialDesign
* %%
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* #L%
*/
package gwt.material.design.client.ui;
import com.google.gwt.dom.client.Document;
import com.google.gwt.dom.client.Element;
import com.google.gwt.event.dom.client.ChangeEvent;
import com.google.gwt.event.dom.client.ChangeHandler;
import com.google.gwt.event.dom.client.HasChangeHandlers;
import com.google.gwt.event.shared.HandlerRegistration;
import com.google.gwt.user.client.Window;
import gwt.material.design.client.base.AbstractValueWidget;
import gwt.material.design.client.base.HasInputChangeHandler;
import gwt.material.design.client.base.HasStatusText;
import gwt.material.design.client.base.mixin.StatusTextMixin;
import gwt.material.design.client.base.mixin.ToggleStyleMixin;
import gwt.material.design.client.constants.CssName;
import gwt.material.design.client.constants.InputType;
import gwt.material.design.client.events.InputChangeEvent;
import gwt.material.design.client.ui.html.Span;
import static gwt.material.design.jquery.client.api.JQuery.$;
//@formatter:off
/**
* Material Range - a slider that initialize the minimum and maximum values.
* <p>
* <h3>UiBinder Usage:</h3>
* <pre>
* {@code <m:MaterialRange value="2" min="20" max="50" value="25"/>}
* </pre>
*
* @author kevzlou7979
* @author <NAME>
* @see <a href="http://gwtmaterialdesign.github.io/gwt-material-demo/#range">Material Range</a>
* @see <a href="https://material.io/guidelines/components/sliders.html">Material Design Specification</a>
*/
//@formatter:on
public class MaterialRange extends AbstractValueWidget<Integer>
        implements HasChangeHandlers, HasStatusText, HasInputChangeHandler {

    // Property names of the underlying HTML <input type="range"> element.
    // Made final: these are constants and must never be reassigned.
    private static final String VALUE = "value";
    private static final String MAX = "max";
    private static final String MIN = "min";

    private MaterialPanel progress = new MaterialPanel();
    private MaterialPanel progressWrapper = new MaterialPanel();
    private MaterialPanel progressFillContainer = new MaterialPanel();
    private MaterialPanel rangeContainer = new MaterialPanel();
    private MaterialInput rangeInputElement = new MaterialInput();
    private Span thumb = new Span();
    private Span value = new Span();
    private boolean autoBlur;
    private MaterialLabel errorLabel = new MaterialLabel();
    private StatusTextMixin<AbstractValueWidget, MaterialLabel> statusTextMixin;
    private ToggleStyleMixin<MaterialRange> enableThumbMixin;

    /**
     * Creates a range
     */
    public MaterialRange() {
        super(Document.get().createDivElement());
    }

    /**
     * Creates a range with specified values
     *
     * @param min - start min value
     * @param max - end max value
     * @param value - default range value
     */
    public MaterialRange(Integer min, Integer max, Integer value) {
        this();
        setMin(min);
        setMax(max);
        setValue(value);
    }

    /** Resets the widget state and moves the slider back to its minimum value. */
    @Override
    public void reset() {
        super.reset();
        setValue(getMin());
    }

    @Override
    protected void onLoad() {
        super.onLoad();
        // Assemble the DOM structure: range input + custom thumb + progress bar.
        errorLabel.setVisible(false);
        rangeContainer.setStyleName(CssName.RANGE_FIELD);
        rangeInputElement.setType(InputType.RANGE);
        rangeContainer.add(rangeInputElement);
        thumb.getElement().setClassName(CssName.THUMB);
        value.getElement().setClassName(CssName.VALUE);
        thumb.add(value);
        rangeContainer.add(thumb);
        progressWrapper.getElement().setClassName("range-progress-wrapper");
        progress.getElement().setClassName(CssName.PROGRESS);
        progressFillContainer.getElement().setClassName("progress-container");
        progressFillContainer.add(progress);
        progressWrapper.add(progressFillContainer);
        rangeContainer.add(progressWrapper);
        add(rangeContainer);
        add(errorLabel);
        // Fire InputChangeEvent continuously while the user drags the thumb.
        $(rangeInputElement.getElement()).on("input", (event, o) -> {
            InputChangeEvent.fire(this, getValue());
            updateProgressWidth(getValue());
            return true;
        });
        // Fixing IE Inconsistent event handling on Value Change event
        // https://www.impressivewebs.com/onchange-vs-oninput-for-range-sliders/
        registerHandler(addMouseUpHandler(event -> {
            if (isIE()) {
                setValue(getValue(), true);
            }
        }));
        registerHandler(addChangeHandler(changeEvent -> {
            // Fixing IE Inconsistent event handling on Input Change event
            // https://www.impressivewebs.com/onchange-vs-oninput-for-range-sliders/
            if (isIE()) {
                InputChangeEvent.fire(this, getValue());
            }
            setValue(getValue(), !isIE());
            if (isAutoBlur()) {
                $(rangeInputElement.getElement()).blur();
            }
        }));
    }

    /**
     * Resizes the progress fill so it visually tracks the given slider value.
     * NOTE(review): assumes max > min; if they were ever equal, the percentage
     * computation below divides by zero -- confirm callers never configure that.
     */
    protected void updateProgressWidth(int value) {
        double range = ((value - getMin()) * 100.0) / (getMax() - getMin());
        progress.setWidth(range > 0 ? range + "%" : "0px");
    }

    /**
     * Retrieve the Integer value from the given Attribute of the range element
     *
     * @param attribute The name of the attribute on the range element
     * @return The Integer value read from the given attribute or null
     */
    protected Integer getIntFromRangeElement(String attribute) {
        Element ele = $(rangeInputElement).asElement();
        if (ele != null) {
            return ele.getPropertyInt(attribute);
        }
        return null;
    }

    /**
     * Set the given Integer value to the attribute of the range element.
     */
    protected void setIntToRangeElement(String attribute, Integer val) {
        Element ele = $(rangeInputElement).asElement();
        if (ele != null) {
            ele.setPropertyInt(attribute, val);
        }
    }

    /**
     * Read the current value
     *
     * @return The Integer value or null
     */
    @Override
    public Integer getValue() {
        return getIntFromRangeElement(VALUE);
    }

    @Override
    public void setValue(Integer value, boolean fireEvents) {
        // Reject values outside [min, max] early with an explicit message.
        if (value == null) {
            throw new IllegalArgumentException("Value must not be null");
        }
        if (value < getMin()) {
            throw new IllegalArgumentException("Value must not be less than the minimum range value.");
        }
        if (value > getMax()) {
            throw new IllegalArgumentException("Value must not be greater than the maximum range value");
        }
        setIntToRangeElement(VALUE, value);
        updateProgressWidth(value);
        super.setValue(value, fireEvents);
    }

    /**
     * Read the min value
     *
     * @return The Integer or null
     */
    public Integer getMin() {
        return getIntFromRangeElement(MIN);
    }

    /**
     * Write the current min value
     *
     * @param min value must be < max
     */
    public void setMin(Integer min) {
        setIntToRangeElement(MIN, min);
    }

    /**
     * Read the max value
     *
     * @return The Integer or null
     */
    public Integer getMax() {
        return getIntFromRangeElement(MAX);
    }

    /**
     * Reads the {@code step} attribute of the underlying range input.
     * NOTE(review): a non-numeric attribute value would make
     * {@code Integer.parseInt} throw; confirm the attribute is only ever
     * written through {@link #setStep(Integer)}.
     *
     * @return the step as an Integer, or null when the attribute is unset
     */
    public Integer getStep() {
        String step = getRangeInputElement().getElement().getAttribute("step");
        if (step != null && !step.isEmpty()) {
            return Integer.parseInt(step);
        }
        return null;
    }

    /**
     * Writes the {@code step} attribute of the underlying range input.
     *
     * @param step the step increment of the slider
     */
    public void setStep(Integer step) {
        getRangeInputElement().getElement().setAttribute("step", step + "");
    }

    /** @return true when the draggable thumb indicator is shown. */
    public boolean isEnableThumb() {
        return getEnableThumbMixin().isOn();
    }

    /** Shows or hides the thumb indicator (toggles the no-thumb CSS class). */
    public void setEnableThumb(boolean enableThumb) {
        getEnableThumbMixin().setOn(!enableThumb);
    }

    /** @return true when the input is blurred automatically after a change. */
    public boolean isAutoBlur() {
        return autoBlur;
    }

    /** Enables blurring the range input automatically after a change event. */
    public void setAutoBlur(boolean autoBlur) {
        this.autoBlur = autoBlur;
    }

    /**
     * Write the current max value
     *
     * @param max value must be > min
     */
    public void setMax(Integer max) {
        setIntToRangeElement(MAX, max);
    }

    /** @return the label used to display validation/status text. */
    public MaterialLabel getErrorLabel() {
        return errorLabel;
    }

    /** @return the underlying {@code <input type="range">} widget. */
    public MaterialInput getRangeInputElement() {
        return rangeInputElement;
    }

    /** @return the panel wrapping the input, thumb and progress elements. */
    public MaterialPanel getRangeContainer() {
        return rangeContainer;
    }

    /** @return the custom thumb element shown while dragging. */
    public Span getThumb() {
        return thumb;
    }

    /** @return true when the user agent reports Internet Explorer (MSIE or Trident engine). */
    public boolean isIE() {
        // Read the UA string once instead of twice.
        String userAgent = Window.Navigator.getUserAgent();
        return userAgent.contains("MSIE") || userAgent.contains("Trident/");
    }

    @Override
    public void setEnabled(boolean enabled) {
        super.setEnabled(enabled);
        // Mirror the enabled state onto the native input's "disabled" attribute.
        if (!enabled) {
            getRangeInputElement().getElement().setAttribute(CssName.DISABLED, "true");
        } else {
            getRangeInputElement().getElement().removeAttribute(CssName.DISABLED);
        }
    }

    /**
     * Register the ChangeHandler to become notified if the user changes the slider.
     * The Handler is called when the user releases the mouse only at the end of the slide
     * operation.
     */
    @Override
    public HandlerRegistration addChangeHandler(final ChangeHandler handler) {
        return getRangeInputElement().addDomHandler(handler, ChangeEvent.getType());
    }

    /** Registers a handler fired continuously while the slider is being dragged. */
    @Override
    public HandlerRegistration addInputChangeHandler(InputChangeEvent.InputChangeHandler handler) {
        return addHandler(handler, InputChangeEvent.getType());
    }

    /** Lazily creates the mixin that routes status/error text to {@link #errorLabel}. */
    @Override
    public StatusTextMixin<AbstractValueWidget, MaterialLabel> getStatusTextMixin() {
        if (statusTextMixin == null) {
            statusTextMixin = new StatusTextMixin<>(this, errorLabel, null);
        }
        return statusTextMixin;
    }

    /** Lazily creates the mixin that toggles the no-thumb CSS class. */
    public ToggleStyleMixin<MaterialRange> getEnableThumbMixin() {
        if (enableThumbMixin == null) {
            enableThumbMixin = new ToggleStyleMixin<>(this, CssName.NO_THUMB);
        }
        return enableThumbMixin;
    }
}
| 3,986 |
310 | <reponame>orekyuu/doma
/** Provides classes to build dynamic SQL statements. */
package org.seasar.doma.jdbc.builder;
| 39 |
679 | /**************************************************************
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*************************************************************/
// MARKER(update_precomp.py): autogen include statement, do not remove
#include "precompiled_slideshow.hxx"
// must be first
#include <canvas/debug.hxx>
#include <tools/diagnose_ex.h>
#include <canvas/verbosetrace.hxx>
#include <continuouskeytimeactivitybase.hxx>
#include <boost/tuple/tuple.hpp>
#include <algorithm>
#include <iterator>
namespace slideshow
{
    namespace internal
    {
        // Validates the key time vector once at construction time; maLerper
        // is then used to interpolate between consecutive key time entries.
        ContinuousKeyTimeActivityBase::ContinuousKeyTimeActivityBase( const ActivityParameters& rParms ) :
            SimpleContinuousActivityBase( rParms ),
            maLerper( rParms.maDiscreteTimes )
        {
            // SMIL key times must form a sensible progression: at least two
            // entries, starting at 0.0 and never exceeding 1.0.
            ENSURE_OR_THROW( rParms.maDiscreteTimes.size() > 1,
                             "ContinuousKeyTimeActivityBase::ContinuousKeyTimeActivityBase(): key times vector must have two entries or more" );
            ENSURE_OR_THROW( rParms.maDiscreteTimes.front() == 0.0,
                             "ContinuousKeyTimeActivityBase::ContinuousKeyTimeActivityBase(): key times vector first entry must be zero" );
            ENSURE_OR_THROW( rParms.maDiscreteTimes.back() <= 1.0,
                             "ContinuousKeyTimeActivityBase::ContinuousKeyTimeActivityBase(): key times vector last entry must be less or equal 1" );
        }

        void ContinuousKeyTimeActivityBase::simplePerform( double nSimpleTime,
                                                           sal_uInt32 nRepeatCount ) const
        {
            // calc simple time from global time - sweep through the
            // array multiple times for repeated animations (according to
            // SMIL spec).
            double fAlpha( calcAcceleratedTime( nSimpleTime ) );

            // Map the eased time onto the key time intervals: nIndex selects
            // the interval, fAlpha becomes the relative position inside it.
            std::ptrdiff_t nIndex;
            boost::tuples::tie(nIndex,fAlpha) = maLerper.lerp(fAlpha);

            // Delegate the actual attribute update to the concrete activity.
            perform(
                nIndex,
                fAlpha,
                nRepeatCount );
        }
    }
}
| 1,079 |
4,538 | <filename>components/ota/ota_agent/verify/ota_verify_hash.c
/*
* Copyright (C) 2015-2017 Alibaba Group Holding Limited
*/
#include <string.h>
#include "ota_log.h"
#include "ota_import.h"
#include "ota_hal_digest.h"
#include "ota_hal_os.h"
#define OTA_BUF_VERIFY 512
/* Prepare @ctx for incremental digesting with the given algorithm @type
 * (OTA_SHA256 or OTA_MD5).  Returns 0 on success, OTA_INVALID_PARAMETER
 * for a NULL context or an unsupported digest type. */
int ota_hash_init(ota_hash_ctx_t *ctx, unsigned char type)
{
    if (ctx == NULL) {
        OTA_LOG_E("Invalid hash param");
        return OTA_INVALID_PARAMETER;
    }
    /* Remember the algorithm so update/final can dispatch on it. */
    ctx->hash_method = type;
    if (type == OTA_SHA256) {
        ota_sha256_init(&ctx->sha256_ctx);
        ota_sha256_starts(&ctx->sha256_ctx, 0);
        return 0;
    }
    if (type == OTA_MD5) {
        ota_md5_init(&ctx->md5_ctx);
        ota_md5_starts(&ctx->md5_ctx);
        return 0;
    }
    return OTA_INVALID_PARAMETER;
}
/* Feed @len bytes from @buf into the running digest held in @ctx.
 * Returns 0 on success, OTA_INVALID_PARAMETER for a NULL context or an
 * uninitialized/unsupported digest method. */
int ota_hash_update(ota_hash_ctx_t *ctx, const unsigned char *buf, unsigned int len)
{
    if (ctx == NULL) {
        OTA_LOG_E("Invalid hash param");
        return OTA_INVALID_PARAMETER;
    }
    if (ctx->hash_method == OTA_SHA256) {
        ota_sha256_update(&ctx->sha256_ctx, buf, len);
        return 0;
    }
    if (ctx->hash_method == OTA_MD5) {
        ota_md5_update(&ctx->md5_ctx, buf, len);
        return 0;
    }
    return OTA_INVALID_PARAMETER;
}
/* Finish the digest in @ctx, write the raw result into @dgst (32 bytes for
 * SHA-256, 16 for MD5) and release the underlying context.  Returns 0 on
 * success, OTA_INVALID_PARAMETER for a NULL context or unknown method. */
int ota_hash_final(ota_hash_ctx_t *ctx, unsigned char *dgst)
{
    if (ctx == NULL) {
        OTA_LOG_E("Invalid hash param");
        return OTA_INVALID_PARAMETER;
    }
    if (ctx->hash_method == OTA_SHA256) {
        ota_sha256_finish(&ctx->sha256_ctx, dgst);
        ota_sha256_free(&ctx->sha256_ctx);
        return 0;
    }
    if (ctx->hash_method == OTA_MD5) {
        ota_md5_finish(&ctx->md5_ctx, dgst);
        ota_md5_free(&ctx->md5_ctx);
        return 0;
    }
    return OTA_INVALID_PARAMETER;
}
/* Compare two hex-encoded digest strings (@src expected, @dst computed).
 * SHA-256 compares 64 hex characters, MD5 compares 32.  Returns 0 on match,
 * the matching OTA_VERIFY_*_FAIL code on mismatch, and
 * OTA_INVALID_PARAMETER for an unknown digest type. */
int ota_check_hash(unsigned char type, char *src, char *dst)
{
    if (type == OTA_SHA256) {
        OTA_LOG_I("SHA256 src:%s dst:%s", src, dst);
        return (strncmp(dst, src, 64) == 0) ? 0 : OTA_VERIFY_SHA2_FAIL;
    }
    if (type == OTA_MD5) {
        OTA_LOG_I("md5 src:%s dst:%s", src, dst);
        return (strncmp(dst, src, 32) == 0) ? 0 : OTA_VERIFY_MD5_FAIL;
    }
    return OTA_INVALID_PARAMETER;
}
/* Digest the first @image_len bytes of the downloaded image (read back from
 * flash via ota_read) and return the raw digest in @hash_value.
 *
 * @image_len  number of image bytes to hash (excluding any trailer)
 * @type       OTA_SHA256 or OTA_MD5
 * @hash_value output buffer for the raw digest; needs >= 32 bytes for
 *             SHA-256, >= 16 for MD5
 * @len        capacity of @hash_value, validated against the digest size
 *
 * Also computes a CRC16 over the same bytes, but only logs it -- the CRC is
 * not compared against anything here.  Returns 0 on success or an
 * OTA_* error code. */
int ota_check_image_hash(unsigned int image_len, unsigned char type, char *hash_value, int len)
{
    int ret = 0;
    char *rd_buf = NULL;
    unsigned int read_size = 0;
    unsigned int offset = 0;
    ota_hash_ctx_t ctx = {0};
    ota_crc16_ctx crc_ctx = {0};
    unsigned short crc_result = 0;
    if (NULL == hash_value) {
        ret = OTA_VERIFY_IMAGE_FAIL;
        goto EXIT;
    }
    /* Make sure the caller's buffer can hold the raw digest. */
    switch (type) {
        case OTA_SHA256:
            if (len < 32) {
                ret = OTA_VERIFY_SHA2_FAIL;
                goto EXIT;
            }
            break;
        case OTA_MD5:
            if (len < 16) {
                ret = OTA_VERIFY_MD5_FAIL;
                goto EXIT;
            }
            break;
        default:
            ret = OTA_INVALID_PARAMETER;
            goto EXIT;
    }
    rd_buf = ota_malloc(OTA_BUF_VERIFY);
    if (rd_buf == NULL) {
        ret = OTA_VERIFY_IMAGE_FAIL;
        goto EXIT;
    }
    ret = ota_hash_init(&ctx, type);
    if (ret != 0) {
        goto EXIT;
    }
    ota_crc16_init(&crc_ctx);
    /* Stream the image through the digest in OTA_BUF_VERIFY-sized chunks. */
    while (offset < image_len) {
        unsigned int off = offset;
        (image_len - offset >= OTA_BUF_VERIFY) ? (read_size = OTA_BUF_VERIFY) : (read_size = image_len - offset);
        ret = ota_read(&off, rd_buf, read_size);
        if (ret < 0) {
            ret = OTA_VERIFY_IMAGE_FAIL;
            goto EXIT;
        }
        ret = ota_hash_update(&ctx, (const unsigned char *)rd_buf, read_size);
        if (ret != 0) {
            goto EXIT;
        }
        ota_crc16_update(&crc_ctx, (void *)rd_buf, read_size);
        offset += read_size;
        /* Brief sleep so other tasks get CPU time during the long scan. */
        ota_msleep(5);
    }
    memset(hash_value, 0x00, len);
    ota_crc16_final(&crc_ctx, &crc_result);
    OTA_LOG_I("check image_crc16 = %x\r\n", crc_result);
    ret = ota_hash_final(&ctx, (unsigned char *)hash_value);
EXIT:
    if (NULL != rd_buf) {
        ota_free(rd_buf);
        rd_buf = NULL;
    }
    if (ret != 0) {
        OTA_LOG_E("check image hash:%d", ret);
    }
    return ret;
}
/* Read the ota_image_info_t trailer that the packaging tool appends at the
 * very end of the downloaded binary into @tmp_info.
 *
 * @image_size total size of the downloaded binary, trailer included
 * @tmp_info   output; receives magic, payload size, MD5 and CRC16 fields
 *
 * Returns the ota_read result (>= 0 on success, < 0 on failure). */
int ota_get_image_info(unsigned int image_size, ota_image_info_t *tmp_info)
{
    int ret = -1;
    unsigned int off_set = 0;
    /* The binary must be strictly larger than the trailer itself. */
    if ((tmp_info == NULL) || (image_size <= sizeof(ota_image_info_t))) {
        OTA_LOG_E("input param err for getting image info!");
        return ret;
    }
    off_set = image_size - sizeof(ota_image_info_t);
    OTA_LOG_I("bin size:%d off:%d\r\n", image_size, off_set);
    ret = ota_read(&off_set, (char *)tmp_info, sizeof(ota_image_info_t));
    if (ret < 0) {
        OTA_LOG_E("read image info err!");
    }
    /* NOTE(review): this logs tmp_info's fields even when the read above
       failed, i.e. it may print stale/garbage values -- confirm intended. */
    OTA_LOG_I("magic:0x%04x, size:%d, crc16:0x%02x\r\n", tmp_info->image_magic, tmp_info->image_size, tmp_info->image_crc16);
    return ret;
}
/* End-to-end integrity check of a downloaded OTA image of @size bytes:
 * reads the trailer appended to the binary, recomputes the MD5 of the
 * payload (trailer excluded) and compares it against the MD5 stored in the
 * trailer.  Returns 0 when the image is intact, an OTA_* error otherwise. */
int ota_check_image(unsigned int size)
{
    int ret = 0;
    char image_md5[16] = {0};      /* raw recomputed digest */
    char download_md5[33] = {0};   /* hex of the digest stored in the trailer */
    char cal_md5[33] = {0};        /* hex of the recomputed digest */
    ota_image_info_t image_info = {0};
    /* Pull the trailer from the end of the binary. */
    ret = ota_get_image_info(size, &image_info);
    if (ret < 0) {
        ret = OTA_VERIFY_IMAGE_FAIL;
        goto EXIT;
    }
    ret = ota_hex2str(download_md5, (const unsigned char *)image_info.image_md5, sizeof(download_md5), sizeof(image_info.image_md5));
    if (ret != 0) {
        ret = OTA_VERIFY_IMAGE_FAIL;
        goto EXIT;
    }
    OTA_LOG_I("magic:0x%04x size:%d md5:%s crc16:0x%02x", image_info.image_magic, image_info.image_size, download_md5, image_info.image_crc16);
    /* The trailer magic tells which partition the binary targets; anything
       else means the download is not a recognized OTA image. */
    if ((image_info.image_magic != OTA_BIN_MAGIC_APP) &&
        (image_info.image_magic != OTA_BIN_MAGIC_KERNEL) &&
        (image_info.image_magic != OTA_BIN_MAGIC_ALL) &&
        (image_info.image_magic != OTA_BIN_MAGIC_MCU) &&
        (image_info.image_magic != OTA_BIN_MAGIC_FS)) {
        ret = OTA_INVALID_PARAMETER;
        goto EXIT;
    }
    if (image_info.image_size == 0) {
        ret = OTA_VERIFY_IMAGE_FAIL;
        goto EXIT;
    }
    /* Hash only the payload (image_size from the trailer, not @size). */
    ret = ota_check_image_hash(image_info.image_size, OTA_MD5, image_md5, 16);
    if (ret != 0) {
        goto EXIT;
    }
    ret = ota_hex2str(cal_md5, (const unsigned char *)image_md5, sizeof(cal_md5), sizeof(image_md5));
    if (ret != 0) {
        ret = OTA_VERIFY_IMAGE_FAIL;
        goto EXIT;
    }
    /* Compare recomputed vs. expected MD5 (hex string comparison). */
    ret = ota_check_hash(OTA_MD5, cal_md5, download_md5);
    if (ret != 0) {
        goto EXIT;
    }
EXIT:
    if (ret != 0) {
        OTA_LOG_E("ota check_image :%d", ret);
    }
    return ret;
}
| 3,840 |
563 | <reponame>OferLevi85/athenz
/*
* Copyright The Athenz Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.yahoo.athenz.zts.workload.impl;
import com.amazonaws.services.dynamodbv2.document.*;
import com.amazonaws.services.dynamodbv2.document.internal.IteratorSupport;
import com.amazonaws.services.dynamodbv2.document.spec.QuerySpec;
import com.amazonaws.services.dynamodbv2.document.spec.UpdateItemSpec;
import com.amazonaws.services.dynamodbv2.model.AmazonDynamoDBException;
import com.yahoo.athenz.common.server.workload.WorkloadRecord;
import com.yahoo.athenz.zts.ZTSTestUtils;
import org.mockito.ArgumentMatchers;
import org.mockito.Mock;
import org.mockito.Mockito;
import org.mockito.MockitoAnnotations;
import org.testng.Assert;
import org.testng.annotations.BeforeMethod;
import org.testng.annotations.Test;
import java.util.Date;
import java.util.List;
import static org.mockito.Mockito.when;
/**
 * Unit tests for {@code DynamoDBWorkloadRecordStoreConnection}. Every DynamoDB
 * document-API collaborator (table, secondary indexes, items, outcomes) is a
 * Mockito mock, so no real AWS calls are made.
 */
public class DynamoDBWorkloadRecordStoreConnectionTest {

    private final String tableName = "workload-table";
    private final String serviceIndexName = "service-index";
    private final String ipIndexName = "ip-index";

    @Mock private DynamoDB dynamoDB = Mockito.mock(DynamoDB.class);
    @Mock private Table table = Mockito.mock(Table.class);
    @Mock private Index serviceIndex = Mockito.mock(Index.class);
    @Mock private Index ipIndex = Mockito.mock(Index.class);
    @Mock private Item item = Mockito.mock(Item.class);
    @Mock private PutItemOutcome putOutcome = Mockito.mock(PutItemOutcome.class);
    @Mock private DeleteItemOutcome deleteOutcome = Mockito.mock(DeleteItemOutcome.class);
    @Mock private UpdateItemOutcome updateOutcome = Mockito.mock(UpdateItemOutcome.class);

    /** Wires the mocked table and its two secondary indexes before each test. */
    @BeforeMethod
    public void setUp() {
        MockitoAnnotations.initMocks(this);
        Mockito.doReturn(table).when(dynamoDB).getTable(tableName);
        Mockito.doReturn(serviceIndex).when(table).getIndex(serviceIndexName);
        Mockito.doReturn(ipIndex).when(table).getIndex(ipIndexName);
    }

    /** Creates a connection wired to the mocked DynamoDB, table and index names. */
    private DynamoDBWorkloadRecordStoreConnection getDBConnection() {
        return new DynamoDBWorkloadRecordStoreConnection(dynamoDB, tableName, serviceIndexName, ipIndexName);
    }

    /**
     * Happy path: a service-index query yields one fully populated record,
     * including the optional hostname and certExpiryTime attributes.
     */
    @Test
    public void testGetWorkloadRecordsByService() {
        long currTime = System.currentTimeMillis();
        Mockito.doReturn("1234").when(item).getString("instanceId");
        Mockito.doReturn("openstack").when(item).getString("provider");
        Mockito.doReturn("10.10.10.11").when(item).getString("ip");
        Mockito.doReturn("test-host").when(item).getString("hostname");
        Mockito.doReturn(true).when(item).hasAttribute("hostname");
        Mockito.doReturn(currTime).when(item).get("creationTime");
        Mockito.doReturn(currTime).when(item).get("updateTime");
        Mockito.doReturn(currTime).when(item).get("certExpiryTime");
        Mockito.doReturn(currTime).when(item).getLong("creationTime");
        Mockito.doReturn(currTime).when(item).getLong("updateTime");
        Mockito.doReturn(currTime).when(item).getLong("certExpiryTime");
        Mockito.doReturn(true).when(item).hasAttribute("certExpiryTime");

        // Query results come back as an ItemCollection whose iterator yields
        // the single mocked item once.
        ItemCollection<QueryOutcome> itemCollection = Mockito.mock(ItemCollection.class);
        IteratorSupport<Item, QueryOutcome> iteratorSupport = Mockito.mock(IteratorSupport.class);
        when(itemCollection.iterator()).thenReturn(iteratorSupport);
        when(iteratorSupport.hasNext()).thenReturn(true, false);
        when(iteratorSupport.next()).thenReturn(item);
        Mockito.doReturn(itemCollection).when(serviceIndex).query(Mockito.any(QuerySpec.class));

        DynamoDBWorkloadRecordStoreConnection dbConn = getDBConnection();
        dbConn.setOperationTimeout(10);
        List<WorkloadRecord> wlRecordList = dbConn.getWorkloadRecordsByService("athenz", "api");
        Assert.assertNotNull(wlRecordList);
        Assert.assertEquals(wlRecordList.get(0).getInstanceId(), "1234");
        Assert.assertEquals(wlRecordList.get(0).getProvider(), "openstack");
        Assert.assertEquals(wlRecordList.get(0).getIp(), "10.10.10.11");
        Assert.assertEquals(wlRecordList.get(0).getHostname(), "test-host");
        Assert.assertEquals(wlRecordList.get(0).getUpdateTime(), new Date(currTime));
        Assert.assertEquals(wlRecordList.get(0).getCertExpiryTime(), new Date(currTime));
        dbConn.close();
    }

    /** A null query result must translate into an empty record list. */
    @Test
    public void testGetWorkloadRecordsByServiceNotFoundNull() {
        Mockito.doReturn(null).when(serviceIndex).query(Mockito.any(QuerySpec.class));
        DynamoDBWorkloadRecordStoreConnection dbConn = getDBConnection();
        List<WorkloadRecord> wlRecordList = dbConn.getWorkloadRecordsByService("athenz", "api");
        Assert.assertTrue(wlRecordList.isEmpty());
        dbConn.close();
    }

    /**
     * IP-index lookup. hasAttribute("hostname") and hasAttribute("certExpiryTime")
     * are deliberately left unstubbed (Mockito defaults to false), so the
     * assertions below expect the fallback values "NA" and epoch.
     */
    @Test
    public void testGetWorkloadRecordsByIp() {
        long currTime = System.currentTimeMillis();
        Mockito.doReturn("1234").when(item).getString("instanceId");
        Mockito.doReturn("openstack").when(item).getString("provider");
        Mockito.doReturn("athenz.api").when(item).getString("service");
        Mockito.doReturn("test-host").when(item).getString("hostname");
        Mockito.doReturn(currTime).when(item).get("creationTime");
        Mockito.doReturn(currTime).when(item).get("updateTime");
        Mockito.doReturn(currTime).when(item).get("certExpiryTime");
        Mockito.doReturn(currTime).when(item).getLong("creationTime");
        Mockito.doReturn(currTime).when(item).getLong("updateTime");
        Mockito.doReturn(currTime).when(item).getLong("certExpiryTime");

        ItemCollection<QueryOutcome> itemCollection = Mockito.mock(ItemCollection.class);
        IteratorSupport<Item, QueryOutcome> iteratorSupport = Mockito.mock(IteratorSupport.class);
        when(itemCollection.iterator()).thenReturn(iteratorSupport);
        when(iteratorSupport.hasNext()).thenReturn(true, false);
        when(iteratorSupport.next()).thenReturn(item);
        Mockito.doReturn(itemCollection).when(ipIndex).query(Mockito.any(QuerySpec.class));

        DynamoDBWorkloadRecordStoreConnection dbConn = getDBConnection();
        dbConn.setOperationTimeout(10);
        List<WorkloadRecord> wlRecordList = dbConn.getWorkloadRecordsByIp("10.0.0.1");
        Assert.assertNotNull(wlRecordList);
        Assert.assertEquals(wlRecordList.get(0).getInstanceId(), "1234");
        Assert.assertEquals(wlRecordList.get(0).getProvider(), "openstack");
        Assert.assertEquals(wlRecordList.get(0).getService(), "athenz.api");
        Assert.assertEquals(wlRecordList.get(0).getHostname(), "NA");
        Assert.assertEquals(wlRecordList.get(0).getUpdateTime(), new Date(currTime));
        Assert.assertEquals(wlRecordList.get(0).getCertExpiryTime(), new Date(0));
        dbConn.close();
    }

    /** A null IP-index query result must translate into an empty record list. */
    @Test
    public void testGetWorkloadRecordsByIpNotFoundNull() {
        Mockito.doReturn(null).when(ipIndex).query(Mockito.any(QuerySpec.class));
        DynamoDBWorkloadRecordStoreConnection dbConn = getDBConnection();
        List<WorkloadRecord> wlRecordList = dbConn.getWorkloadRecordsByIp("10.0.0.1");
        Assert.assertTrue(wlRecordList.isEmpty());
        dbConn.close();
    }

    /** AWS exceptions from the service index are swallowed into an empty list. */
    @Test
    public void testGetWorkloadRecordsByServiceNotFoundException() {
        Mockito.doThrow(new AmazonDynamoDBException("item not found"))
                .when(serviceIndex).query(Mockito.any(QuerySpec.class));
        DynamoDBWorkloadRecordStoreConnection dbConn = getDBConnection();
        List<WorkloadRecord> wlRecordList = dbConn.getWorkloadRecordsByService("athenz", "api");
        Assert.assertTrue(wlRecordList.isEmpty());
        dbConn.close();
    }

    /** AWS exceptions from the IP index are swallowed into an empty list. */
    @Test
    public void testGetWorkloadRecordsByIpNotFoundException() {
        Mockito.doThrow(new AmazonDynamoDBException("item not found"))
                .when(ipIndex).query(Mockito.any(QuerySpec.class));
        DynamoDBWorkloadRecordStoreConnection dbConn = getDBConnection();
        List<WorkloadRecord> wlRecordList = dbConn.getWorkloadRecordsByIp("10.0.0.1");
        Assert.assertTrue(wlRecordList.isEmpty());
        dbConn.close();
    }

    /** Successful insert: putItem is stubbed for the expected fixture item. */
    @Test
    public void testInsertWorkloadRecord() {
        DynamoDBWorkloadRecordStoreConnection dbConn = getDBConnection();
        WorkloadRecord workloadRecord = new WorkloadRecord();
        workloadRecord.setInstanceId("1234");
        workloadRecord.setService("athenz.api");
        workloadRecord.setProvider("openstack");
        workloadRecord.setIp("10.0.0.1");
        workloadRecord.setHostname("test-host.corp.yahoo.com");
        long currTime = System.currentTimeMillis();
        Date currDate = new Date(currTime);
        workloadRecord.setCreationTime(currDate);
        workloadRecord.setUpdateTime(currDate);
        // Fixed: provider was misspelled "opensack", which made this stub
        // never match the item built inside insertWorkloadRecord.
        Item item = ItemUtils.toItem(ZTSTestUtils.generateWorkloadAttributeValues("athenz.api", "1234", "openstack", "10.0.0.1", "test-host.corp.yahoo.com",
                Long.toString(currTime), Long.toString(currTime), Long.toString(currTime)));
        Mockito.doReturn(putOutcome).when(table).putItem(item);
        boolean requestSuccess = dbConn.insertWorkloadRecord(workloadRecord);
        Assert.assertTrue(requestSuccess);
        dbConn.close();
    }

    /** putItem failures must be reported as an unsuccessful insert. */
    @Test
    public void testInsertWorkloadRecordException() {
        WorkloadRecord workloadRecord = new WorkloadRecord();
        Mockito.doThrow(new AmazonDynamoDBException("invalid operation"))
                .when(table).putItem(ArgumentMatchers.any(Item.class));
        DynamoDBWorkloadRecordStoreConnection dbConn = getDBConnection();
        boolean requestSuccess = dbConn.insertWorkloadRecord(workloadRecord);
        Assert.assertFalse(requestSuccess);
        dbConn.close();
    }

    /** Successful update of provider and updateTime via an UpdateItemSpec. */
    @Test
    public void testUpdateWorkloadRecord() {
        DynamoDBWorkloadRecordStoreConnection dbConn = getDBConnection();
        WorkloadRecord workloadRecord = new WorkloadRecord();
        workloadRecord.setProvider("openstack");
        long currTime = System.currentTimeMillis();
        Date currDate = new Date(currTime);
        workloadRecord.setUpdateTime(currDate);
        UpdateItemSpec item = new UpdateItemSpec()
                .withPrimaryKey("primaryKey", "athenz.api#1234#10.0.0.1")
                .withAttributeUpdate(
                        new AttributeUpdate("provider").put(workloadRecord.getProvider()),
                        new AttributeUpdate("updateTime").put(workloadRecord.getUpdateTime()));
        Mockito.doReturn(updateOutcome).when(table).updateItem(item);
        boolean requestSuccess = dbConn.updateWorkloadRecord(workloadRecord);
        Assert.assertTrue(requestSuccess);
        dbConn.close();
    }

    /** updateItem failures must be reported as an unsuccessful update. */
    @Test
    public void testUpdateWorkloadRecordException() {
        WorkloadRecord workloadRecord = new WorkloadRecord();
        Mockito.doThrow(new AmazonDynamoDBException("invalid operation"))
                .when(table).updateItem(ArgumentMatchers.any(UpdateItemSpec.class));
        DynamoDBWorkloadRecordStoreConnection dbConn = getDBConnection();
        boolean requestSuccess = dbConn.updateWorkloadRecord(workloadRecord);
        Assert.assertFalse(requestSuccess);
        dbConn.close();
    }
}
| 4,492 |
14,668 | <reponame>zealoussnow/chromium
// Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "google_apis/gcm/engine/fake_connection_factory.h"
#include <memory>
#include "google_apis/gcm/engine/fake_connection_handler.h"
#include "google_apis/gcm/protocol/mcs.pb.h"
#include "mojo/public/cpp/system/data_pipe.h"
#include "net/socket/stream_socket.h"
#include "net/traffic_annotation/network_traffic_annotation_test_helper.h"
namespace gcm {

// Test double for the GCM connection factory: hands out a
// FakeConnectionHandler and "connects" synchronously with empty Mojo data
// pipes, so no real network I/O happens in unit tests.
FakeConnectionFactory::FakeConnectionFactory()
    : reconnect_pending_(false),
      delay_reconnect_(false),
      connection_listener_(nullptr) {}

FakeConnectionFactory::~FakeConnectionFactory() = default;

void FakeConnectionFactory::Initialize(
    const BuildLoginRequestCallback& request_builder,
    const ConnectionHandler::ProtoReceivedCallback& read_callback,
    const ConnectionHandler::ProtoSentCallback& write_callback) {
  request_builder_ = request_builder;
  connection_handler_ =
      std::make_unique<FakeConnectionHandler>(read_callback, write_callback);
}

ConnectionHandler* FakeConnectionFactory::GetConnectionHandler() const {
  return connection_handler_.get();
}

void FakeConnectionFactory::Connect() {
  // Build the login request via the injected builder, then initialize the
  // fake handler with empty (invalid) data pipe handles.
  mcs_proto::LoginRequest login_request;
  request_builder_.Run(&login_request);
  connection_handler_->Init(login_request, mojo::ScopedDataPipeConsumerHandle(),
                            mojo::ScopedDataPipeProducerHandle());
}

bool FakeConnectionFactory::IsEndpointReachable() const {
  return connection_handler_.get() && connection_handler_->CanSendMessage();
}

std::string FakeConnectionFactory::GetConnectionStateString() const {
  return "";
}

base::TimeTicks FakeConnectionFactory::NextRetryAttempt() const {
  return base::TimeTicks();
}

void FakeConnectionFactory::SignalConnectionReset(
    ConnectionResetReason reason) {
  // The fake ignores |reason|; it either reconnects immediately or records a
  // pending reconnect, depending on delay_reconnect_.
  if (!delay_reconnect_) {
    Connect();
  } else {
    reconnect_pending_ = true;
  }
  if (connection_listener_) {
    connection_listener_->OnDisconnected();
  }
}

void FakeConnectionFactory::SetConnectionListener(
    ConnectionListener* listener) {
  connection_listener_ = listener;
}

}  // namespace gcm
| 705 |
356 | <reponame>AyuBisht/Algo-Tree<filename>Code/Java/Longest_Common_Prefix.java<gh_stars>100-1000
/* Here is the code for finding the Longest Common Prefix
   of the whitespace-separated words the user provides on standard input.
_______________________________________________________________
*/
import java.util.Scanner;
/**
 * Reads a whitespace-separated list of words from standard input and prints
 * their longest common prefix.
 */
public class Main {

    /**
     * Returns the longest common prefix shared by every string in {@code strs},
     * or the empty string when the array is {@code null}, empty, or the strings
     * share no common prefix.
     *
     * <p>Package-private (rather than private) so it can be unit-tested.
     */
    static String longestCommonPrefix(String[] strs) {
        // Fixed: the null check must come before the length access, otherwise
        // a null array would throw NullPointerException instead of returning "".
        if (strs == null || strs.length == 0) {
            return "";
        }
        // StringBuilder avoids the O(P^2) cost of repeated string concatenation.
        StringBuilder prefix = new StringBuilder();
        int index = 0;
        for (char c : strs[0].toCharArray()) {
            for (int i = 1; i < strs.length; i++) {
                // Stop as soon as one string is exhausted or mismatches.
                if (index >= strs[i].length() || c != strs[i].charAt(index)) {
                    return prefix.toString();
                }
            }
            prefix.append(c);
            index++;
        }
        return prefix.toString();
    }

    public static void main(String[] args) {
        Scanner sc = new Scanner(System.in);
        String str = sc.nextLine();
        String[] words = str.split("\\s+");
        System.out.println(longestCommonPrefix(words));
    }
}
/*
Time complexity:
O(S), where S is the total number of characters across all input strings;
in the worst case every character is compared once.
Space complexity:
O(P), where P is the length of the common prefix being accumulated.
*/
/*
Test cases:
Input: small smile smell
Output: sm
Input: flight flock fly
Output: fl
*/
| 599 |
315 | <reponame>shink/incubator-inlong
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.inlong.manager.common.enums;
import java.util.Arrays;
/**
 * Rules describing how often the file agent generates data, each paired with
 * its schedule expression (a cron string, or the literal {@code "run_once"}
 * marker for the {@link #RANDOM} rule).
 */
public enum FileAgentDataGenerateRule {

    DAY("day", "5 1 0 * * ?"),
    HOUR("hour", "5 1 0/1 * * ?"),
    MINUTE("minute", "1 0/10 * * * ?"),
    RANDOM("random", "run_once");

    private final String rule;
    private final String scheduleRule;

    FileAgentDataGenerateRule(String rule, String scheduleRule) {
        this.rule = rule;
        this.scheduleRule = scheduleRule;
    }

    /**
     * Looks up the constant for the given rule value; {@code null} when the
     * value is {@code null} or unknown.
     */
    public static FileAgentDataGenerateRule fromRuleValue(String rule) {
        if (rule == null) {
            return null;
        }
        return parse(rule);
    }

    /**
     * Case-insensitive lookup by rule name; {@code null} when no constant matches.
     */
    public static FileAgentDataGenerateRule parse(String value) {
        for (FileAgentDataGenerateRule candidate : values()) {
            if (candidate.rule.equalsIgnoreCase(value)) {
                return candidate;
            }
        }
        return null;
    }

    public String getRule() {
        return rule;
    }

    public String getScheduleRule() {
        return scheduleRule;
    }
}
| 614 |
4,812 | //===--- DeltaAlgorithm.cpp - A Set Minimization Algorithm -----*- C++ -*--===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//===----------------------------------------------------------------------===//
#include "llvm/ADT/DeltaAlgorithm.h"
#include <algorithm>
#include <iterator>
#include <set>
using namespace llvm;
// Defaulted out-of-line: keeping the definition in this translation unit
// (rather than making it implicit) presumably anchors the class's vtable
// here — confirm against the header before moving it.
DeltaAlgorithm::~DeltaAlgorithm() = default;
// Memoized front-end for ExecuteOneTest: change sets already known to fail
// are never re-executed.
bool DeltaAlgorithm::GetTestResult(const changeset_ty &Changes) {
  if (FailedTestsCache.count(Changes))
    return false;

  const bool Passed = ExecuteOneTest(Changes);
  if (!Passed)
    FailedTestsCache.insert(Changes);
  return Passed;
}
void DeltaAlgorithm::Split(const changeset_ty &S, changesetlist_ty &Res) {
// FIXME: Allow clients to provide heuristics for improved splitting.
// FIXME: This is really slow.
changeset_ty LHS, RHS;
unsigned idx = 0, N = S.size() / 2;
for (changeset_ty::const_iterator it = S.begin(),
ie = S.end(); it != ie; ++it, ++idx)
((idx < N) ? LHS : RHS).insert(*it);
if (!LHS.empty())
Res.push_back(LHS);
if (!RHS.empty())
Res.push_back(RHS);
}
// Core of the delta-debugging recursion: minimizes Changes with respect to
// the partition Sets. Precondition (maintained by callers): union(Sets) ==
// Changes.
DeltaAlgorithm::changeset_ty
DeltaAlgorithm::Delta(const changeset_ty &Changes,
                      const changesetlist_ty &Sets) {
  // Invariant: union(Res) == Changes
  UpdatedSearchState(Changes, Sets);

  // If there is nothing left we can remove, we are done.
  if (Sets.size() <= 1)
    return Changes;

  // Look for a passing subset.
  changeset_ty Res;
  if (Search(Changes, Sets, Res))
    return Res;

  // Otherwise, partition the sets if possible; if not we are done.
  changesetlist_ty SplitSets;
  for (changesetlist_ty::const_iterator it = Sets.begin(),
         ie = Sets.end(); it != ie; ++it)
    Split(*it, SplitSets);
  // Split() leaves singleton sets as-is, so an unchanged set count means no
  // set could be subdivided further and the recursion terminates.
  if (SplitSets.size() == Sets.size())
    return Changes;

  return Delta(Changes, SplitSets);
}
// Tries to find a passing subset: first each set in Sets on its own, then
// (when more than two sets exist) the complement of each set. On success the
// recursively minimized passing set is stored in Res and true is returned.
bool DeltaAlgorithm::Search(const changeset_ty &Changes,
                            const changesetlist_ty &Sets,
                            changeset_ty &Res) {
  // FIXME: Parallelize.
  for (changesetlist_ty::const_iterator it = Sets.begin(),
         ie = Sets.end(); it != ie; ++it) {
    // If the test passes on this subset alone, recurse.
    if (GetTestResult(*it)) {
      // NOTE: this local deliberately shadows the Sets parameter; it holds
      // the fresh split of *it used for the recursive minimization.
      changesetlist_ty Sets;
      Split(*it, Sets);
      Res = Delta(*it, Sets);
      return true;
    }

    // Otherwise, if we have more than two sets, see if test passes on the
    // complement.
    if (Sets.size() > 2) {
      // FIXME: This is really slow.
      changeset_ty Complement;
      std::set_difference(
          Changes.begin(), Changes.end(), it->begin(), it->end(),
          std::insert_iterator<changeset_ty>(Complement, Complement.begin()));
      if (GetTestResult(Complement)) {
        // Recurse on the complement, partitioned as all sets except *it.
        changesetlist_ty ComplementSets;
        ComplementSets.insert(ComplementSets.end(), Sets.begin(), it);
        ComplementSets.insert(ComplementSets.end(), it + 1, Sets.end());
        Res = Delta(Complement, ComplementSets);
        return true;
      }
    }
  }

  return false;
}
// Public entry point: returns a minimized subset of Changes on which the test
// still passes.
DeltaAlgorithm::changeset_ty DeltaAlgorithm::Run(const changeset_ty &Changes) {
  // A test that already passes on the empty set is degenerate; report the
  // empty set as the answer immediately.
  if (GetTestResult(changeset_ty()))
    return changeset_ty();

  // Seed the recursion with an initial two-way split of the full change set.
  changesetlist_ty InitialSets;
  Split(Changes, InitialSets);
  return Delta(Changes, InitialSets);
}
| 1,303 |
8,629 | #include "IFileCache.h"
#include <Common/hex.h>
#include <Common/CurrentThread.h>
#include <Common/SipHash.h>
#include <Common/FileCacheSettings.h>
#include <IO/ReadSettings.h>
#include <filesystem>
namespace fs = std::filesystem;
namespace DB
{

namespace ErrorCodes
{
    extern const int REMOTE_FS_OBJECT_CACHE_ERROR;
    extern const int LOGICAL_ERROR;
}

/// Copies the cache directory path and the size/element limits out of the settings.
IFileCache::IFileCache(
    const String & cache_base_path_,
    const FileCacheSettings & cache_settings_)
    : cache_base_path(cache_base_path_)
    , max_size(cache_settings_.max_size)
    , max_element_size(cache_settings_.max_elements)
    , max_file_segment_size(cache_settings_.max_file_segment_size)
    , enable_filesystem_query_cache_limit(cache_settings_.enable_filesystem_query_cache_limit)
{
}

/// Lowercase hex rendering of the 128-bit key.
String IFileCache::Key::toString() const
{
    return getHexUIntLowercase(key);
}

/// Cache keys are the SipHash-128 of the path string.
IFileCache::Key IFileCache::hash(const String & path)
{
    return Key(sipHash128(path.data(), path.size()));
}

/// On-disk layout of one segment:
/// <base>/<first 3 hex chars of key>/<key>/<offset>[_persistent]
String IFileCache::getPathInLocalCache(const Key & key, size_t offset, bool is_persistent) const
{
    auto key_str = key.toString();
    return fs::path(cache_base_path)
        / key_str.substr(0, 3)
        / key_str
        / (std::to_string(offset) + (is_persistent ? "_persistent" : ""));
}

/// Directory holding all segments of a key: <base>/<first 3 hex chars>/<key>.
String IFileCache::getPathInLocalCache(const Key & key) const
{
    auto key_str = key.toString();
    return fs::path(cache_base_path) / key_str.substr(0, 3) / key_str;
}

/// True when the current thread runs inside a query: thread state, a query
/// context and a non-empty query id are all present.
static bool isQueryInitialized()
{
    return CurrentThread::isInitialized()
        && CurrentThread::get().getQueryContext()
        && CurrentThread::getQueryId().size != 0;
}

/// Outside of a query the cache is treated as read-only.
bool IFileCache::isReadOnly()
{
    return !isQueryInitialized();
}

void IFileCache::assertInitialized() const
{
    if (!is_initialized)
        throw Exception(ErrorCodes::REMOTE_FS_OBJECT_CACHE_ERROR, "Cache not initialized");
}

/// Context of the query running on the current thread, or nullptr when not
/// inside a query. Caller must hold `mutex` (witnessed by cache_lock).
IFileCache::QueryContextPtr IFileCache::getCurrentQueryContext(std::lock_guard<std::mutex> & cache_lock)
{
    if (!isQueryInitialized())
        return nullptr;

    return getQueryContext(CurrentThread::getQueryId().toString(), cache_lock);
}

/// Pure lookup in query_map; the (unused) lock argument documents that the
/// caller must already hold `mutex`.
IFileCache::QueryContextPtr IFileCache::getQueryContext(const String & query_id, std::lock_guard<std::mutex> & /* cache_lock */)
{
    auto query_iter = query_map.find(query_id);
    return (query_iter == query_map.end()) ? nullptr : query_iter->second;
}

/// Unregisters a query's context; throws LOGICAL_ERROR if it is not registered.
void IFileCache::removeQueryContext(const String & query_id)
{
    std::lock_guard cache_lock(mutex);
    auto query_iter = query_map.find(query_id);

    if (query_iter == query_map.end())
    {
        throw Exception(
            ErrorCodes::LOGICAL_ERROR,
            "Attempt to release query context that does not exist (query_id: {})",
            query_id);
    }

    query_map.erase(query_iter);
}

/// Returns the registered context for query_id or registers a fresh one sized
/// by the read settings. An empty query_id yields nullptr (no per-query limit).
IFileCache::QueryContextPtr IFileCache::getOrSetQueryContext(
    const String & query_id, const ReadSettings & settings, std::lock_guard<std::mutex> & cache_lock)
{
    if (query_id.empty())
        return nullptr;

    auto context = getQueryContext(query_id, cache_lock);
    if (context)
        return context;

    auto query_context = std::make_shared<QueryContext>(settings.max_query_cache_size, settings.skip_download_if_exceeds_query_cache);
    auto query_iter = query_map.emplace(query_id, query_context).first;
    return query_iter->second;
}

/// RAII entry point: registers a per-query context when the per-query cache
/// limit feature is enabled and a positive limit is configured; otherwise
/// returns an empty holder.
IFileCache::QueryContextHolder IFileCache::getQueryContextHolder(const String & query_id, const ReadSettings & settings)
{
    std::lock_guard cache_lock(mutex);

    if (!enable_filesystem_query_cache_limit || settings.max_query_cache_size == 0)
        return {};

    /// if enable_filesystem_query_cache_limit is true, and max_query_cache_size large than zero,
    /// we create context query for current query.
    auto context = getOrSetQueryContext(query_id, settings, cache_lock);
    return QueryContextHolder(query_id, this, context);
}

/// Removes `size` bytes of accounting for (key, offset); also drops the LRU
/// record unless skip_download_if_exceeds_query_cache is set (in that mode no
/// records are kept).
void IFileCache::QueryContext::remove(const Key & key, size_t offset, size_t size, std::lock_guard<std::mutex> & cache_lock)
{
    if (cache_size < size)
        throw Exception(ErrorCodes::LOGICAL_ERROR, "Deleted cache size exceeds existing cache size");

    if (!skip_download_if_exceeds_query_cache)
    {
        auto record = records.find({key, offset});
        if (record != records.end())
        {
            lru_queue.remove(record->second, cache_lock);
            records.erase({key, offset});
        }
    }
    cache_size -= size;
}

/// Accounts `size` more bytes for (key, offset), creating the LRU record on
/// first use; throws LOGICAL_ERROR if the per-query budget would be exceeded.
void IFileCache::QueryContext::reserve(const Key & key, size_t offset, size_t size, std::lock_guard<std::mutex> & cache_lock)
{
    if (cache_size + size > max_cache_size)
    {
        throw Exception(
            ErrorCodes::LOGICAL_ERROR,
            "Reserved cache size exceeds the remaining cache size (key: {}, offset: {})",
            key.toString(), offset);
    }

    if (!skip_download_if_exceeds_query_cache)
    {
        auto record = records.find({key, offset});
        if (record == records.end())
        {
            auto queue_iter = lru_queue.add(key, offset, 0, cache_lock);
            record = records.insert({{key, offset}, queue_iter}).first;
        }
        record->second->size += size;
    }
    cache_size += size;
}

/// Marks (key, offset) as most recently used; a no-op in skip-download mode.
void IFileCache::QueryContext::use(const Key & key, size_t offset, std::lock_guard<std::mutex> & cache_lock)
{
    if (skip_download_if_exceeds_query_cache)
        return;

    auto record = records.find({key, offset});
    if (record != records.end())
        lru_queue.moveToEnd(record->second, cache_lock);
}

IFileCache::QueryContextHolder::QueryContextHolder(
    const String & query_id_,
    IFileCache * cache_,
    IFileCache::QueryContextPtr context_)
    : query_id(query_id_)
    , cache(cache_)
    , context(context_)
{
}

IFileCache::QueryContextHolder::~QueryContextHolder()
{
    /// If only the query_map and the current holder hold the context_query,
    /// the query has been completed and the query_context is released.
    if (context && context.use_count() == 2)
        cache->removeQueryContext(query_id);
}

}
| 2,310 |
861 | package cn.springcloud.gray.server.configuration;
import cn.springcloud.gray.server.clustering.PeerNode;
import cn.springcloud.gray.server.clustering.ServerCluster;
import cn.springcloud.gray.server.clustering.ServerClusterImpl;
import cn.springcloud.gray.server.configuration.properties.ClusterProperties;
import cn.springcloud.gray.utils.NetworkUtils;
import org.apache.commons.lang3.StringUtils;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.boot.autoconfigure.condition.ConditionalOnMissingBean;
import org.springframework.boot.context.properties.EnableConfigurationProperties;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import java.net.UnknownHostException;
/**
* @author saleson
* @date 2020-08-16 00:13
*/
/**
 * Auto-configuration that builds the {@link ServerCluster} handle for the
 * local node and registers every peer node declared in
 * {@link ClusterProperties}.
 */
@Configuration
@EnableConfigurationProperties({ClusterProperties.class})
public class ServerClusterAutoConfiguration {

    @Autowired
    private ClusterProperties clusterProperties;

    @Value("${server.port}")
    private int serverPort;

    /**
     * Creates the cluster for this node, identified by the resolved local IP
     * and the configured server port, and registers all configured peers.
     *
     * @throws UnknownHostException if local host resolution fails
     */
    @Bean
    @ConditionalOnMissingBean
    public ServerCluster serverCluster() throws UnknownHostException {
        PeerNode selfNode = new PeerNode();
        String localIp = NetworkUtils.getLocalIp();
        if (StringUtils.isEmpty(localIp)) {
            throw new NullPointerException("无法获取到本机ip");
        }
        selfNode.setHost(localIp);
        selfNode.setPort(serverPort);

        ServerCluster cluster = new ServerClusterImpl(selfNode);
        for (PeerNode peer : clusterProperties.getPeerNodes()) {
            cluster.registerPeerNode(peer);
        }
        return cluster;
    }
}
| 614 |
485 | package io.indexr.query.plan.physical;
import java.util.Comparator;
import java.util.Iterator;
import java.util.List;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.function.Predicate;
import java.util.function.Supplier;
import io.indexr.query.expr.Expression;
import io.indexr.query.expr.InterpretedPredicate;
import io.indexr.query.expr.SortOrder;
import io.indexr.query.expr.attr.Attribute;
import io.indexr.query.expr.project.InterpretedMutableProjection;
import io.indexr.query.expr.project.MutableProjection;
import io.indexr.query.plan.QueryPlan;
import io.indexr.query.row.InternalRow;
import io.indexr.query.util.InterpretedOrdering;
/**
 * Base class for physical (executable) query plan nodes.
 *
 * <p>Execution is two-phase: {@link #execute()} first runs the one-time
 * {@link #prepare()} pass over the plan tree, then delegates to
 * {@link #doExecute()} which returns an iterator over result rows.
 */
public abstract class PhysicalPlan extends QueryPlan<PhysicalPlan> {
    // Guards doPrepare() so the preparation pass runs at most once per node,
    // even if prepare() is reached through multiple parents.
    private final AtomicBoolean prepareCalled = new AtomicBoolean(false);

    /** Prepares the plan tree (once) and starts execution. */
    public final Iterator<InternalRow> execute() {
        prepare();
        return doExecute();
    }

    /**
     * Runs {@link #doPrepare()} exactly once for this node, then recursively
     * prepares all children.
     */
    public final void prepare() {
        if (prepareCalled.compareAndSet(false, true)) {
            doPrepare();
            // Iterate the children collection directly; the intermediate
            // stream() the original used added nothing.
            children().forEach(PhysicalPlan::prepare);
        }
    }

    /** Hook for subclasses needing one-time setup; default is a no-op. */
    protected void doPrepare() {}

    /** Produces the rows for this node; invoked after preparation. */
    protected abstract Iterator<InternalRow> doExecute();

    /** Factory for an interpreted mutable projection over the given schema. */
    protected Supplier<MutableProjection> newMutableProjection(
            List<Expression> expressions, List<Attribute> inputSchema) {
        return () -> new InterpretedMutableProjection(expressions, inputSchema);
    }

    /** Builds a row predicate by interpreting the given expression. */
    protected Predicate<InternalRow> newPredicate(
            Expression expression, List<Attribute> inputSchema) {
        return InterpretedPredicate.create(expression, inputSchema);
    }

    /** Builds a row comparator for the given sort order. */
    protected Comparator<InternalRow> newOrdering(
            List<SortOrder> order, List<Attribute> inputSchema) {
        return new InterpretedOrdering(order, inputSchema);
    }

    @Override
    public String nodeName() {
        return "physical." + getClass().getSimpleName();
    }
}
| 673 |
348 | {"nom":"Montsûrs-<NAME>","circ":"1ère circonscription","dpt":"Mayenne","inscrits":1860,"abs":886,"votants":974,"blancs":14,"nuls":2,"exp":958,"res":[{"nuance":"SOC","nom":"<NAME>","voix":266},{"nuance":"REM","nom":"Mme <NAME>","voix":239},{"nuance":"LR","nom":"Mme <NAME>","voix":178},{"nuance":"FN","nom":"<NAME>","voix":106},{"nuance":"FI","nom":"M. <NAME>","voix":76},{"nuance":"DVD","nom":"M. <NAME>","voix":34},{"nuance":"ECO","nom":"<NAME>","voix":22},{"nuance":"DLF","nom":"Mme <NAME>","voix":20},{"nuance":"EXG","nom":"Mme <NAME>","voix":8},{"nuance":"COM","nom":"<NAME>","voix":8},{"nuance":"DIV","nom":"<NAME>","voix":1}]} | 255 |
381 | <reponame>m4sterchain/mesapy<filename>pypy/module/_rawffi/alt/test/test_ffitype.py<gh_stars>100-1000
from pypy.module._rawffi.alt.test.test_funcptr import BaseAppTestFFI
class AppTestFFIType(BaseAppTestFFI):
    """App-level tests for the ``_rawffi.alt`` ffi type objects.

    NOTE(review): ``AppTest*`` methods are executed app-level by PyPy's test
    machinery, which injects helpers such as ``raises`` into the namespace —
    confirm against pypy's conftest before relying on this.
    """

    def test_simple_types(self):
        # repr() and .name of the primitive signed/unsigned int types.
        from _rawffi.alt import types
        assert str(types.sint) == "<ffi type sint>"
        assert str(types.uint) == "<ffi type uint>"
        assert types.sint.name == 'sint'
        assert types.uint.name == 'uint'

    def test_sizeof(self):
        # sizeof() reports the size in bytes.
        # NOTE(review): assumes a platform with 1-byte sbyte and 4-byte sint.
        from _rawffi.alt import types
        assert types.sbyte.sizeof() == 1
        assert types.sint.sizeof() == 4

    def test_typed_pointer(self):
        # Pointer(t) builds a typed pointer; deref_pointer() recovers the
        # target type, and returns None for non-pointer types.
        from _rawffi.alt import types
        intptr = types.Pointer(types.sint) # create a typed pointer to sint
        assert intptr.deref_pointer() is types.sint
        assert str(intptr) == '<ffi type (pointer to sint)>'
        assert types.sint.deref_pointer() is None
        # The string form of raises() executes the code app-level.
        raises(TypeError, "types.Pointer(42)")

    def test_pointer_identity(self):
        # Pointer types are cached: the same target type yields the very same
        # pointer-type object.
        from _rawffi.alt import types
        x = types.Pointer(types.slong)
        y = types.Pointer(types.slong)
        z = types.Pointer(types.char)
        assert x is y
        assert x is not z

    def test_char_p_cached(self):
        # The predefined char_p/unichar_p aliases are the cached pointer types
        # for char/unichar.
        from _rawffi.alt import types
        x = types.Pointer(types.char)
        assert x is types.char_p
        x = types.Pointer(types.unichar)
        assert x is types.unichar_p
| 653 |
938 | {
"variants": {
"type=bottom": {
"model": "tconstruct:block/wood/bloodshroom/slab"
},
"type=double": {
"model": "tconstruct:block/wood/bloodshroom/planks"
},
"type=top": {
"model": "tconstruct:block/wood/bloodshroom/slab_top"
}
}
} | 132 |
997 | <filename>crypto_sign/sphincs-haraka-192f-simple/clean/hash_haraka.c
#include <stdint.h>
#include <string.h>
#include "address.h"
#include "haraka.h"
#include "hash.h"
#include "params.h"
#include "utils.h"
/* Derives the per-keypair Haraka round constants from the public and secret
 * seeds, storing them in hash_state_seeded for all later hash calls. */
void PQCLEAN_SPHINCSHARAKA192FSIMPLE_CLEAN_initialize_hash_function(
    hash_state *hash_state_seeded,
    const unsigned char *pub_seed, const unsigned char *sk_seed) {
    PQCLEAN_SPHINCSHARAKA192FSIMPLE_CLEAN_tweak_constants(hash_state_seeded, pub_seed, sk_seed, PQCLEAN_SPHINCSHARAKA192FSIMPLE_CLEAN_N);
}
/* The haraka implementation is stack based and won't be replaced in PQClean/OQS,
so we don't need to do anything */
void PQCLEAN_SPHINCSHARAKA192FSIMPLE_CLEAN_destroy_hash_function(
    hash_state *hash_state_seeded) { // NOLINT(readability-non-const-parameter)
    /* No dynamically allocated state to free; intentionally a no-op. */
    (void)hash_state_seeded;
}
/*
* Computes PRF(key, addr), given a secret key of PQCLEAN_SPHINCSHARAKA192FSIMPLE_CLEAN_N bytes and an address
*/
void PQCLEAN_SPHINCSHARAKA192FSIMPLE_CLEAN_prf_addr(
    unsigned char *out, const unsigned char *key, const uint32_t addr[8],
    const hash_state *hash_state_seeded) {
    unsigned char buf[PQCLEAN_SPHINCSHARAKA192FSIMPLE_CLEAN_ADDR_BYTES];
    /* Since PQCLEAN_SPHINCSHARAKA192FSIMPLE_CLEAN_N may be smaller than 32, we need a temporary buffer. */
    unsigned char outbuf[32];

    /* The key parameter is unused: haraka256_sk presumably picks up the
     * secret keying through the sk_seed-tweaked constants prepared in
     * initialize_hash_function — confirm against haraka.c. */
    (void)key; /* Suppress an 'unused parameter' warning. */

    /* Serialize the address, hash it, and truncate the 32-byte output to N. */
    PQCLEAN_SPHINCSHARAKA192FSIMPLE_CLEAN_addr_to_bytes(buf, addr);
    PQCLEAN_SPHINCSHARAKA192FSIMPLE_CLEAN_haraka256_sk(outbuf, buf, hash_state_seeded);
    memcpy(out, outbuf, PQCLEAN_SPHINCSHARAKA192FSIMPLE_CLEAN_N);
}
/**
* Computes the message-dependent randomness R, using a secret seed and an
* optional randomization value as well as the message.
*/
void PQCLEAN_SPHINCSHARAKA192FSIMPLE_CLEAN_gen_message_random(
    unsigned char *R,
    const unsigned char *sk_prf, const unsigned char *optrand,
    const unsigned char *m, size_t mlen,
    const hash_state *hash_state_seeded) {
    uint8_t s_inc[65];

    /* Incremental Haraka sponge: absorb sk_prf || optrand || message, then
     * squeeze out N bytes of randomness R. */
    PQCLEAN_SPHINCSHARAKA192FSIMPLE_CLEAN_haraka_S_inc_init(s_inc);
    PQCLEAN_SPHINCSHARAKA192FSIMPLE_CLEAN_haraka_S_inc_absorb(s_inc, sk_prf, PQCLEAN_SPHINCSHARAKA192FSIMPLE_CLEAN_N, hash_state_seeded);
    PQCLEAN_SPHINCSHARAKA192FSIMPLE_CLEAN_haraka_S_inc_absorb(s_inc, optrand, PQCLEAN_SPHINCSHARAKA192FSIMPLE_CLEAN_N, hash_state_seeded);
    PQCLEAN_SPHINCSHARAKA192FSIMPLE_CLEAN_haraka_S_inc_absorb(s_inc, m, mlen, hash_state_seeded);
    PQCLEAN_SPHINCSHARAKA192FSIMPLE_CLEAN_haraka_S_inc_finalize(s_inc);
    PQCLEAN_SPHINCSHARAKA192FSIMPLE_CLEAN_haraka_S_inc_squeeze(R, PQCLEAN_SPHINCSHARAKA192FSIMPLE_CLEAN_N, s_inc, hash_state_seeded);
}
/**
* Computes the message hash using R, the public key, and the message.
* Outputs the message digest and the index of the leaf. The index is split in
* the tree index and the leaf index, for convenient copying to an address.
*/
void PQCLEAN_SPHINCSHARAKA192FSIMPLE_CLEAN_hash_message(
    unsigned char *digest, uint64_t *tree, uint32_t *leaf_idx,
    const unsigned char *R, const unsigned char *pk,
    const unsigned char *m, size_t mlen,
    const hash_state *hash_state_seeded) {
/* Sizes of the three components squeezed from the sponge: the FORS message
 * digest, the tree index and the leaf index; DGST_BYTES is their total. */
#define PQCLEAN_SPHINCSHARAKA192FSIMPLE_CLEAN_TREE_BITS (PQCLEAN_SPHINCSHARAKA192FSIMPLE_CLEAN_TREE_HEIGHT * (PQCLEAN_SPHINCSHARAKA192FSIMPLE_CLEAN_D - 1))
#define PQCLEAN_SPHINCSHARAKA192FSIMPLE_CLEAN_TREE_BYTES ((PQCLEAN_SPHINCSHARAKA192FSIMPLE_CLEAN_TREE_BITS + 7) / 8)
#define PQCLEAN_SPHINCSHARAKA192FSIMPLE_CLEAN_LEAF_BITS PQCLEAN_SPHINCSHARAKA192FSIMPLE_CLEAN_TREE_HEIGHT
#define PQCLEAN_SPHINCSHARAKA192FSIMPLE_CLEAN_LEAF_BYTES ((PQCLEAN_SPHINCSHARAKA192FSIMPLE_CLEAN_LEAF_BITS + 7) / 8)
#define PQCLEAN_SPHINCSHARAKA192FSIMPLE_CLEAN_DGST_BYTES (PQCLEAN_SPHINCSHARAKA192FSIMPLE_CLEAN_FORS_MSG_BYTES + PQCLEAN_SPHINCSHARAKA192FSIMPLE_CLEAN_TREE_BYTES + PQCLEAN_SPHINCSHARAKA192FSIMPLE_CLEAN_LEAF_BYTES)

    unsigned char buf[PQCLEAN_SPHINCSHARAKA192FSIMPLE_CLEAN_DGST_BYTES];
    unsigned char *bufp = buf;
    uint8_t s_inc[65];

    /* Absorb R || (pk + N, N bytes) || message — note only the second half of
     * pk is absorbed here; presumably that is the pk root — confirm against
     * params.h. Then squeeze DGST_BYTES output. */
    PQCLEAN_SPHINCSHARAKA192FSIMPLE_CLEAN_haraka_S_inc_init(s_inc);
    PQCLEAN_SPHINCSHARAKA192FSIMPLE_CLEAN_haraka_S_inc_absorb(s_inc, R, PQCLEAN_SPHINCSHARAKA192FSIMPLE_CLEAN_N, hash_state_seeded);
    PQCLEAN_SPHINCSHARAKA192FSIMPLE_CLEAN_haraka_S_inc_absorb(s_inc, pk + PQCLEAN_SPHINCSHARAKA192FSIMPLE_CLEAN_N, PQCLEAN_SPHINCSHARAKA192FSIMPLE_CLEAN_N, hash_state_seeded);
    PQCLEAN_SPHINCSHARAKA192FSIMPLE_CLEAN_haraka_S_inc_absorb(s_inc, m, mlen, hash_state_seeded);
    PQCLEAN_SPHINCSHARAKA192FSIMPLE_CLEAN_haraka_S_inc_finalize(s_inc);
    PQCLEAN_SPHINCSHARAKA192FSIMPLE_CLEAN_haraka_S_inc_squeeze(buf, PQCLEAN_SPHINCSHARAKA192FSIMPLE_CLEAN_DGST_BYTES, s_inc, hash_state_seeded);

    /* First FORS_MSG_BYTES bytes are the message digest. */
    memcpy(digest, bufp, PQCLEAN_SPHINCSHARAKA192FSIMPLE_CLEAN_FORS_MSG_BYTES);
    bufp += PQCLEAN_SPHINCSHARAKA192FSIMPLE_CLEAN_FORS_MSG_BYTES;

    /* Next TREE_BYTES bytes, masked down to TREE_BITS, select the subtree. */
    *tree = PQCLEAN_SPHINCSHARAKA192FSIMPLE_CLEAN_bytes_to_ull(bufp, PQCLEAN_SPHINCSHARAKA192FSIMPLE_CLEAN_TREE_BYTES);
    *tree &= (~(uint64_t)0) >> (64 - PQCLEAN_SPHINCSHARAKA192FSIMPLE_CLEAN_TREE_BITS);
    bufp += PQCLEAN_SPHINCSHARAKA192FSIMPLE_CLEAN_TREE_BYTES;

    /* Final LEAF_BYTES bytes, masked down to LEAF_BITS, select the leaf. */
    *leaf_idx = (uint32_t)PQCLEAN_SPHINCSHARAKA192FSIMPLE_CLEAN_bytes_to_ull(
                    bufp, PQCLEAN_SPHINCSHARAKA192FSIMPLE_CLEAN_LEAF_BYTES);
    *leaf_idx &= (~(uint32_t)0) >> (32 - PQCLEAN_SPHINCSHARAKA192FSIMPLE_CLEAN_LEAF_BITS);
}
| 2,528 |
1,031 | package at.wirecube.additiveanimations.additive_animator;
import android.animation.TypeEvaluator;
import java.util.List;
import at.wirecube.additiveanimations.helper.FloatProperty;
/**
* This is a small utility class which can animate any kind of object using the
* {@link #property(float, FloatProperty)} and {@link #property(float, TypeEvaluator, FloatProperty)} methods.
* If you'd like to provide your own builder methods for creating animations, subclass {@link BaseAdditiveAnimator}.
*/
public class AdditiveObjectAnimator<V> extends BaseAdditiveAnimator<AdditiveObjectAnimator<V>, V> {

    // Runnable invoked after each batch of property changes so the caller can
    // apply the new values to the target (e.g. trigger a redraw).
    private Runnable mAnimationApplier = null;

    @Override
    protected AdditiveObjectAnimator<V> newInstance() {
        return new AdditiveObjectAnimator<>();
    }

    /** Creates an animator operating on the given target object. */
    public static <V> AdditiveObjectAnimator<V> animate(V target) {
        return new AdditiveObjectAnimator<V>().target(target);
    }

    /** Creates an animator for the given target with the given duration in ms. */
    public static <V> AdditiveObjectAnimator<V> animate(V target, long duration) {
        return animate(target).setDuration(duration);
    }

    /** Creates an animator without a target; set one before starting animations. */
    public static <V> AdditiveObjectAnimator<V> create() {
        return new AdditiveObjectAnimator<V>();
    }

    /** Creates a target-less animator with the given duration in ms. */
    public static <V> AdditiveObjectAnimator<V> create(long duration) {
        return new AdditiveObjectAnimator<V>().setDuration(duration);
    }

    @Override
    protected AdditiveObjectAnimator<V> setParent(AdditiveObjectAnimator<V> other) {
        // Propagate the animation applier to chained child animators so they
        // refresh the target the same way as this one.
        AdditiveObjectAnimator<V> child = super.setParent(other);
        child.setAnimationApplier(mAnimationApplier);
        return child;
    }

    /** Sets the runnable executed whenever animated values change; returns this for chaining. */
    public AdditiveObjectAnimator<V> setAnimationApplier(Runnable animationApplier) {
        mAnimationApplier = animationApplier;
        return this;
    }

    @Override
    public Float getCurrentPropertyValue(String propertyName) {
        // AdditiveObjectAnimator only works with property-backed animations, so we don't need to implement this method
        return null;
    }

    @Override
    public void onApplyChanges() {
        if(mAnimationApplier != null) {
            mAnimationApplier.run();
        }
    }
}
| 720 |
316 | /**
* Contains the Mapbox Maps Android Log API classes.
*/
package com.mapbox.mapboxsdk.log; | 29 |
852 | import FWCore.ParameterSet.Config as cms
# output block for alcastream EcalPhiSym
# output module
# module alcastreamEcalPhiSymOutput = PoolOutputModule
# Event-content PSet for the EcalPhiSym ALCA stream: drop every product and
# keep only the sorted ECAL rechit collections.
alcastreamEcalPhiSymOutput = cms.PSet(
    outputCommands = cms.untracked.vstring('drop *',
        'keep EcalRecHitsSorted__*_*')
)
| 111 |
2,650 | __all__ = [
"__version__",
"utils",
"pdfminer",
"open",
"set_debug",
]
from ._version import __version__
from .pdf import PDF
from . import utils
import pdfminer
import pdfminer.pdftypes
import sys
# Relax pdfminer's strict parsing so slightly malformed PDFs can still be read.
pdfminer.pdftypes.STRICT = False
pdfminer.pdfinterp.STRICT = False
# Re-export PDF.open as the module-level entry point (pdfplumber.open).
open = PDF.open
def load(file_or_buffer, **kwargs):
    """Deprecated alias for ``pdfplumber.open``.

    Writes a deprecation notice to stderr, then builds and returns a PDF
    from *file_or_buffer*; all keyword arguments are forwarded to PDF.

    Note: the previous message concatenated adjacent string literals with
    no separating spaces ("deprecated.Please use ... instead.\\n"); the
    spacing is fixed here.
    """
    sys.stderr.write(
        "Warning: pdfplumber.load is deprecated. "
        "Please use pdfplumber.open (with same arguments) instead."
        "\n"
    )
    return PDF(file_or_buffer, **kwargs)
def set_debug(debug=0):
    # Sets pdfminer's module-wide debug flag (0 disables debug output).
    pdfminer.debug = debug

# Start with debugging disabled.
set_debug(0)
| 253 |
4,625 | // Copyright 2018 JanusGraph Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package org.janusgraph.diskstorage.configuration.converter;
import org.apache.commons.configuration2.BaseConfiguration;
import org.janusgraph.diskstorage.configuration.ReadConfiguration;
import org.janusgraph.util.system.ConfigurationUtil;
/**
 * Converter from {@link ReadConfiguration} into {@link BaseConfiguration}.
 *
 * <p>Singleton. The previous implementation initialized the instance lazily in
 * {@code getInstance()} with an unsynchronized check-then-act, which is not
 * thread-safe (two threads could observe {@code null} and create separate,
 * unsafely published instances). The instance is now created eagerly; class
 * initialization guarantees make this thread-safe without locking, and the
 * object is cheap to construct so laziness buys nothing.
 */
public class ReadConfigurationConverter {

    /** Eagerly created shared instance; safe publication via class init. */
    private static final ReadConfigurationConverter INSTANCE = new ReadConfigurationConverter();

    private ReadConfigurationConverter(){}

    /** Returns the shared converter instance. */
    public static ReadConfigurationConverter getInstance(){
        return INSTANCE;
    }

    /**
     * Copies every key/value pair of the given {@link ReadConfiguration} into
     * a freshly created {@link BaseConfiguration}.
     *
     * @param readConfiguration source configuration to copy from
     * @return a new {@link BaseConfiguration} containing all keys of the source
     */
    public BaseConfiguration convert(ReadConfiguration readConfiguration) {
        BaseConfiguration result = ConfigurationUtil.createBaseConfiguration();
        for (String k : readConfiguration.getKeys("")) {
            result.setProperty(k, readConfiguration.get(k, Object.class));
        }
        return result;
    }
}
| 478 |
404 | // Copyright (c) 2020 K Team. All Rights Reserved.
package org.kframework.compile.checks;
import org.kframework.definition.Context;
import org.kframework.definition.ContextAlias;
import org.kframework.definition.RuleOrClaim;
import org.kframework.definition.Sentence;
import org.kframework.kore.K;
import org.kframework.kore.KApply;
import org.kframework.kore.KAs;
import org.kframework.kore.KVariable;
import org.kframework.kore.VisitK;
import org.kframework.utils.errorsystem.KEMException;
import java.util.Set;
public class CheckK {

    /** Collected compiler errors; shared with the caller. */
    private final Set<KEMException> errors;

    public CheckK(Set<KEMException> errors) {
        this.errors = errors;
    }

    /**
     * Returns true if {@code alias} is a plain variable, or a variable wrapped
     * in a single {@code #SemanticCastTo...} application (a sort-cast variable).
     */
    private static boolean isVariableOrCastVariable(K alias) {
        if (alias instanceof KVariable) {
            return true;
        }
        if (alias instanceof KApply) {
            KApply app = (KApply) alias;
            return app.klabel().name().startsWith("#SemanticCastTo")
                    && app.items().size() == 1
                    && app.items().get(0) instanceof KVariable;
        }
        return false;
    }

    /** Visits {@code k} and records an error for every invalid #as pattern. */
    private void check(K k) {
        new VisitK() {
            @Override
            public void apply(KAs as) {
                if (!isVariableOrCastVariable(as.alias())) {
                    errors.add(KEMException.compilerError(
                            "Found #as pattern where the right side is not a variable.", as));
                }
                super.apply(as);
            }
        }.apply(k);
    }

    /** Checks all K terms contained in the given sentence. */
    public void check(Sentence s) {
        if (s instanceof RuleOrClaim) {
            RuleOrClaim r = (RuleOrClaim) s;
            check(r.body());
            check(r.requires());
            check(r.ensures());
        } else if (s instanceof Context) {
            Context c = (Context) s;
            check(c.body());
            check(c.requires());
        } else if (s instanceof ContextAlias) {
            ContextAlias c = (ContextAlias) s;
            check(c.body());
            check(c.requires());
        }
    }
}
| 1,001 |
12,278 | <filename>boost/boost/graph/distributed/depth_first_search.hpp
// Copyright (C) 2004-2008 The Trustees of Indiana University.
// Use, modification and distribution is subject to the Boost Software
// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
// Authors: <NAME>
// <NAME>
#ifndef BOOST_GRAPH_DISTRIBUTED_DFS_HPP
#define BOOST_GRAPH_DISTRIBUTED_DFS_HPP
#ifndef BOOST_GRAPH_USE_MPI
#error "Parallel BGL files should not be included unless <boost/graph/use_mpi.hpp> has been included"
#endif
#include <boost/graph/graph_traits.hpp>
#include <boost/property_map/property_map.hpp>
#include <boost/graph/overloading.hpp>
#include <boost/graph/properties.hpp>
#include <boost/graph/distributed/concepts.hpp>
#include <boost/static_assert.hpp>
#include <boost/assert.hpp>
#include <boost/graph/parallel/process_group.hpp>
#include <boost/graph/parallel/container_traits.hpp>
namespace boost {
namespace graph { namespace distributed { namespace detail {
      /**
       * Message-driven distributed DFS. Each process owns a subset of the
       * vertices; a "center of activity" token moves through the graph via
       * out-of-band messages. NOTE(review): appears to implement Cidon's
       * distributed DFS with Tsin's corrections (cf. the entry point name
       * tsin_depth_first_visit) — confirm against the original paper.
       */
      template<typename DistributedGraph, typename ColorMap, typename ParentMap,
               typename ExploreMap, typename VertexIndexMap, typename DFSVisitor>
      class parallel_dfs
      {
        typedef typename graph_traits<DistributedGraph>::vertex_iterator
          vertex_iterator;
        typedef typename graph_traits<DistributedGraph>::vertex_descriptor
          vertex_descriptor;
        typedef typename graph_traits<DistributedGraph>::out_edge_iterator
          out_edge_iterator;

        typedef typename boost::graph::parallel::process_group_type<DistributedGraph>
          ::type process_group_type;
        typedef typename process_group_type::process_id_type process_id_type;

        /**
         * The first vertex in the pair is the local node (i) and the
         * second vertex in the pair is the (possibly remote) node (j).
         */
        typedef boost::parallel::detail::untracked_pair<vertex_descriptor, vertex_descriptor> vertex_pair;

        typedef typename property_traits<ColorMap>::value_type color_type;
        typedef color_traits<color_type> Color;

        // Message types
        enum { discover_msg = 10, return_msg = 50, visited_msg = 100 , done_msg = 150};

      public:
        parallel_dfs(const DistributedGraph& g, ColorMap color,
                     ParentMap parent, ExploreMap explore,
                     VertexIndexMap index_map, DFSVisitor vis)
          : g(g), color(color), parent(parent), explore(explore),
            index_map(index_map), vis(vis), pg(process_group(g)),
            owner(get(vertex_owner, g)), next_out_edge(num_vertices(g))
        { }

        // Runs the search from start vertex s; every process enters the same
        // event loop and leaves when a done message has been propagated.
        void run(vertex_descriptor s)
        {
          // Initialize local vertices: white, self-parent, self-explore, and
          // remember the first out-edge so exploration can resume later.
          vertex_iterator vi, vi_end;
          for (boost::tie(vi, vi_end) = vertices(g); vi != vi_end; ++vi) {
            put(color, *vi, Color::white());
            put(parent, *vi, *vi);
            put(explore, *vi, *vi);
            next_out_edge[get(index_map, *vi)] = out_edges(*vi, g).first;
            vis.initialize_vertex(*vi, g);
          }

          vis.start_vertex(s, g);

          // The owner of s kicks the search off by messaging itself.
          if (get(owner, s) == process_id(pg)) {
            send_oob(pg, get(owner, s), discover_msg, vertex_pair(s, s));
          }

          // Event loop: block on the process group until a done_msg arrives.
          bool done = false;
          while (!done) {
            std::pair<process_id_type, int> msg = *pg.poll(true);

            switch (msg.second) {
            case discover_msg:
              {
                vertex_pair p;
                receive_oob(pg, msg.first, msg.second, p);

                if (p.first != p.second) {
                  // delete j from nomessage(j)
                  if (get(color, p.second) != Color::black())
                    local_put(color, p.second, Color::gray());

                  if (recover(p)) break;
                }

                if (get(color, p.first) == Color::white()) {
                  put(color, p.first, Color::gray());
                  put(parent, p.first, p.second);
                  vis.discover_vertex(p.first, g);

                  if (shift_center_of_activity(p.first)) break;

                  out_edge_iterator ei, ei_end;
                  for (boost::tie(ei,ei_end) = out_edges(p.first, g); ei != ei_end; ++ei)
                  {
                    // Notify everyone who may not know that the source
                    // vertex has been visited. They can then mark the
                    // corresponding color map entry gray.
                    if (get(parent, p.first) != target(*ei, g)
                        && get(explore, p.first) != target(*ei, g)) {
                      vertex_pair visit(target(*ei, g), p.first);

                      send_oob(pg, get(owner, target(*ei, g)), visited_msg, visit);
                    }
                  }
                }
              }
              break;

            case visited_msg:
              {
                vertex_pair p;
                receive_oob(pg, msg.first, msg.second, p);

                // delete j from nomessage(j)
                if (get(color, p.second) != Color::black())
                  local_put(color, p.second, Color::gray());

                recover(p);
              }
              break;

            case return_msg:
              {
                vertex_pair p;
                receive_oob(pg, msg.first, msg.second, p);

                // delete j from nomessage(i)
                local_put(color, p.second, Color::black());

                shift_center_of_activity(p.first);
              }
              break;

            case done_msg:
              {
                receive_oob(pg, msg.first, msg.second, done);

                // Propagate done message downward in tree (binary-heap
                // numbering of the process ids: children are 2i+1, 2i+2).
                done = true;
                process_id_type id = process_id(pg);
                process_id_type left = 2*id + 1;
                process_id_type right = left + 1;
                if (left < num_processes(pg))
                  send_oob(pg, left, done_msg, done);
                if (right < num_processes(pg))
                  send_oob(pg, right, done_msg, done);
              }
              break;

            default:
              BOOST_ASSERT(false);
            }
          }
        }

      private:
        // If the notification concerns the edge vertex i is currently
        // exploring, resume the search at i. Returns true on termination.
        bool recover(const vertex_pair& p)
        {
          if (get(explore, p.first) == p.second) {
            return shift_center_of_activity(p.first);
          }
          else
            return false;
        }

        // Advance the DFS at vertex i: hand the token to the next unvisited
        // neighbor (tree edge), or finish i and return to its parent.
        // Returns true only when the whole search terminates at the root.
        bool shift_center_of_activity(vertex_descriptor i)
        {
          for (out_edge_iterator ei = next_out_edge[get(index_map, i)],
                                 ei_end = out_edges(i, g).second;
               ei != ei_end; ++ei) {
            vis.examine_edge(*ei, g);
            vertex_descriptor k = target(*ei, g);
            color_type target_color = get(color, k);
            if (target_color == Color::black()) vis.forward_or_cross_edge(*ei, g);
            else if (target_color == Color::gray()) vis.back_edge(*ei, g);
            else {
              put(explore, i, k);
              vis.tree_edge(*ei, g);
              vertex_pair p(k, i);
              send_oob(pg, get(owner, k), discover_msg, p);
              next_out_edge[get(index_map, i)] = ++ei;
              return false;
            }
          }

          // All out-edges exhausted: blacken i, notify the parent, or signal
          // global termination (done_msg to process 0) if i is the root.
          next_out_edge[get(index_map, i)] = out_edges(i, g).second;
          put(explore, i, i);
          put(color, i, Color::black());
          vis.finish_vertex(i, g);
          if (get(parent, i) == i) {
            send_oob(pg, 0, done_msg, true);
            return true;
          }
          else {
            vertex_pair ret(get(parent, i), i);
            send_oob(pg, get(owner, ret.first), return_msg, ret);
          }
          return false;
        }

        const DistributedGraph& g;
        ColorMap color;
        ParentMap parent;
        ExploreMap explore;
        VertexIndexMap index_map;
        DFSVisitor vis;
        process_group_type pg;
        typename property_map<DistributedGraph, vertex_owner_t>::const_type owner;
        std::vector<out_edge_iterator> next_out_edge; // per-vertex resume point
      };
} // end namespace detail
    /**
     * Distributed depth-first search over an undirected distributed graph,
     * starting at vertex @p s. Events are reported through @p vis; state is
     * kept in the supplied color/parent/explore property maps.
     */
    template<typename DistributedGraph, typename ColorMap, typename ParentMap,
             typename ExploreMap, typename VertexIndexMap, typename DFSVisitor>
    void
    tsin_depth_first_visit
      (const DistributedGraph& g,
       typename graph_traits<DistributedGraph>::vertex_descriptor s,
       DFSVisitor vis, ColorMap color, ParentMap parent, ExploreMap explore,
       VertexIndexMap index_map)
    {
      typedef typename graph_traits<DistributedGraph>::directed_category
        directed_category;
      // The algorithm is only valid for undirected graphs.
      BOOST_STATIC_ASSERT(
        (is_convertible<directed_category, undirected_tag>::value));

      set_property_map_role(vertex_color, color);
      graph::distributed::detail::parallel_dfs
        <DistributedGraph, ColorMap, ParentMap, ExploreMap, VertexIndexMap,
         DFSVisitor> do_dfs(g, color, parent, explore, index_map, vis);
      do_dfs.run(s);

      // All processes synchronize before returning.
      using boost::graph::parallel::process_group;
      synchronize(process_group(g));
    }
    /**
     * Convenience overload that allocates vector-backed color/parent/explore
     * property maps (indexed through @p index_map) and delegates to the full
     * overload.
     */
    template<typename DistributedGraph, typename DFSVisitor,
             typename VertexIndexMap>
    void
    tsin_depth_first_visit
      (const DistributedGraph& g,
       typename graph_traits<DistributedGraph>::vertex_descriptor s,
       DFSVisitor vis,
       VertexIndexMap index_map)
    {
      typedef typename graph_traits<DistributedGraph>::vertex_descriptor
        vertex_descriptor;

      std::vector<default_color_type> colors(num_vertices(g));
      std::vector<vertex_descriptor> parent(num_vertices(g));
      std::vector<vertex_descriptor> explore(num_vertices(g));
      tsin_depth_first_visit
        (g, s,
         vis,
         make_iterator_property_map(colors.begin(), index_map),
         make_iterator_property_map(parent.begin(), index_map),
         make_iterator_property_map(explore.begin(), index_map),
         index_map);
    }
template<typename DistributedGraph, typename DFSVisitor,
typename VertexIndexMap>
void
tsin_depth_first_visit
(const DistributedGraph& g,
typename graph_traits<DistributedGraph>::vertex_descriptor s,
DFSVisitor vis)
{
tsin_depth_first_visit(g, s, vis, get(vertex_index, g));
}
} // end namespace distributed
using distributed::tsin_depth_first_visit;
} // end namespace graph
  /**
   * Hooks the distributed DFS into the usual BGL entry point: calling
   * depth_first_visit on a distributed graph dispatches to
   * tsin_depth_first_visit using the graph's internal vertex index map.
   */
  template<typename DistributedGraph, typename DFSVisitor>
  void
  depth_first_visit
    (const DistributedGraph& g,
     typename graph_traits<DistributedGraph>::vertex_descriptor s,
     DFSVisitor vis)
  {
    graph::tsin_depth_first_visit(g, s, vis, get(vertex_index, g));
  }
} // end namespace boost
#endif // BOOST_GRAPH_DISTRIBUTED_DFS_HPP
| 5,015 |
522 | # Cf. https://github.com/ppwwyyxx/tensorpack/blob/master/examples/FasterRCNN/train.py
# and https://github.com/ppwwyyxx/tensorpack/blob/master/examples/FasterRCNN/model.py
import tensorflow as tf
import numpy as np
from functools import partial
from datasets import DataKeys
from network.ConvolutionalLayers import Conv, ConvTranspose
from network.FullyConnected import FullyConnected
from network.Layer import Layer
from network.Resnet import add_resnet_conv5
from core import Measures, Extractions
from network.FasterRCNN_utils import decode_bbox_target, encode_bbox_target,\
generate_rpn_proposals, sample_fast_rcnn_targets, roi_align, rpn_losses,\
fastrcnn_losses, clip_boxes, fastrcnn_predictions, maskrcnn_loss, crop_and_resize
from datasets.util.Detection import ALL_ANCHORS, NUM_ANCHOR, ANCHOR_STRIDE
FASTRCNN_BBOX_REG_WEIGHTS = np.array([10, 10, 5, 5], dtype='float32')
def rpn_head(featuremap, channel, num_anchors, tower_setup):
  """RPN head: one shared 'conv0' layer feeding 1x1 objectness and box heads.

  Args:
    featuremap: backbone feature map tensor.
        NOTE(review): presumably NHWC — confirm against the Conv layer impl.
    channel: number of channels of the hidden conv layer.
    num_anchors: anchors per feature-map position.
    tower_setup: project-specific layer construction settings.

  Returns:
    label_logits: per-anchor objectness logits.
    box_logits: box regression logits reshaped to (N, H, W, num_anchors, 4).
  """
  with tf.variable_scope('rpn'):
    hidden = Conv('conv0', [featuremap], channel, tower_setup, old_order=True, bias=True,
                  W_initializer=tf.random_normal_initializer(stddev=0.01)).outputs[0]

    label_logits = Conv('class', [hidden], num_anchors, tower_setup, (1, 1), old_order=True, bias=True,
                        activation="linear", W_initializer=tf.random_normal_initializer(stddev=0.01)).outputs[0]
    box_logits = Conv('box', [hidden], 4 * num_anchors, tower_setup, (1, 1), old_order=True, bias=True,
                      activation="linear", W_initializer=tf.random_normal_initializer(stddev=0.01)).outputs[0]
    shp = tf.shape(box_logits)
    # Split the flat 4*num_anchors channel dimension into (num_anchors, 4).
    box_logits = tf.reshape(box_logits, tf.stack([shp[0], shp[1], shp[2], num_anchors, 4]))
    return label_logits, box_logits
def fastrcnn_head(feature, num_classes, reid_dim, tower_setup, reid_per_class=False, class_agnostic_box=False):
  """Fast R-CNN box head: global average pooling, then class / box / re-id FCs.

  Args:
    feature: per-RoI conv feature, rank-4 (RoIs, H, W, C).
    num_classes: number of classes including background.
    reid_dim: dimensionality of the re-identification embedding; 0 disables it.
    tower_setup: project-specific layer construction settings.
    reid_per_class: if True, predict one reid vector per foreground class and
        pick the one of the argmax class.
    class_agnostic_box: if True, a single shared box regressor instead of one
        per foreground class.

  Returns:
    (classification logits, box regression of shape (RoIs, #box-heads, 4),
     reid features or None).
  """
  with tf.variable_scope('fastrcnn'):
    # GlobalAvgPooling, see https://tensorpack.readthedocs.io/en/latest/_modules/tensorpack/models/pool.html
    assert feature.shape.ndims == 4
    feature = tf.reduce_mean(feature, [1, 2], name='gap/output')
    classification = FullyConnected("class", [feature], num_classes, tower_setup, activation="linear",
                                    W_initializer=tf.random_normal_initializer(stddev=0.01)).outputs[0]
    if class_agnostic_box:
      num_hidden_box = 1
    else:
      # One box regressor per foreground class (background excluded).
      num_hidden_box = num_classes - 1
    box_regression = FullyConnected("box", [feature], num_hidden_box * 4, tower_setup,
                                    activation="linear",
                                    W_initializer=tf.random_normal_initializer(stddev=0.001)).outputs[0]
    box_regression = tf.reshape(box_regression, (-1, num_hidden_box, 4))
    if reid_dim > 0:
      if reid_per_class:
        # Predict a reid embedding for every foreground class, then gather the
        # embedding of the highest-scoring class per RoI.
        reid_features = \
          FullyConnected("reid", [feature], (num_classes - 1) * reid_dim, tower_setup, activation="linear").outputs[0]
        reid_features = tf.reshape(reid_features, (-1, (num_classes - 1), reid_dim))
        best_class = tf.cast(tf.argmax(classification, axis=-1) - 1, dtype=tf.int32)
        feature_indices = tf.stack([tf.range(tf.shape(best_class)[0], dtype=tf.int32), best_class], axis=1)
        reid_features = tf.gather_nd(reid_features, feature_indices)
      else:
        reid_features = FullyConnected("reid", [feature], reid_dim, tower_setup, activation="linear").outputs[0]
    else:
      reid_features = None
    return classification, box_regression, reid_features
def maskrcnn_head(feature, num_class, tower_setup, class_agnostic_conv=False):
  """Mask R-CNN head: 2x upsampling deconv followed by a 1x1 mask conv.

  Args:
    feature: per-RoI conv feature.
    num_class: number of classes including background.
    tower_setup: project-specific layer construction settings.
    class_agnostic_conv: if True, predict a single shared mask channel instead
        of one per foreground class.

  Returns:
    Mask logits with one channel per foreground class (or one if agnostic).
  """
  with tf.variable_scope('maskrcnn'):
    # c2's MSRAFill is fan_out
    l = ConvTranspose('deconv', [feature], 256, tower_setup, (2, 2), strides=(2, 2), bias=True,
                      W_initializer=tf.variance_scaling_initializer(scale=2.0, mode='fan_out',
                                                                    distribution='truncated_normal')).outputs[0]
    if class_agnostic_conv:
      num_output_channels = 1
    else:
      num_output_channels = num_class - 1
    l = Conv('conv', [l], num_output_channels, tower_setup, (1, 1), old_order=True, bias=True, activation="linear",
             W_initializer=tf.variance_scaling_initializer(scale=2.0, mode='fan_out',
                                                           distribution='truncated_normal')).outputs[0]
    return l
# See also https://github.com/ppwwyyxx/tensorpack/blob/master/examples/FasterRCNN/train.py
class FasterRCNN(Layer):
  def _get_anchors(self, shape2d):
    """Slices the precomputed full-size anchor grid down to the actual
    feature-map extent given by shape2d (h, w).

    Returns:
        FSxFSxNAx4 anchors,
    """
    # FSxFSxNAx4 (FS=MAX_SIZE//ANCHOR_STRIDE)
    with tf.name_scope('anchors'):
      all_anchors = tf.constant(ALL_ANCHORS, name='all_anchors', dtype=tf.float32)
      fm_anchors = tf.slice(
        all_anchors, [0, 0, 0, 0], tf.stack([
          shape2d[0], shape2d[1], -1, -1]), name='fm_anchors')
      return fm_anchors
@staticmethod
def fill_full_mask(boxes, masks, img_shape):
"""
Args:
box: 4 float
mask: MxM floats
shape: h,w
"""
n_boxes = boxes.shape[0]
assert n_boxes == masks.shape[0]
ret = np.zeros([n_boxes, img_shape[0], img_shape[1]], dtype='uint8')
for idx in range(n_boxes):
# int() is floor
# box fpcoor=0.0 -> intcoor=0.0
x0, y0 = list(map(int, boxes[idx, :2] + 0.5))
# box fpcoor=h -> intcoor=h-1, inclusive
x1, y1 = list(map(int, boxes[idx, 2:] - 0.5)) # inclusive
x1 = max(x0, x1) # require at least 1x1
y1 = max(y0, y1)
w = x1 + 1 - x0
h = y1 + 1 - y0
# rounding errors could happen here, because masks were not originally computed for this shape.
# but it's hard to do better, because the network does not know the "original" scale
import cv2
mask = (cv2.resize(masks[idx, :, :], (w, h)) > 0.5).astype('uint8')
ret[idx, y0:y1 + 1, x0:x1 + 1] = mask
return ret
  def __init__(self, name, inputs, network_input_dict, tower_setup, fastrcnn_batch_per_img=256,
               result_score_thresh=0.05, reid_dimension=0, reid_per_class=False, reid_loss_per_class=False,
               reid_loss_worst_examples_percent=1.0, reid_loss_variant=0, reid_loss_factor=1.0,
               reid_measure="cosine", reid_adjacent_frames=False, class_agnostic_box_and_mask_heads=False,
               reid_loss_margin=0.2, provide_boxes_as_input=False):
    """Builds the full Faster/Mask R-CNN graph (RPN + per-batch-element heads).

    The heads are created once per batch element inside a reused variable
    scope; during training all head losses (and optionally a re-id loss) are
    summed, during inference per-element detection outputs are collected.
    """
    super(FasterRCNN, self).__init__()
    self.is_training = tower_setup.is_training
    inputs = inputs[0]
    if self.is_training:
      tf.add_to_collection('checkpoints', inputs)
    self._image_shape2d = tf.shape(network_input_dict[DataKeys.IMAGES])[1:3]
    self._add_maskrcnn = tower_setup.dataset.config.bool("add_masks", True)
    max_size = tower_setup.dataset.config.int_list("input_size_train", [])[1]
    # Clip for decoded box sizes (log-space), derived from the max input size.
    self.bbox_decode_clip = np.log(max_size / 16.0)
    self.fastrcnn_batch_per_img = fastrcnn_batch_per_img
    self.result_score_thresh = result_score_thresh
    self.tower_setup = tower_setup
    self.network_input_dict = network_input_dict
    self._num_classes = tower_setup.dataset.num_classes()
    self._do_reid = reid_dimension > 0
    self._reid_dimension = reid_dimension
    self._reid_per_class = reid_per_class
    self._reid_loss_per_class = reid_loss_per_class
    self._reid_loss_worst_examples_percent = reid_loss_worst_examples_percent
    # Loss variants: 0=sig_ce - NO LONGER IMPLEMENTED, 1=batch_hard, 2=batch_all, 3=batch_all_no_zeros, 4=contrastive
    self._reid_loss_variant = reid_loss_variant
    self._reid_loss_factor = reid_loss_factor
    # ReID distance: cosine, euclidean, normalized_euclidean
    self._reid_measure = reid_measure
    self._reid_adjacent_frames = reid_adjacent_frames
    self._reid_loss_margin = reid_loss_margin
    self.class_agnostic_box_and_mask_heads = class_agnostic_box_and_mask_heads
    self.provide_boxes_as_input = provide_boxes_as_input
    # NOTE(review): duplicate of the assignment above; harmless but redundant.
    self.network_input_dict = network_input_dict
    with tf.variable_scope(name):
      rpn_label_logits, rpn_box_logits = rpn_head(inputs, 1024, NUM_ANCHOR, self.tower_setup)
      fm_shape = tf.shape(inputs)[1:3]  # h,w
      fm_anchors = self._get_anchors(fm_shape)
    losses = []
    reid_features_and_target_ids_per_time = []
    batch_size = inputs.get_shape().as_list()[0]
    assert batch_size is not None
    # Prepare outputs
    self.outputs_per_batch_idx = [[] for _ in range(batch_size)]
    self.extractions_per_batch_idx = [{} for _ in range(batch_size)]
    final_boxes_list = []
    # Build the heads sequentially per batch element; control dependencies on
    # the previous elements' boxes serialize the work (memory), and the reused
    # variable scope shares the head weights across elements.
    for batch_idx in range(batch_size):
      with tf.control_dependencies(control_inputs=final_boxes_list):
        with tf.variable_scope(name, reuse=True if batch_idx > 0 else None):
          final_boxes, final_labels, final_masks, final_probs, reid_features, target_ids, final_reid_features, \
            fastrcnn_box_loss, fastrcnn_label_loss, mrcnn_loss, rpn_box_loss, rpn_label_loss = \
            self._create_heads(batch_idx, fm_anchors, fm_shape, rpn_box_logits, inputs, rpn_label_logits)
          reid_features_and_target_ids_per_time.append((reid_features, target_ids))
          if self.is_training:
            losses.extend([fastrcnn_box_loss, fastrcnn_label_loss, mrcnn_loss, rpn_box_loss, rpn_label_loss])
            # combine individual losses for summaries create summaries (atm they are separate)
            self.add_scalar_summary(rpn_label_loss, "rpn_label_loss")
            self.add_scalar_summary(rpn_box_loss, "rpn_box_loss")
            self.add_scalar_summary(fastrcnn_label_loss, "fastrcnn_label_loss")
            self.add_scalar_summary(fastrcnn_box_loss, "fastrcnn_box_loss")
          else:
            self._add_test_measures(batch_idx, final_boxes, final_probs, final_labels, final_masks, final_reid_features)
          final_boxes_list.append(final_boxes)  # For training, didnt have any OOM errors
    # Sanity check
    #for outputs in self.outputs_per_batch_idx:
    #  assert len(outputs) == len(self.outputs_per_batch_idx[0])
    #for ext in self.extractions_per_batch_idx:
    #  assert ext.keys() == self.extractions_per_batch_idx[0].keys()
    #num_outputs = len(self.outputs_per_batch_idx[0])
    # This doesn't work due to different sized outputs
    #for i in range(num_outputs):
    #  self.outputs.append(tf.stack([outputs[i] for outputs in self.outputs_per_batch_idx]))
    self.outputs = self.outputs_per_batch_idx[0]
    for key in self.extractions_per_batch_idx[0].keys():
      # To stack the extraction results
      #self.extractions[key] = tf.stack([extractions[key] for extractions in self.extractions_per_batch_idx])
      self.extractions[key] = [extractions[key] for extractions in self.extractions_per_batch_idx]
    if self.is_training and self._do_reid and self._reid_loss_factor > 0.0:
      reid_loss = create_reid_loss(reid_features_and_target_ids_per_time, self._reid_loss_per_class,
                                   self._reid_loss_worst_examples_percent, self._reid_loss_variant,
                                   self._reid_loss_factor, self._reid_measure, self._reid_adjacent_frames,
                                   self._reid_loss_margin)
      losses.append(reid_loss)
      self.add_scalar_summary(reid_loss, "reid_loss")
    if self.is_training:
      # L2-regularize the conv weights of the RPN/conv5/Fast R-CNN/mask heads.
      vars_to_regularize = tf.trainable_variables("frcnn/(?:rpn|group3|fastrcnn|maskrcnn)/.*W")
      regularizers = [1e-4 * tf.nn.l2_loss(W) for W in vars_to_regularize]
      regularization_loss = tf.add_n(regularizers, "regularization_loss")
      self.regularizers.append(regularization_loss)
      # TODO how to properly weight losses?
      loss = tf.add_n(losses, 'total_cost') / batch_size
      self.losses.append(loss)
      # self.add_scalar_summary(regularization_loss, "regularization_loss")
    else:
      loss = 0.0
    self.add_scalar_summary(loss, "loss")
    self._add_basic_measures(inputs, loss)
  def _create_heads(self, batch_idx, fm_anchors, fm_shape, rpn_box_logits, rpn_input, rpn_label_logits):
    """Builds proposals plus the Fast R-CNN (and mask/re-id) heads for one
    batch element.

    During training, returns the five head losses (final_* outputs are None);
    during inference, returns the final detections (losses are None).
    """
    # Select this batch element; keep a singleton batch dim for the backbone
    # feature used by RoI align.
    rpn_label_logits = rpn_label_logits[batch_idx]
    rpn_box_logits = rpn_box_logits[batch_idx]
    rpn_input = rpn_input[batch_idx, tf.newaxis]
    decoded_boxes = decode_bbox_target(self.bbox_decode_clip, rpn_box_logits, fm_anchors)  # fHxfWxNAx4, floatbox
    proposal_boxes, proposal_scores = generate_rpn_proposals(
      tf.reshape(decoded_boxes, [-1, 4]),
      tf.reshape(rpn_label_logits, [-1]),
      self._image_shape2d, self.is_training)
    if self.provide_boxes_as_input:
      # Replace the RPN proposals by externally provided boxes, rescaled from
      # the raw image size to the network input size.
      input_boxes = tf.squeeze(self.network_input_dict[DataKeys.BBOXES_TO_REFINE_x0y0x1y1], axis=0)
      image_shape2d_before_resize = self.network_input_dict[DataKeys.RAW_IMAGE_SIZES]
      assert image_shape2d_before_resize.shape[0] == 1, "we assume batch size is 1 for now"
      image_shape2d_before_resize = tf.squeeze(image_shape2d_before_resize, axis=0)
      image_shape2d = self._image_shape2d
      old_height, old_width = image_shape2d_before_resize[0], image_shape2d_before_resize[1]
      new_height, new_width = image_shape2d[0], image_shape2d[1]
      height_scale = new_height / old_height
      width_scale = new_width / old_width
      scale = tf.stack([width_scale, height_scale, width_scale, height_scale], axis=0)
      proposal_boxes = input_boxes * tf.cast(scale, tf.float32)
    if self.is_training:
      # Prepare the data, slice inputs by batch index
      gt_boxes = self.network_input_dict[DataKeys.BBOXES_x0y0x1y1]
      gt_boxes = gt_boxes[batch_idx]
      gt_labels = self.network_input_dict[DataKeys.CLASSES]
      gt_labels = tf.cast(gt_labels[batch_idx], dtype=tf.int64)
      if self._add_maskrcnn:
        gt_masks = self.network_input_dict[DataKeys.SEGMENTATION_MASK]
        gt_masks = gt_masks[batch_idx]
        gt_masks = tf.transpose(gt_masks, [2, 0, 1])
      else:
        gt_masks = None
      is_crowd = self.network_input_dict[DataKeys.IS_CROWD]
      is_crowd = is_crowd[batch_idx]
      # gt_crowd_boxes = tf.boolean_mask(gt_boxes, is_crowd)
      target_ids = self.network_input_dict[DataKeys.IDS]  # If 0, ignore; but 1 for IS_CROWD==True
      target_ids = target_ids[batch_idx]
      # Drop ground truths with id 0 or marked as crowd.
      valid_gts_mask = tf.logical_and(tf.greater(target_ids, 0), tf.equal(is_crowd, 0))
      gt_boxes = tf.boolean_mask(gt_boxes, valid_gts_mask)
      gt_labels = tf.boolean_mask(gt_labels, valid_gts_mask)
      target_ids = tf.boolean_mask(target_ids, valid_gts_mask)
      featuremap_labels = self.network_input_dict[DataKeys.FEATUREMAP_LABELS]
      featuremap_labels = featuremap_labels[batch_idx]
      featuremap_boxes = self.network_input_dict[DataKeys.FEATUREMAP_BOXES]  # already in xyxy format
      featuremap_boxes = featuremap_boxes[batch_idx]
      # sample proposal boxes in training
      rcnn_sampled_boxes, rcnn_labels, fg_inds_wrt_gt, rcnn_target_ids = sample_fast_rcnn_targets(
        proposal_boxes, gt_boxes, gt_labels, self.fastrcnn_batch_per_img, target_ids)
      boxes_on_featuremap = rcnn_sampled_boxes * (1.0 / ANCHOR_STRIDE)
      # tf.add_to_collection('checkpoints', boxes_on_featuremap)
    else:
      gt_boxes, gt_labels, gt_masks, target_ids, featuremap_labels, featuremap_boxes = [None] * 6
      rcnn_sampled_boxes, rcnn_labels, fg_inds_wrt_gt, rcnn_target_ids = None, None, None, None
      # use all proposal boxes in inference
      boxes_on_featuremap = proposal_boxes * (1.0 / ANCHOR_STRIDE)
    fastrcnn_box_logits, fastrcnn_label_logits, feature_fastrcnn, reid_features = self._create_fastrcnn_output(
      boxes_on_featuremap, rpn_input)
    if self.is_training:
      #tf.add_to_collection('checkpoints', fastrcnn_box_logits)
      #tf.add_to_collection('checkpoints', fastrcnn_label_logits)
      #tf.add_to_collection('checkpoints', feature_fastrcnn)
      fastrcnn_box_loss, fastrcnn_label_loss, mrcnn_loss, rpn_box_loss, rpn_label_loss = self._create_losses(
        featuremap_labels, featuremap_boxes, fm_anchors, fm_shape, fastrcnn_box_logits, fastrcnn_label_logits,
        feature_fastrcnn, fg_inds_wrt_gt, gt_boxes, gt_masks, rcnn_labels, rcnn_sampled_boxes, rpn_box_logits,
        rpn_label_logits)
      final_boxes, final_labels, final_masks, final_probs, final_reid_features = None, None, None, None, None
    else:
      final_boxes, final_labels, final_masks, final_probs, final_reid_features = self._create_final_outputs(
        batch_idx, rpn_input, fastrcnn_box_logits, fastrcnn_label_logits, proposal_boxes, reid_features)
      fastrcnn_box_loss, fastrcnn_label_loss, mrcnn_loss, rpn_box_loss, rpn_label_loss = None, None, None, None, None
    return final_boxes, final_labels, final_masks, final_probs, reid_features, rcnn_target_ids, final_reid_features, \
      fastrcnn_box_loss, fastrcnn_label_loss, mrcnn_loss, rpn_box_loss, rpn_label_loss
  def _create_fastrcnn_output(self, boxes_on_featuremap, rpn_input):
    """Runs RoI align + conv5 + the Fast R-CNN head on the given boxes.

    Wrapped in tf.cond so that an empty proposal set yields correctly shaped
    empty tensors instead of failing.

    Returns:
      (box logits, label logits, conv5 RoI features, reid features or None).
    """
    roi_resized = roi_align(rpn_input, boxes_on_featuremap, 14)

    def ff_true():
      # Normal path: at least one proposal.
      feature_fastrcnn_ = add_resnet_conv5(roi_resized, self.tower_setup)[0]
      fastrcnn_label_logits_, fastrcnn_box_logits_, fastrcnn_reid_features_ = fastrcnn_head(
        feature_fastrcnn_, self._num_classes, self._reid_dimension, self.tower_setup, self._reid_per_class,
        class_agnostic_box=self.class_agnostic_box_and_mask_heads)
      if self._do_reid:
        return feature_fastrcnn_, fastrcnn_label_logits_, fastrcnn_box_logits_, fastrcnn_reid_features_
      else:
        return feature_fastrcnn_, fastrcnn_label_logits_, fastrcnn_box_logits_

    def ff_false():
      # Empty path: zero-row tensors with matching shapes/dtypes.
      ncls = self._num_classes
      if self._do_reid:
        return tf.zeros([0, 7, 7, 2048]), tf.zeros([0, ncls]), tf.zeros([0, ncls - 1, 4]), \
          tf.zeros([0, self._reid_dimension])
      else:
        return tf.zeros([0, 7, 7, 2048]), tf.zeros([0, ncls]), tf.zeros([0, ncls - 1, 4])

    # The starred element collects the optional reid tensor (0 or 1 entries).
    feature_fastrcnn, fastrcnn_label_logits, fastrcnn_box_logits, *fastrcnn_reid_features = tf.cond(
      tf.size(boxes_on_featuremap) > 0, ff_true, ff_false)
    if len(fastrcnn_reid_features) == 0:
      fastrcnn_reid_features = None
    else:
      assert len(fastrcnn_reid_features) == 1
      fastrcnn_reid_features = fastrcnn_reid_features[0]
    return fastrcnn_box_logits, fastrcnn_label_logits, feature_fastrcnn, fastrcnn_reid_features
  def _create_losses(self, featuremap_labels, featuremap_boxes, fm_anchors, fm_shape, fastrcnn_box_logits,
                     fastrcnn_label_logits, feature_fastrcnn, fg_inds_wrt_gt, gt_boxes, gt_masks,
                     rcnn_labels, rcnn_sampled_boxes, rpn_box_logits, rpn_label_logits):
    """Builds the training losses for the RPN, Fast R-CNN and optional mask head.

    Args:
      featuremap_labels: Per-anchor target labels over the full feature map.
      featuremap_boxes: Per-anchor target boxes over the full feature map.
      fm_anchors: Anchor boxes for the current feature map.
      fm_shape: Spatial shape of the feature map, used to slice the targets.
      fastrcnn_box_logits: Box regression logits of the Fast R-CNN head.
      fastrcnn_label_logits: Classification logits of the Fast R-CNN head.
      feature_fastrcnn: RoI features, reused by the mask head for fg samples.
      fg_inds_wrt_gt: For each foreground sample, index of its matched gt box.
      gt_boxes: Ground-truth boxes.
      gt_masks: Ground-truth masks (used only when the mask head is enabled).
      rcnn_labels: Target class labels of the sampled proposals (0 = background).
      rcnn_sampled_boxes: The sampled proposal boxes.
      rpn_box_logits: Box regression logits of the RPN.
      rpn_label_logits: Objectness logits of the RPN.

    Returns:
      Tuple (fastrcnn_box_loss, fastrcnn_label_loss, mrcnn_loss, rpn_box_loss,
      rpn_label_loss); mrcnn_loss is the constant 0.0 when the mask head is off.
    """
    # Crop the precomputed anchor targets down to the actual feature-map size.
    anchor_labels = tf.slice(
      featuremap_labels, [0, 0, 0],
      tf.stack([fm_shape[0], fm_shape[1], -1]),
      name='sliced_anchor_labels')
    anchor_boxes = tf.slice(
      featuremap_boxes, [0, 0, 0, 0],
      tf.stack([fm_shape[0], fm_shape[1], -1, -1]),
      name='sliced_anchor_boxes')
    anchor_boxes_encoded = encode_bbox_target(anchor_boxes, fm_anchors)
    # rpn loss
    rpn_label_loss, rpn_box_loss = rpn_losses(
      anchor_labels, anchor_boxes_encoded, rpn_label_logits, rpn_box_logits)

    # fastrcnn loss
    fg_inds_wrt_sample = tf.reshape(tf.where(rcnn_labels > 0), [-1])  # fg inds w.r.t all samples
    fg_sampled_boxes = tf.gather(rcnn_sampled_boxes, fg_inds_wrt_sample)

    # Regression targets: matched gt boxes encoded relative to the foreground
    # proposals, scaled by the Fast R-CNN regression weights.
    matched_gt_boxes = tf.gather(gt_boxes, fg_inds_wrt_gt)
    encoded_boxes = encode_bbox_target(
      matched_gt_boxes,
      fg_sampled_boxes) * tf.constant(FASTRCNN_BBOX_REG_WEIGHTS)
    fastrcnn_label_loss, fastrcnn_box_loss = fastrcnn_losses(
      rcnn_labels, fastrcnn_label_logits,
      encoded_boxes,
      tf.gather(fastrcnn_box_logits, fg_inds_wrt_sample))

    if self._add_maskrcnn:
      # maskrcnn loss: only foreground samples contribute.
      fg_labels = tf.gather(rcnn_labels, fg_inds_wrt_sample)
      fg_feature = tf.gather(feature_fastrcnn, fg_inds_wrt_sample)
      mask_logits = maskrcnn_head(fg_feature, self._num_classes, self.tower_setup,
                                  class_agnostic_conv=self.class_agnostic_box_and_mask_heads)  # #fg x #cat x 14x14

      # Mask targets: crop each matched gt mask to its fg box, resized to 14x14.
      gt_masks_for_fg = tf.gather(gt_masks, fg_inds_wrt_gt)  # nfg x H x W
      target_masks_for_fg = crop_and_resize(
        tf.expand_dims(gt_masks_for_fg, 3),
        fg_sampled_boxes,
        tf.range(tf.size(fg_inds_wrt_gt)), 14, pad_border=False)  # nfg x 1x14x14
      target_masks_for_fg = tf.squeeze(target_masks_for_fg, 3, 'sampled_fg_mask_targets')
      mrcnn_loss = maskrcnn_loss(mask_logits, fg_labels, target_masks_for_fg,
                                 class_agnostic=self.class_agnostic_box_and_mask_heads)
      self.add_scalar_summary(mrcnn_loss, "mrcnn_loss")
    else:
      mrcnn_loss = 0.0
    return fastrcnn_box_loss, fastrcnn_label_loss, mrcnn_loss, rpn_box_loss, rpn_label_loss
  def _create_final_outputs(self, batch_idx, rpn_input, fastrcnn_box_logits, fastrcnn_label_logits,
                            proposal_boxes, reid_features):
    """Decodes head outputs into final detections for one batch element.

    Args:
      batch_idx: Index of the image in the batch; used to store the outputs
        in self.outputs_per_batch_idx.
      rpn_input: Backbone feature map, reused by the mask head at test time.
      fastrcnn_box_logits: Box regression logits of the Fast R-CNN head.
      fastrcnn_label_logits: Classification logits of the Fast R-CNN head.
      proposal_boxes: Proposal boxes in input-image coordinates.
      reid_features: Per-proposal reid embeddings (may be None if reid is off).

    Returns:
      Tuple (final_boxes, final_labels, final_masks, final_probs,
      final_reid_features); the mask/reid entries are None when disabled.
    """
    label_probs = tf.nn.softmax(fastrcnn_label_logits, name='fastrcnn_all_probs')  # #proposal x #Class
    if self.class_agnostic_box_and_mask_heads:
      # One shared regression per proposal instead of one per category.
      anchors = tf.tile(tf.expand_dims(proposal_boxes, 1), [1, 1, 1])  # #proposal x #Cat x 4
    else:
      anchors = tf.tile(tf.expand_dims(proposal_boxes, 1), [1, self._num_classes - 1, 1])  # #proposal x #Cat x 4
    decoded_boxes = decode_bbox_target(
      self.bbox_decode_clip,
      fastrcnn_box_logits /
      tf.constant(FASTRCNN_BBOX_REG_WEIGHTS), anchors)
    decoded_boxes = clip_boxes(decoded_boxes, self._image_shape2d, name='fastrcnn_all_boxes')

    if self.provide_boxes_as_input:
      # Externally supplied boxes: keep every proposal, category index 0,
      # with probability 1, and skip the score-based prediction filtering.
      n_proposals = tf.shape(proposal_boxes)[0]
      pred_indices = tf.stack([tf.range(n_proposals), tf.zeros((n_proposals,), dtype=tf.int32)], axis=1)
      final_probs = tf.ones((n_proposals,))
      ## do not do a second bbox regression!
      #decoded_boxes = anchors
    else:
      # indices: Nx2. Each index into (#proposal, #category)
      pred_indices, final_probs = fastrcnn_predictions(decoded_boxes, label_probs,
                                                       self._num_classes, self.result_score_thresh,
                                                       self.class_agnostic_box_and_mask_heads)
    final_probs = tf.identity(final_probs, 'final_probs')
    if self.class_agnostic_box_and_mask_heads:
      final_boxes = tf.gather(tf.squeeze(decoded_boxes, axis=1), pred_indices[:, 0], name='final_boxes')
    else:
      final_boxes = tf.gather_nd(decoded_boxes, pred_indices, name='final_boxes')
    # Category indices are 0-based and exclude background; labels are 1-based.
    final_labels = tf.add(pred_indices[:, 1], 1, name='final_labels')
    self.outputs_per_batch_idx[batch_idx] = [final_probs, final_boxes, final_labels]

    if self._add_maskrcnn:
      # HACK to work around https://github.com/tensorflow/tensorflow/issues/14657
      def f1():
        roi_resized = roi_align(rpn_input, final_boxes * (1.0 / ANCHOR_STRIDE), 14)
        with tf.variable_scope(tf.get_variable_scope(), reuse=True):
          # reuse=True: share the existing conv5 variables.
          feature_maskrcnn = add_resnet_conv5(roi_resized, self.tower_setup)[0]
        mask_logits = maskrcnn_head(
          feature_maskrcnn, self._num_classes, self.tower_setup,
          class_agnostic_conv=self.class_agnostic_box_and_mask_heads)  # #result x #cat x 14x14
        mask_logits = tf.transpose(mask_logits, [0, 3, 1, 2])
        if self.class_agnostic_box_and_mask_heads:
          mask_logits = tf.squeeze(mask_logits, axis=1)
        else:
          # Select the mask channel of each detection's predicted class.
          indices = tf.stack([tf.range(tf.size(final_labels)), tf.to_int32(final_labels) - 1], axis=1)
          mask_logits = tf.gather_nd(mask_logits, indices)  # #resultx14x14
        return tf.sigmoid(mask_logits)
      # Guard against zero detections (see the linked TF issue above).
      final_masks = tf.cond(tf.size(final_probs) > 0, f1, lambda: tf.zeros([0, 14, 14]))
      final_masks = tf.identity(final_masks, name='final_masks')
      self.outputs_per_batch_idx[batch_idx].append(final_masks)
    else:
      final_masks = None
    if self._do_reid:
      final_reid_features = tf.gather(reid_features, pred_indices[:, 0])
      self.outputs_per_batch_idx[batch_idx].append(final_reid_features)
    else:
      final_reid_features = None
    return final_boxes, final_labels, final_masks, final_probs, final_reid_features
def _add_basic_measures(self, inp, loss):
n_examples = tf.shape(inp)[0]
self.measures[Measures.N_EXAMPLES] = n_examples
if loss is not None:
self.measures[Measures.LOSS] = loss * tf.cast(n_examples, tf.float32)
  def _add_test_measures(self, batch_idx, final_boxes, final_probs, final_labels, final_masks, final_reid_features):
    """Stores one image's detections as extractions, rescaled to the original image size.

    Only active at test time; does nothing during training.
    """
    if not self.is_training:
      orig_img_shape = self.network_input_dict[DataKeys.RAW_IMAGE_SIZES][batch_idx, ...]
      orig_img_shape_f = tf.cast(orig_img_shape, tf.float32)
      image_shape2d_f = tf.cast(self._image_shape2d, tf.float32)
      # Average of the two per-axis scale factors between the network input
      # and the original image; assumes (near-)isotropic resizing — TODO confirm.
      scale = (image_shape2d_f[0] / orig_img_shape_f[0] + image_shape2d_f[1] / orig_img_shape_f[1]) / 2
      boxes = final_boxes / scale
      clipped_boxes = clip_boxes(boxes, orig_img_shape_f)
      self.extractions_per_batch_idx[batch_idx][Extractions.DET_BOXES] = clipped_boxes
      self.extractions_per_batch_idx[batch_idx][Extractions.DET_PROBS] = final_probs
      self.extractions_per_batch_idx[batch_idx][Extractions.DET_LABELS] = final_labels
      self.extractions_per_batch_idx[batch_idx][Extractions.REID_FEATURES] = final_reid_features
      if self._add_maskrcnn:
        # Presumably pastes the per-box masks into full-image uint8 masks;
        # see self.fill_full_mask for the exact semantics.
        final_masks = tf.py_func(self.fill_full_mask, [clipped_boxes, final_masks, orig_img_shape],
                                 tf.uint8, name="fill_full_mask")
        self.extractions_per_batch_idx[batch_idx][Extractions.DET_MASKS] = final_masks
      if DataKeys.IMAGE_ID in self.network_input_dict:
        self.extractions_per_batch_idx[batch_idx][Extractions.IMAGE_ID] = self.network_input_dict[DataKeys.IMAGE_ID]
def create_reid_loss(reid_features_and_target_ids_per_time, reid_loss_per_class,
                     reid_loss_worst_examples_percent, loss_variant, loss_factor, distance_measure, adjacent_frames,
                     reid_loss_margin):
  """Builds the re-identification embedding loss over a sequence of frames.

  Args:
    reid_features_and_target_ids_per_time: List of (features, target_ids)
      pairs, one entry per time step.
    reid_loss_per_class: Forwarded to the per-id loss; if True, detections are
      only compared against detections of the same class.
    reid_loss_worst_examples_percent: Hard-mining fraction; only 1.0 is
      currently supported (asserted below).
    loss_variant: 0 = dropped sigmoid cross-entropy variant (asserts),
      1 = batch-all with sample-count normalization, 2 = triplet batch-all,
      3 = triplet batch-all without zero terms, 4 = contrastive,
      otherwise triplet batch-hard (see _create_reid_loss_for_id).
    loss_factor: Scalar multiplier applied to the final loss.
    distance_measure: "cosine", "euclidean" or "normalized_euclidean".
    adjacent_frames: If True, only adjacent frame pairs (t, t+1) are compared;
      otherwise all detections of all frames are compared with each other.
    reid_loss_margin: Margin for the triplet/contrastive formulations.

  Returns:
    A scalar loss tensor (normalized sum times loss_factor).
  """
  if loss_variant == 0:
    assert False, "Sigmoid cross-entropy loss with adjacent frames was dropped"
  assert reid_loss_worst_examples_percent == 1.0, "Hard mining currently not implemented"

  # Cosine and normalized-euclidean distances operate on unit-norm embeddings.
  if distance_measure == "cosine" or distance_measure == "normalized_euclidean":
    reid_features_and_target_ids_per_time = [(tf.nn.l2_normalize(reid, axis=1), ids)
                                             for (reid, ids) in reid_features_and_target_ids_per_time]

  def compute_measure(a, b):
    # Pairwise distance matrix between the rows of a and b.
    if distance_measure == "cosine":
      return 1 - tf.matmul(a, b, transpose_b=True)  # cosine similarity would be without 1-
    elif distance_measure == "euclidean" or distance_measure == "normalized_euclidean":
      return cdist(a, b, "euclidean")
    else:
      assert False, "Unknown measure for comparing reid vectors"

  id_fun = partial(_create_reid_loss_for_id, reid_loss_per_class=reid_loss_per_class,
                   reid_loss_worst_examples_percent=reid_loss_worst_examples_percent, loss_variant=loss_variant,
                   margin=reid_loss_margin)

  reid_loss = tf.constant(0.0)
  normalization = tf.constant(0)
  if adjacent_frames:
    # Here, we only look at adjacent pairs of frames, looking from t to t+1 (tp1)
    for (reid_features_t, target_ids_t), (reid_features_tp1, target_ids_tp1) in \
        zip(reid_features_and_target_ids_per_time, reid_features_and_target_ids_per_time[1:]):
      def f():
        computed_measure = compute_measure(reid_features_t, reid_features_tp1)
        # Finding hard pos/neg in this matrix is easier if we separate by id
        reid_loss_per_id_fn = partial(id_fun, computed_measure=computed_measure, target_ids_axis_0=target_ids_t,
                                      target_ids_axis_1=target_ids_tp1)
        unique_target_ids_t, _ = tf.unique(target_ids_t)
        reid_losses_per_id, normalization_per_id = tf.map_fn(reid_loss_per_id_fn, unique_target_ids_t,
                                                             dtype=(tf.float32, tf.int32))
        reid_loss_t = tf.reduce_sum(reid_losses_per_id, axis=0)
        normalization_t = tf.reduce_sum(normalization_per_id, axis=0)
        return reid_loss_t, normalization_t
      # Skip frame pairs where either frame has no detections.
      reid_loss_t, normalization_t = tf.cond(tf.logical_and(tf.size(reid_features_t) > 0,
                                                            tf.size(reid_features_tp1) > 0),
                                             f, lambda: (tf.constant(0.0), tf.constant(1, dtype=tf.int32)))
      reid_loss += reid_loss_t
      normalization += normalization_t
  else:
    # Compare every detection of every frame against all others at once.
    all_target_ids = tf.concat([ids for (_, ids) in reid_features_and_target_ids_per_time], axis=0)
    all_reid_features = tf.concat([reid for (reid, _) in reid_features_and_target_ids_per_time], axis=0)
    computed_measure = compute_measure(all_reid_features, all_reid_features)
    # Finding hard pos/neg in this matrix is easier if we separate by id
    reid_loss_per_id_fn = partial(id_fun, computed_measure=computed_measure,
                                  target_ids_axis_0=all_target_ids, target_ids_axis_1=all_target_ids)
    unique_target_ids, _ = tf.unique(all_target_ids)
    reid_losses_per_id, normalization_per_id = tf.map_fn(reid_loss_per_id_fn, unique_target_ids,
                                                         dtype=(tf.float32, tf.int32))
    reid_loss = tf.reduce_sum(reid_losses_per_id, axis=0)
    normalization = tf.reduce_sum(normalization_per_id, axis=0)
    if loss_variant == 1:
      normalization = tf.size(all_target_ids)  # Sanity check: these should be equal
  reid_loss = reid_loss / tf.cast(normalization, dtype=tf.float32)
  return reid_loss * loss_factor
def _create_reid_loss_for_id(id_, computed_measure, target_ids_axis_0, target_ids_axis_1, reid_loss_per_class,
                             reid_loss_worst_examples_percent, loss_variant, margin):
  """Computes the reid loss contribution of all detections with track id `id_`.

  `computed_measure` is the pairwise distance matrix whose rows are labeled by
  `target_ids_axis_0` and whose columns are labeled by `target_ids_axis_1`.
  `reid_loss_worst_examples_percent` is unused here (hard mining is asserted
  away by the caller).

  Returns:
    Tuple (loss_sum, normalization_count); (0.0, 1) when this id has no valid
    positive/negative pair.
  """
  id_mask_axis_0 = tf.equal(target_ids_axis_0, id_)
  id_mask_axis_1 = tf.equal(target_ids_axis_1, id_)
  # Rows of the distance matrix belonging to this id.
  sliced_matrix = tf.boolean_mask(computed_measure, id_mask_axis_0)
  if reid_loss_per_class:
    # for the loss of each detection, consider only detections having the same class (car or pedestrian)
    # The class is encoded in the id via integer division by 1000.
    class_mask_axis_1 = tf.equal(tf.floordiv(target_ids_axis_1, 1000), tf.floordiv(id_, 1000))
    sliced_matrix = tf.boolean_mask(sliced_matrix, class_mask_axis_1, axis=1)
    id_mask_axis_1 = tf.boolean_mask(id_mask_axis_1, class_mask_axis_1)
  # Columns with the same id are positives, all others are negatives.
  same = tf.boolean_mask(sliced_matrix, id_mask_axis_1, axis=1)
  different = tf.boolean_mask(sliced_matrix, tf.logical_not(id_mask_axis_1), axis=1)

  def triplet():
    if loss_variant == 2 or loss_variant == 3:  # batch all
      # TODO in each row, leave out the column corresponding to the same detection
      all_combinations = tf.expand_dims(same, axis=2) - tf.expand_dims(different, axis=1)
      loss = tf.maximum(tf.constant(margin) + all_combinations, 0)
      if loss_variant == 2:
        normalization = tf.size(loss)
      else:  # batch all no zeros
        normalization = tf.count_nonzero(loss, dtype=tf.int32)
    else:  # batch hard
      # Hardest positive is the farthest same-id pair; hardest negative the
      # closest different-id pair.
      hard_pos = tf.reduce_max(same, axis=1)
      hard_neg = tf.reduce_min(different, axis=1)
      loss = tf.maximum(tf.constant(margin) + hard_pos - hard_neg, 0)
      normalization = tf.size(loss)
    return tf.reduce_sum(loss), normalization

  def contrastive():
    # Pull positives together, push negatives beyond the margin.
    loss = tf.constant(0.5) * (tf.reduce_sum(tf.square(same)) +
                               tf.reduce_sum(tf.square(tf.maximum(tf.constant(margin) - different, 0))))
    normalization = tf.size(same) + tf.size(different)
    return loss, normalization

  if loss_variant == 4:
    loss, normalization = tf.cond(tf.logical_and(tf.size(same) > 0, tf.size(different) > 0), contrastive,
                                  lambda: (tf.constant(0.0), tf.constant(1, dtype=tf.int32)))
  else:
    loss, normalization = tf.cond(tf.logical_and(tf.size(same) > 0, tf.size(different) > 0), triplet,
                                  lambda: (tf.constant(0.0), tf.constant(1, dtype=tf.int32)))
  return loss, normalization
# Stole this from https://raw.githubusercontent.com/VisualComputingInstitute/triplet-reid/master/loss.py :P
def all_diffs(a, b):
  """ Returns a tensor of all combinations of a - b.

  Args:
      a (2D tensor): A batch of vectors shaped (B1, F).
      b (2D tensor): A batch of vectors shaped (B2, F).

  Returns:
      A (B1, B2, F) tensor whose [i, j] entry is a[i] - b[j]; reducing the
      last axis yields a (B1, B2) pairwise matrix.
  """
  # Broadcast a down the rows and b across the columns, then subtract.
  expanded_a = tf.expand_dims(a, axis=1)  # (B1, 1, F)
  expanded_b = tf.expand_dims(b, axis=0)  # (1, B2, F)
  return expanded_a - expanded_b
def cdist(a, b, metric='euclidean'):
  """Similar to scipy.spatial's cdist, but symbolic.

  The currently supported metrics can be listed as `cdist.supported_metrics` and are:
      - 'euclidean', although with a fudge-factor epsilon.
      - 'sqeuclidean', the squared euclidean.
      - 'cityblock', the manhattan or L1 distance.

  Args:
      a (2D tensor): The left-hand side, shaped (B1, F).
      b (2D tensor): The right-hand side, shaped (B2, F).
      metric (string): Which distance metric to use, see notes.

  Returns:
      The matrix of all pairwise distances between all vectors in `a` and in
      `b`, will be of shape (B1, B2).

  Note:
      When a square root is taken (such as in the Euclidean case), a small
      epsilon is added because the gradient of the square-root at zero is
      undefined. Thus, it will never return exact zero in these cases.
  """
  with tf.name_scope("cdist"):
    pairwise_diffs = all_diffs(a, b)
    if metric == 'sqeuclidean':
      return tf.reduce_sum(tf.square(pairwise_diffs), axis=-1)
    if metric == 'euclidean':
      # Epsilon keeps the sqrt gradient finite when the distance is zero.
      return tf.sqrt(tf.reduce_sum(tf.square(pairwise_diffs), axis=-1) + 1e-12)
    if metric == 'cityblock':
      return tf.reduce_sum(tf.abs(pairwise_diffs), axis=-1)
    raise NotImplementedError(
      'The following metric is not implemented by `cdist` yet: {}'.format(metric))
# Public list of the metric names accepted by `cdist`, mirroring the
# `metric` argument subset documented in its docstring.
cdist.supported_metrics = [
    'euclidean',
    'sqeuclidean',
    'cityblock',
]
852 | <reponame>ckamtsikis/cmssw
#include "CondCore/Utilities/interface/PayloadInspectorModule.h"
#include "CondCore/Utilities/interface/PayloadInspector.h"
#include "CondCore/CondDB/interface/Time.h"
#include "DataFormats/EcalDetId/interface/EBDetId.h"
#include "DataFormats/EcalDetId/interface/EEDetId.h"
#include "CondCore/EcalPlugins/plugins/EcalDrawUtils.h"
// the data format of the condition to be inspected
#include "CondFormats/EcalObjects/interface/EcalSRSettings.h"
#include "TH2F.h" // a 2-D histogram with four bytes per cell (float)
#include "TCanvas.h"
#include "TLine.h"
#include "TStyle.h"
#include "TLatex.h" //write mathematical equations.
#include "TPave.h"
#include "TPaveStats.h"
#include <string>
#include <fstream>
namespace {
/*******************************************************
2d plot of Ecal SR Settings Summary of 1 IOV
*******************************************************/
  // Draws a single-IOV summary table of the Ecal selective-readout (SR)
  // settings: the EB/EE DCC ADC-to-GeV factors plus the low- and
  // high-interest zero-suppression thresholds, wrapped into columns of at
  // most maxInCol rows each.
  class EcalSRSettingsSummaryPlot : public cond::payloadInspector::PlotImage<EcalSRSettings> {
  public:
    EcalSRSettingsSummaryPlot() : cond::payloadInspector::PlotImage<EcalSRSettings>("Ecal SR Settings Summary - map ") {
      setSingleIov(true);  // this plot renders exactly one IOV
    }

    // Fills the summary histogram for the first IOV in `iovs` and saves it as
    // the payload-inspector image. Returns false when the payload is missing.
    bool fill(const std::vector<std::tuple<cond::Time_t, cond::Hash> >& iovs) override {
      const int maxInCol = 27;  // maximum rows per displayed column
      auto iov = iovs.front();
      std::shared_ptr<EcalSRSettings> payload = fetchPayload(std::get<1>(iov));
      unsigned int run = std::get<0>(iov);
      // NOTE(review): `align` is heap-allocated and never deleted; unless
      // ROOT takes ownership via the canvas this leaks once per call — confirm.
      TH2F* align;
      int NbRows, gridRows;
      int NbColumns, offset;

      if (payload.get()) {
        EcalSRSettings ecalSR = (*payload);
        NbRows = ecalSR.srpLowInterestChannelZS_.size();
        // Long threshold lists wrap into `offset` columns of maxInCol rows.
        gridRows = (NbRows <= maxInCol) ? NbRows : maxInCol;
        offset = ceil(1.0 * NbRows / maxInCol);
        NbColumns = offset * 2 + 3;  // 2 ADC-to-GeV cols + row index + 2 wrapped lists
        align =
            new TH2F("Ecal SR Settings Summary",
                     "ebDccAdcToGeV eeDccAdcToGeV Rows# srpLowInterestChannelZS srpHighInterestChannelZS",
                     NbColumns,
                     0,
                     NbColumns,
                     gridRows,
                     0,
                     gridRows);

        // Fill at bin centers (x.5), starting from the top row downwards.
        double row = gridRows - 0.5;
        double column = 3.5;
        int cnt = 0;
        align->Fill(0.5, gridRows - 0.5, ecalSR.ebDccAdcToGeV_);
        align->Fill(1.5, gridRows - 0.5, ecalSR.eeDccAdcToGeV_);
        // Row-number column.
        for (int i = 0; i < gridRows; i++) {
          align->Fill(2.5, gridRows - i - 0.5, i + 1);
        }
        // Low-interest zero-suppression thresholds, wrapped column-wise.
        for (std::vector<float>::const_iterator it = ecalSR.srpLowInterestChannelZS_.begin();
             it != ecalSR.srpLowInterestChannelZS_.end();
             it++) {
          align->Fill(column, row, *it);
          cnt++;
          column = floor(1.0 * cnt / maxInCol) + 3.5;
          row = (row == 0.5 ? (gridRows - 0.5) : row - 1);
        }
        // High-interest thresholds, shifted right by `offset` columns.
        row = gridRows - 0.5;
        column = 3.5;
        cnt = 0;
        for (std::vector<float>::const_iterator it = ecalSR.srpHighInterestChannelZS_.begin();
             it != ecalSR.srpHighInterestChannelZS_.end();
             it++) {
          align->Fill(column + offset, row, *it);
          cnt++;
          column = floor(1.0 * cnt / maxInCol) + 3.5;
          row = (row == 0.5 ? (gridRows - 0.5) : row - 1);
        }
      } else
        return false;

      gStyle->SetPalette(1);
      gStyle->SetOptStat(0);
      TCanvas canvas("CC map", "CC map", 1000, 1000);
      TLatex t1;
      t1.SetNDC();
      t1.SetTextAlign(26);
      t1.SetTextSize(0.05);
      t1.SetTextColor(2);
      // NOTE(review): `run` is unsigned but printed with %i; fine below 2^31,
      // %u would be the matching specifier.
      t1.DrawLatex(0.5, 0.96, Form("Ecal SRSettings Summary, IOV %i", run));

      TPad* pad = new TPad("pad", "pad", 0.0, 0.0, 1.0, 0.94);
      pad->Draw();
      pad->cd();
      align->Draw("TEXT");
      drawTable(NbRows, NbColumns);

      // Hide axis ticks/labels: the histogram is used purely as a text table.
      align->GetXaxis()->SetTickLength(0.);
      align->GetXaxis()->SetLabelSize(0.);
      align->GetYaxis()->SetTickLength(0.);
      align->GetYaxis()->SetLabelSize(0.);

      std::string ImageName(m_imageFileName);
      canvas.SaveAs(ImageName.c_str());
      return true;
    }
  };
} // namespace
// Register the classes as boost python plugin
PAYLOAD_INSPECTOR_MODULE(EcalSRSettings) { PAYLOAD_INSPECTOR_CLASS(EcalSRSettingsSummaryPlot); } | 2,012 |
348 | <reponame>chamberone/Leaflet.PixiOverlay
{"nom":"Vandoeuvre-lès-Nancy","circ":"2ème circonscription","dpt":"Meurthe-et-Moselle","inscrits":15814,"abs":8898,"votants":6916,"blancs":69,"nuls":25,"exp":6822,"res":[{"nuance":"MDM","nom":"<NAME>","voix":2557},{"nuance":"SOC","nom":"<NAME>","voix":1128},{"nuance":"LR","nom":"Mme <NAME>","voix":969},{"nuance":"FN","nom":"M. <NAME>","voix":809},{"nuance":"FI","nom":"Mme <NAME>","voix":796},{"nuance":"COM","nom":"<NAME>","voix":150},{"nuance":"ECO","nom":"Mme <NAME>","voix":144},{"nuance":"DLF","nom":"Mme <NAME>","voix":90},{"nuance":"ECO","nom":"M. <NAME>","voix":72},{"nuance":"EXG","nom":"M. <NAME>","voix":68},{"nuance":"DIV","nom":"Mme <NAME>","voix":39},{"nuance":"DIV","nom":"M. <NAME>","voix":0}]} | 313 |
403 | <reponame>bilaleluneis/QPULib<filename>Lib/Target/Pretty.h
// Pretty printer for the QPULib target language.
//
// Fix: the previous include guard `_TARGET_PRETTY_H_` used an identifier
// reserved for the implementation (leading underscore followed by an
// uppercase letter, [lex.name]); renamed to a non-reserved form.
#ifndef TARGET_PRETTY_H
#define TARGET_PRETTY_H

#include "Target/Syntax.h"

// Pretty-prints the given target-language instruction.
void pretty(Instr instr);

#endif  // TARGET_PRETTY_H
| 89 |
468 | {
"name": "pride",
"author": "<NAME>",
"license": "CC0",
"raster": "http://hexb.in/hexagons/pride.png",
"vector": "http://hexb.in/vector/pride.svg",
"description": "Hexagon symbolising the pride flag."
}
| 89 |
530 | /*
* Copyright (c) 2018, Oracle and/or its affiliates. All rights reserved.
*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* The contents of this file are subject to the terms of either the Universal Permissive License
* v 1.0 as shown at http://oss.oracle.com/licenses/upl
*
* or the following license:
*
* Redistribution and use in source and binary forms, with or without modification, are permitted
* provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this list of conditions
* and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice, this list of
* conditions and the following disclaimer in the documentation and/or other materials provided with
* the distribution.
*
* 3. Neither the name of the copyright holder nor the names of its contributors may be used to
* endorse or promote products derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
* FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY
* WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package org.openjdk.jmc.flightrecorder.ui;
import org.eclipse.jface.util.IPropertyChangeListener;
import org.eclipse.ui.IWorkbenchPage;
import org.eclipse.ui.IWorkbenchPart;
import org.eclipse.ui.PlatformUI;
import org.eclipse.ui.part.IPage;
import org.eclipse.ui.part.MessagePage;
import org.eclipse.ui.part.PageBook;
import org.eclipse.ui.part.PageBookView;
import org.openjdk.jmc.flightrecorder.ui.messages.internal.Messages;
import org.openjdk.jmc.flightrecorder.ui.preferences.PreferenceKeys;
/**
 * Page book view showing the automated analysis result page of the currently active flight
 * recording editor. When no editor is active, or recording analysis is disabled, a plain message
 * page is shown instead.
 */
public class ResultPageBookView extends PageBookView {

	private static final String HELP_CONTEXT_ID = FlightRecorderUI.PLUGIN_ID + ".ResultView"; //$NON-NLS-1$

	// Keeps the default page's message in sync with the "enable recording analysis" preference.
	// Installed in createDefaultPage, removed in dispose().
	private IPropertyChangeListener analysisEnabledPropertyListener;

	@Override
	protected IPage createDefaultPage(PageBook book) {
		MessagePage page = new MessagePage();
		initPage(page);
		page.createControl(book);
		analysisEnabledPropertyListener = e -> {
			if (e.getProperty().equals(PreferenceKeys.PROPERTY_ENABLE_RECORDING_ANALYSIS)) {
				setDefaultMessage(page, (Boolean) e.getNewValue());
			}
		};
		FlightRecorderUI.getDefault().getPreferenceStore().addPropertyChangeListener(analysisEnabledPropertyListener);
		setDefaultMessage(page, FlightRecorderUI.getDefault().isAnalysisEnabled());
		return page;
	}

	private void setDefaultMessage(MessagePage page, Boolean analysisEnabled) {
		page.setMessage(
				analysisEnabled ? Messages.RESULT_VIEW_NO_EDITOR_SELECTED : Messages.RESULT_VIEW_ANALYSIS_DISABLED);
	}

	@Override
	protected PageRec doCreatePage(IWorkbenchPart part) {
		if (isImportant(part)) {
			ResultPage p = ((JfrEditor) part).createResultPage();
			initPage(p);
			p.createControl(getPageBook());
			PlatformUI.getWorkbench().getHelpSystem().setHelp(p.getControl(), HELP_CONTEXT_ID);
			return new PageRec(part, p);
		}
		return new PageRec(part, getDefaultPage());
	}

	@Override
	protected void doDestroyPage(IWorkbenchPart part, PageRec pageRecord) {
		// Fix: the preference listener belongs to the view's default page, not to the per-editor
		// pages. It used to be removed here, which silently stopped default-page message updates
		// as soon as the first editor page was destroyed. It is now removed in dispose() instead.
		pageRecord.page.dispose();
		pageRecord.dispose();
	}

	@Override
	public void dispose() {
		if (analysisEnabledPropertyListener != null) {
			FlightRecorderUI.getDefault().getPreferenceStore()
					.removePropertyChangeListener(analysisEnabledPropertyListener);
			analysisEnabledPropertyListener = null;
		}
		super.dispose();
	}

	@Override
	protected IWorkbenchPart getBootstrapPart() {
		IWorkbenchPage page = getSite().getPage();
		if (page != null) {
			return page.getActiveEditor();
		}
		return null;
	}

	@Override
	protected boolean isImportant(IWorkbenchPart part) {
		return (part instanceof JfrEditor) && FlightRecorderUI.getDefault().isAnalysisEnabled();
	}
}
| 1,320 |
1,350 | <filename>sdk/synapse/azure-resourcemanager-synapse/src/samples/java/com/azure/resourcemanager/synapse/generated/AzureADOnlyAuthenticationsListSamples.java<gh_stars>1000+
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
// Code generated by Microsoft (R) AutoRest Code Generator.
package com.azure.resourcemanager.synapse.generated;
import com.azure.core.util.Context;
/** Samples for AzureADOnlyAuthentications List. */
public final class AzureADOnlyAuthenticationsListSamples {
    /*
     * x-ms-original-file: specification/synapse/resource-manager/Microsoft.Synapse/stable/2021-06-01/examples/ListAzureADOnlyAuthentication.json
     */
    /**
     * Sample code: Get a list of Azure Active Directory Only Authentication property.
     *
     * @param manager Entry point to SynapseManager.
     */
    public static void getAListOfAzureActiveDirectoryOnlyAuthenticationProperty(
        com.azure.resourcemanager.synapse.SynapseManager manager) {
        // Lists the Azure AD-only authentication settings; the first argument is
        // presumably the resource group and the second the workspace name —
        // verify against the SynapseManager API. Context.NONE passes no extra
        // per-request pipeline context.
        manager.azureADOnlyAuthentications().list("workspace-6852", "workspace-2080", Context.NONE);
    }
}
| 351 |
1,205 | <gh_stars>1000+
"""
Module for the creation of composite quantum objects via the tensor product.
"""
__all__ = [
'tensor', 'super_tensor', 'composite', 'tensor_swap', 'tensor_contract'
]
import numpy as np
import scipy.sparse as sp
from qutip.cy.spmath import zcsr_kron
from qutip.qobj import Qobj
from qutip.permute import reshuffle
from qutip.superoperator import operator_to_vector
from qutip.dimensions import (
flatten, enumerate_flat, unflatten, deep_remove,
dims_to_tensor_shape, dims_idxs_to_tensor_idxs
)
import qutip.settings
import qutip.superop_reps # Avoid circular dependency here.
def tensor(*args):
    """Calculates the tensor product of input operators.

    Parameters
    ----------
    args : array_like
        ``list`` or ``array`` of quantum objects for tensor product.

    Returns
    -------
    obj : qobj
        A composite quantum object.

    Raises
    ------
    TypeError
        If no arguments are given, any input is not a quantum object, or
        superoperators with different representations are combined.

    Examples
    --------
    >>> tensor([sigmax(), sigmax()]) # doctest: +SKIP
    Quantum object: dims = [[2, 2], [2, 2]], \
shape = [4, 4], type = oper, isHerm = True
    Qobj data =
    [[ 0.+0.j  0.+0.j  0.+0.j  1.+0.j]
     [ 0.+0.j  0.+0.j  1.+0.j  0.+0.j]
     [ 0.+0.j  1.+0.j  0.+0.j  0.+0.j]
     [ 1.+0.j  0.+0.j  0.+0.j  0.+0.j]]
    """
    if not args:
        raise TypeError("Requires at least one input argument")

    if len(args) == 1 and isinstance(args[0], (list, np.ndarray)):
        # this is the case when tensor is called on the form:
        # tensor([q1, q2, q3, ...])
        qlist = args[0]

    elif len(args) == 1 and isinstance(args[0], Qobj):
        # tensor is called with a single Qobj as an argument, do nothing
        return args[0]

    else:
        # this is the case when tensor is called on the form:
        # tensor(q1, q2, q3, ...)
        qlist = args

    if not all([isinstance(q, Qobj) for q in qlist]):
        # raise error if one of the inputs is not a quantum object
        raise TypeError("One of inputs is not a quantum object")

    out = Qobj()

    if qlist[0].issuper:
        out.superrep = qlist[0].superrep
        if not all([q.superrep == out.superrep for q in qlist]):
            # Fix: the two literals previously concatenated without a space,
            # producing "...all musthave the same representation".
            raise TypeError("In tensor products of superroperators, all must "
                            "have the same representation")

    out.isherm = True
    for n, q in enumerate(qlist):
        if n == 0:
            # Seed the running Kronecker product with the first factor.
            out.data = q.data
            out.dims = q.dims
        else:
            # Accumulate the sparse Kronecker product and concatenate dims.
            out.data = zcsr_kron(out.data, q.data)
            out.dims = [out.dims[0] + q.dims[0], out.dims[1] + q.dims[1]]
        # The product is Hermitian only if every factor is Hermitian.
        out.isherm = out.isherm and q.isherm

    if not out.isherm:
        # Unknown hermiticity: clear the cache so it is re-evaluated lazily.
        out._isherm = None

    return out.tidyup() if qutip.settings.auto_tidyup else out
def super_tensor(*args):
    """Calculates the tensor product of input superoperators, by tensoring
    together the underlying Hilbert spaces on which each vectorized operator
    acts.

    Parameters
    ----------
    args : array_like
        ``list`` or ``array`` of quantum objects with ``type="super"``.

    Returns
    -------
    obj : qobj
        A composite quantum object.

    Raises
    ------
    TypeError
        If the inputs mix types, or if superoperator inputs are not in the
        ``superrep="super"`` representation.
    """
    if isinstance(args[0], list):
        args = args[0]

    # Check if we're tensoring vectors or superoperators.
    if all(arg.issuper for arg in args):
        if not all(arg.superrep == "super" for arg in args):
            raise TypeError(
                "super_tensor on type='super' is only implemented for "
                "superrep='super'."
            )

        # Reshuffle the superoperators so that subsystems are grouped,
        # allowing an ordinary tensor product on the reshuffled objects.
        shuffled_ops = list(map(reshuffle, args))

        # Tensor the result.
        shuffled_tensor = tensor(shuffled_ops)

        # Unshuffle and return.
        out = reshuffle(shuffled_tensor)
        out.superrep = args[0].superrep
        return out

    elif all(arg.isoperket for arg in args):
        # Reshuffle the superoperators.
        shuffled_ops = list(map(reshuffle, args))

        # Tensor the result.
        shuffled_tensor = tensor(shuffled_ops)

        # Unshuffle and return.
        out = reshuffle(shuffled_tensor)
        return out

    elif all(arg.isoperbra for arg in args):
        # Operator-bras: take daggers, treat as operator-kets, dagger back.
        return super_tensor(*(arg.dag() for arg in args)).dag()

    else:
        raise TypeError(
            "All arguments must be the same type, "
            "either super, operator-ket or operator-bra."
        )
def _isoperlike(q):
return q.isoper or q.issuper
def _isketlike(q):
return q.isket or q.isoperket
def _isbralike(q):
return q.isbra or q.isoperbra
def composite(*args):
    """
    Given two or more operators, kets or bras, returns the Qobj
    corresponding to a composite system over each argument.
    For ordinary operators and vectors, this is the tensor product,
    while for superoperators and vectorized operators, this is
    the column-reshuffled tensor product.

    If a mix of Qobjs supported on Hilbert and Liouville spaces
    are passed in, the former are promoted. Ordinary operators
    are assumed to be unitaries, and are promoted using ``to_super``,
    while kets and bras are promoted by taking their projectors and
    using ``operator_to_vector(ket2dm(arg))``.

    Raises
    ------
    TypeError
        If any argument is not a Qobj, or if the arguments mix
        operator-like, ket-like and bra-like types.
    """
    # First step will be to ensure everything is a Qobj at all.
    if not all(isinstance(arg, Qobj) for arg in args):
        raise TypeError("All arguments must be Qobjs.")

    # Next, figure out if we have something oper-like (isoper or issuper),
    # or something ket-like (isket or isoperket). Bra-like we'll deal with
    # by turning things into ket-likes and back.
    if all(map(_isoperlike, args)):
        # OK, we have oper/supers.
        if any(arg.issuper for arg in args):
            # Note that to_super does nothing to things
            # that are already type=super, while it will
            # promote unitaries to superunitaries.
            return super_tensor(*map(qutip.superop_reps.to_super, args))

        else:
            # Everything's just an oper, so ordinary tensor products work.
            return tensor(*args)

    elif all(map(_isketlike, args)):
        # Ket-likes.
        if any(arg.isoperket for arg in args):
            # We have a vectorized operator, we we may need to promote
            # something.
            return super_tensor(*(
                arg if arg.isoperket
                else operator_to_vector(qutip.states.ket2dm(arg))
                for arg in args
            ))

        else:
            # Everything's ordinary, so we can use the tensor product here.
            return tensor(*args)

    elif all(map(_isbralike, args)):
        # Turn into ket-likes and recurse.
        return composite(*(arg.dag() for arg in args)).dag()

    else:
        raise TypeError("Unsupported Qobj types [{}].".format(
            ", ".join(arg.type for arg in args)
        ))
def _tensor_contract_single(arr, i, j):
"""
Contracts a dense tensor along a single index pair.
"""
if arr.shape[i] != arr.shape[j]:
raise ValueError("Cannot contract over indices of different length.")
idxs = np.arange(arr.shape[i])
sl = tuple(slice(None, None, None)
if idx not in (i, j) else idxs for idx in range(arr.ndim))
contract_at = i if j == i + 1 else 0
return np.sum(arr[sl], axis=contract_at)
def _tensor_contract_dense(arr, *pairs):
    """
    Contracts a dense tensor along one or more index pairs,
    keeping track of how the original index labels map onto the
    shrinking set of axes as contractions remove other indices.
    """
    remaining_labels = list(range(arr.ndim))
    for first, second in pairs:
        # Translate the original labels into current axis positions before
        # contracting, since each contraction shifts later axes down.
        arr = _tensor_contract_single(
            arr, remaining_labels.index(first), remaining_labels.index(second))
        remaining_labels.remove(first)
        remaining_labels.remove(second)
    return arr
def tensor_swap(q_oper, *pairs):
    """Transposes one or more pairs of indices of a Qobj.
    Note that this uses dense representations and thus
    should *not* be used for very large Qobjs.

    Parameters
    ----------
    q_oper : Qobj
        Operator whose subsystem indices are to be swapped.
    pairs : tuple
        One or more tuples ``(i, j)`` indicating that the
        ``i`` and ``j`` dimensions of the original qobj
        should be swapped.

    Returns
    -------
    sqobj : Qobj
        The original Qobj with all named index pairs swapped with each other
    """
    dims = q_oper.dims
    # Map dims-level index pairs onto the corresponding tensor axes.
    tensor_pairs = dims_idxs_to_tensor_idxs(dims, pairs)

    data = q_oper.data.toarray()

    # Reshape into tensor indices
    data = data.reshape(dims_to_tensor_shape(dims))

    # Now permute the dims list so we know how to get back.
    flat_dims = flatten(dims)
    perm = list(range(len(flat_dims)))
    for i, j in pairs:
        flat_dims[i], flat_dims[j] = flat_dims[j], flat_dims[i]
    for i, j in tensor_pairs:
        perm[i], perm[j] = perm[j], perm[i]
    dims = unflatten(flat_dims, enumerate_flat(dims))

    # Next, permute the actual indices of the dense tensor.
    data = data.transpose(perm)

    # Reshape back, using the left and right of dims.
    data = data.reshape(list(map(np.prod, dims)))

    return Qobj(inpt=data, dims=dims, superrep=q_oper.superrep)
def tensor_contract(qobj, *pairs):
    """Contracts a qobj along one or more index pairs.
    Note that this uses dense representations and thus
    should *not* be used for very large Qobjs.
    Parameters
    ----------
    qobj : Qobj
        Operator whose index pairs are to be contracted.
    pairs : tuple
        One or more tuples ``(i, j)`` indicating that the
        ``i`` and ``j`` dimensions of the original qobj
        should be contracted.
    Returns
    -------
    cqobj : Qobj
        The original Qobj with all named index pairs contracted
        away.
    """
    # Record and label the original dims.
    dims = qobj.dims
    dims_idxs = enumerate_flat(dims)
    tensor_dims = dims_to_tensor_shape(dims)
    # Convert to dense first, since sparse won't support the reshaping we need.
    qtens = qobj.data.toarray()
    # Reshape by the flattened dims.
    qtens = qtens.reshape(tensor_dims)
    # Contract out the indices from the flattened object.
    # Note that we need to feed pairs through dims_idxs_to_tensor_idxs
    # to ensure that we are contracting the right indices.
    qtens = _tensor_contract_dense(qtens, *dims_idxs_to_tensor_idxs(dims, pairs))
    # Remove the contracted indexes from dims so we know how to
    # reshape back.
    # This concerns dims, and not the tensor indices, so we need
    # to make sure to use the original dims indices and not the ones
    # generated by dims_to_* functions.
    contracted_idxs = deep_remove(dims_idxs, *flatten(list(map(list, pairs))))
    contracted_dims = unflatten(flatten(dims), contracted_idxs)
    # We don't need to check for tensor idxs versus dims idxs here,
    # as column- versus row-stacking will never move an index for the
    # vectorized operator spaces all the way from the left to the right.
    # np.product was deprecated in NumPy 1.25 and removed in 2.0; np.prod
    # is the supported spelling and behaves identically here.
    l_mtx_dims, r_mtx_dims = map(np.prod, map(flatten, contracted_dims))
    # Reshape back into a 2D matrix.
    qmtx = qtens.reshape((l_mtx_dims, r_mtx_dims))
    # Return back as a qobj.
    return Qobj(qmtx, dims=contracted_dims, superrep=qobj.superrep)
import qutip.states
| 4,566 |
1,069 | <reponame>Leopere/django-th
#!/usr/bin/env python
# coding: utf-8
from __future__ import unicode_literals
from django.core.management.base import BaseCommand
from django_th.recycle import recycle
class Command(BaseCommand):
    # Django management command; run via `python manage.py <command name>`.
    help = 'Trigger all data from cache in version 2'
    def handle(self, *args, **options):
        """
        Entry point invoked by Django: delegates to
        django_th.recycle.recycle() to process all the triggers
        that need to be handled.
        """
        recycle()
| 156 |
1,861 | <filename>cpp/spectrum/core/decisions/MetadataDecision.cpp<gh_stars>1000+
// Copyright (c) Facebook, Inc. and its affiliates.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the root directory of this source tree.
#include "MetadataDecision.h"
namespace facebook {
namespace spectrum {
namespace core {
namespace decisions {
image::Metadata calculateOutputMetadata(
    const image::Specification& inputImageSpecification,
    const folly::Optional<image::Metadata>& extraMetadata,
    const image::Orientation& metadataOrientation,
    const bool compressorSupportsSettingMetadata) {
  // A compressor that cannot carry metadata must not be asked to encode a
  // non-default orientation; in that case emit empty metadata.
  if (!compressorSupportsSettingMetadata) {
    SPECTRUM_ENFORCE_IF_NOT(metadataOrientation == image::Orientation::Up);
    return image::Metadata{};
  }
  // Start from the input image's metadata, merge in any extra entries,
  // then record the requested orientation.
  auto metadata = inputImageSpecification.metadata;
  if (extraMetadata.hasValue()) {
    metadata.merge(*extraMetadata);
  }
  metadata.entries().setOrientation(metadataOrientation);
  return metadata;
}
} // namespace decisions
} // namespace core
} // namespace spectrum
} // namespace facebook
| 337 |
323 | /*
* Copyright (c) 2018, SUSE LLC.
*
* This program is licensed under the BSD license, read LICENSE.BSD
* for further information
*/
/* Opaque handle for an open zchunk decompression stream. */
struct solv_zchunk;
/* Open a zchunk stream for reading from fp; streamid presumably selects
 * which embedded stream to decode — confirm against the implementation. */
extern struct solv_zchunk *solv_zchunk_open(FILE *fp, unsigned int streamid);
/* Read up to len decompressed bytes into buf; returns the byte count
 * (ssize_t suggests a negative value on error — verify in solv_zchunk.c). */
extern ssize_t solv_zchunk_read(struct solv_zchunk *zck, char *buf, size_t len);
/* Close the stream and release its resources. */
extern int solv_zchunk_close(struct solv_zchunk *zck);
| 140 |
507 | <gh_stars>100-1000
# tests/test_provider_camptocamp_jwt.py
# Automatically generated by tools/makecode.py (24-Sep-2021 15:19:50 UTC)
def test_provider_import():
    # Smoke test: the generated provider module must be importable.
    import terrascript.provider.camptocamp.jwt
def test_resource_import():
    # Smoke test: each generated resource class must be importable.
    from terrascript.resource.camptocamp.jwt import jwt_hashed_token
    from terrascript.resource.camptocamp.jwt import jwt_signed_token
# TODO: Shortcut imports without namespace for official and supported providers.
# TODO: This has to be moved into a required_providers block.
# def test_version_source():
#
# import terrascript.provider.camptocamp.jwt
#
# t = terrascript.provider.camptocamp.jwt.jwt()
# s = str(t)
#
# assert 'https://github.com/camptocamp/terraform-provider-jwt' in s
# assert '0.0.3' in s
| 292 |
338 | <filename>ants/contrib/bids/cohort.py
"""
BIDSCohort class for handling BIDS datasets.
This class allows you to streamline various tasks
related to neuroimage processing, machine learning
analysis, and deep learning sampling
NOTES
------
# Example: run N4 bias correction on the T1 images of a BIDS dataset
cohort = BIDSCohort(directory='~/desktop/projects/dlbs-bet/data/raw-bids/')
def n4(img):
return img.n4_bias_correction()
cohort.apply_to_images(fn=n4, modality='T1w', subjects='*',
out_suffix='N4')
"""
import ants
from bids.grabbids import BIDSLayout
class Cohort(object):
    """
    Base class for Cohort objects. This class allows
    you to streamline the processing and analysis of
    medical imaging datasets
    """
    def __init__(self):
        # Intentionally empty: concrete cohort subclasses perform their
        # own initialization.
        pass
class CSVCohort(Cohort): pass  # placeholder: cohort backed by a CSV manifest (not implemented)
class FolderCohort(Cohort): pass  # placeholder: cohort backed by a directory tree (not implemented)
class ListCohort(Cohort): pass  # placeholder: cohort backed by in-memory lists (not implemented)
class CSVSampler(object):
    """
    Placeholder sampler driven by a dataframe manifest. The reader /
    transform / return-processor hooks suggest an input→target pipeline,
    but none of it is implemented yet.
    """
    def __init__(self, dataframe, input_reader=None, target_reader=None,
                 input_transform=None, target_transform=None, co_transform=None,
                 input_return_processor=None, target_return_processor=None, co_return_processor=None):
        # Not implemented: arguments are currently ignored.
        pass
    def generate(self):
        """
        Return a generator that can be passed in to Keras `fit_generator`
        """
        pass
class BIDSCohort(BIDSLayout):
    """
    BIDSCohort class for handling BIDS datasets.
    """
    def __init__(self, path, **kwargs):
        """
        Initialize a BIDS cohort object
        """
        super(BIDSCohort, self).__init__(path=path, **kwargs)
    def apply_to_images(self, fn, modality, image_type=None, subjects='*', out_suffix=''):
        # Applies `fn` (an ANTs image -> image callable) to one image per
        # subject and writes the result next to the input, with out_suffix
        # inserted before the '.nii.gz' extension (e.g. 'T1w.nii.gz' ->
        # 'T1wN4.nii.gz').
        # NOTE(review): assumes BIDSLayout exposes `subjects` and
        # `get_modality`, and that `subjects`/`image_type` filtering is
        # handled there — the parameters are otherwise unused here; confirm
        # against the installed bids.grabbids version.
        for subject in self.subjects:
            in_file = self.get_modality(subject=subject, modality=modality)
            img = ants.image_read(in_file)
            img_proc = fn(img)
            out_file = in_file.replace('.nii.gz', '%s.nii.gz' % out_suffix)
            ants.image_write(img_proc, out_file)
    def __getitem__(self, index):
        """
        Access items from the cohort.
        Arguments
        ---------
        index : string
            if index is a subject ID, then a dictionary will be returned
            where keys are the available modalities and values is the
            file path or list of file paths available for that modality
        """
        pass
    def create_sampler(self, inputs, targets, input_reader=None, target_reader=None,
                       input_transform=None, target_transform=None, co_transform=None,
                       input_return_processor=None, target_return_processor=None, co_return_processor=None):
        """
        Create a BIDSSampler that can be used to generate infinite augmented samples
        """
        pass
    def copy_structure(self, other_directory):
        """
        Copy the folder structure of the BIDSCohort
        to another base directory, without any of the
        actual files being copied.
        This is useful for creating a separate BIDSCohort
        for processed data.
        """
        pass
| 1,237 |
16,989 | // Copyright 2015 The Bazel Authors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.devtools.build.lib.shell;
import static com.google.common.truth.Truth.assertThat;
import java.util.logging.Level;
import java.util.logging.Logger;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.JUnit4;
/** Tests {@link LogUtil#toTruncatedString}. */
/*
* Note: The toTruncatedString method uses the platform encoding intentionally,
* so the unittest does to. Check out the comment in the implementation in
* case you're wondering why.
*/
@RunWith(JUnit4.class)
public class ToTruncatedStringTest {
  @Before
  public final void configureLogger() throws Exception {
    // enable all log statements to ensure there are no problems with
    // logging code
    Logger.getLogger("com.google.devtools.build.lib.shell.Command").setLevel(Level.FINEST);
  }
  @Test
  public void testTruncatingNullYieldsEmptyString() {
    // null input is treated as "no output", not an error.
    assertThat(LogUtil.toTruncatedString(null)).isEmpty();
  }
  @Test
  public void testTruncatingEmptyArrayYieldsEmptyString() {
    assertThat(LogUtil.toTruncatedString(new byte[0])).isEmpty();
  }
  @Test
  public void testTruncatingSampleArrayYieldsTruncatedString() {
    // The expected string below depends on the input being exactly 261
    // bytes and the truncation threshold being 150 bytes; do not reflow
    // these literals.
    String sampleInput = "Well, there could be a lot of output, but we want " +
        "to produce a useful log. A log is useful if it contains the " +
        "interesting information (like what the command was), and maybe " +
        "some of the output. However, too much is too much, so we just " +
        "cut it after 150 bytes ...";
    String expectedOutput = "Well, there could be a lot of output, but we " +
        "want to produce a useful log. A log is useful if it contains " +
        "the interesting information (like what the c[... truncated. " +
        "original size was 261 bytes.]";
    assertThat(LogUtil.toTruncatedString(sampleInput.getBytes())).isEqualTo(expectedOutput);
  }
  @Test
  public void testTruncatingHelloWorldYieldsHelloWorld() {
    // Short input (below the truncation threshold) passes through unchanged.
    String helloWorld = "Hello, world.";
    assertThat(LogUtil.toTruncatedString(helloWorld.getBytes())).isEqualTo(helloWorld);
  }
}
| 871 |
364 | <reponame>rodrigo-bruno/DeathStarBench
{
"secret": "secret",
"social-graph-service": {
"addr": "social-graph-service",
"port": 9090
},
"social-graph-mongodb": {
"addr": "social-graph-mongodb",
"port": 27017
},
"social-graph-redis": {
"addr": "social-graph-redis",
"port": 6379
},
"write-home-timeline-service": {
"addr": "write-home-timeline-service",
"port": 9090
},
"write-home-timeline-rabbitmq": {
"addr": "write-home-timeline-rabbitmq",
"port": 5672
},
"home-timeline-redis": {
"addr": "home-timeline-redis",
"port": 6379
},
"compose-post-service": {
"addr": "compose-post-service",
"port": 9090
},
"compose-post-redis": {
"addr": "compose-post-redis",
"port": 6379
},
"user-timeline-service": {
"addr": "user-timeline-service",
"port": 9090
},
"user-timeline-mongodb": {
"addr": "user-timeline-mongodb",
"port": 27017
},
"user-timeline-redis": {
"addr": "user-timeline-redis",
"port": 6379
},
"post-storage-service": {
"addr": "post-storage-service",
"port": 9090
},
"post-storage-mongodb": {
"addr": "post-storage-mongodb",
"port": 27017
},
"post-storage-memcached": {
"addr": "post-storage-memcached",
"port": 11211
},
"unique-id-service": {
"addr": "unique-id-service",
"port": 9090
},
"media-service": {
"addr": "media-service",
"port": 9090
},
"text-service": {
"addr": "media-service",
"port": 9090
},
"user-mention-service": {
"addr": "user-mention-service",
"port": 9090
},
"url-shorten-service": {
"addr": "url-shorten-service",
"port": 9090
},
"url-shorten-memcached": {
"addr": "url-shorten-memcached",
"port": 11211
},
"url-shorten-mongodb": {
"addr": "url-shorten-mongodb",
"port": 27017
},
"user-service": {
"addr": "user-service",
"port": 9090
},
"user-memcached": {
"addr": "user-memcached",
"port": 11211
},
"user-mongodb": {
"addr": "user-mongodb",
"port": 27017
},
"home-timeline-service": {
"addr": "home-timeline-service",
"port": 9090
}
} | 987 |
190,993 | /* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/java/src/main/native/graph_operation_builder_jni.h"
#include <cstring>
#include <memory>
#include "tensorflow/c/c_api.h"
#include "tensorflow/java/src/main/native/exception_jni.h"
namespace {
// Converts a Java jlong handle back into a TF_OperationDescription*.
// A handle of 0 means finish() has already consumed the description;
// in that case an IllegalStateException is thrown and nullptr returned.
TF_OperationDescription* requireHandle(JNIEnv* env, jlong handle) {
  if (handle == 0) {
    throwException(env, kIllegalStateException,
                   "Operation has already been built");
    return nullptr;
  }
  return reinterpret_cast<TF_OperationDescription*>(handle);
}
// Fills *out with the (operation, output index) pair encoded by
// op_handle/index. Returns false (after throwing) when op_handle is 0,
// which indicates the owning Graph has been closed.
bool resolveOutput(JNIEnv* env, jlong op_handle, jint index, TF_Output* out) {
  if (op_handle == 0) {
    throwException(env, kIllegalStateException,
                   "close() was called on the Graph");
    return false;
  }
  out->oper = reinterpret_cast<TF_Operation*>(op_handle);
  out->index = static_cast<int>(index);
  return true;
}
// Converts a Java jlong handle back into a TF_Tensor*, throwing
// IllegalStateException (and returning nullptr) when the Java Tensor
// has already been closed (handle == 0).
TF_Tensor* requireTensor(JNIEnv* env, jlong handle) {
  if (handle == 0) {
    throwException(env, kIllegalStateException,
                   "close() has been called on the Tensor");
    return nullptr;
  }
  return reinterpret_cast<TF_Tensor*>(handle);
}
} // namespace
// Creates a new TF_OperationDescription of the given op type/name inside
// the graph and returns it to Java as an opaque jlong handle.
JNIEXPORT jlong JNICALL Java_org_tensorflow_GraphOperationBuilder_allocate(
    JNIEnv* env, jclass clazz, jlong graph_handle, jstring type, jstring name) {
  if (graph_handle == 0) {
    throwException(env, kIllegalStateException,
                   "close() has been called on the Graph");
    return 0;
  }
  TF_Graph* graph = reinterpret_cast<TF_Graph*>(graph_handle);
  // UTF chars must be released in all paths; TF_NewOperation copies them.
  const char* op_type = env->GetStringUTFChars(type, nullptr);
  const char* op_name = env->GetStringUTFChars(name, nullptr);
  TF_OperationDescription* d = TF_NewOperation(graph, op_type, op_name);
  env->ReleaseStringUTFChars(name, op_name);
  env->ReleaseStringUTFChars(type, op_type);
  static_assert(sizeof(jlong) >= sizeof(TF_OperationDescription*),
                "Cannot represent a C TF_OperationDescription as a Java long");
  return reinterpret_cast<jlong>(d);
}
// Finalizes the operation description into a TF_Operation. Returns the
// operation handle on success, 0 on failure (after finish, the
// description handle is invalid regardless of outcome).
JNIEXPORT jlong JNICALL Java_org_tensorflow_GraphOperationBuilder_finish(
    JNIEnv* env, jclass clazz, jlong handle) {
  TF_OperationDescription* d = requireHandle(env, handle);
  if (d == nullptr) return 0;
  TF_Status* status = TF_NewStatus();
  TF_Operation* op = TF_FinishOperation(d, status);
  // throwExceptionIfNotOK evidently returns true when status is OK
  // (throwing otherwise), so op is only surfaced on success.
  if (throwExceptionIfNotOK(env, status)) {
    TF_DeleteStatus(status);
    return reinterpret_cast<jlong>(op);
  }
  TF_DeleteStatus(status);
  return 0;
}
// Adds a single (operation, output index) input edge to the operation
// under construction. No-op if either handle fails to resolve (an
// exception has then already been thrown).
JNIEXPORT void JNICALL Java_org_tensorflow_GraphOperationBuilder_addInput(
    JNIEnv* env, jclass clazz, jlong handle, jlong op_handle, jint index) {
  TF_Output out;
  if (!resolveOutput(env, op_handle, index, &out)) return;
  TF_OperationDescription* d = requireHandle(env, handle);
  if (d == nullptr) return;
  TF_AddInput(d, out);
}
// Adds a list of (operation, output index) input edges to the operation
// under construction. Throws IllegalArgumentException if the handle and
// index arrays differ in length.
JNIEXPORT void JNICALL Java_org_tensorflow_GraphOperationBuilder_addInputList(
    JNIEnv* env, jclass clazz, jlong handle, jlongArray op_handles,
    jintArray indices) {
  TF_OperationDescription* d = requireHandle(env, handle);
  if (d == nullptr) return;
  // GetArrayLength returns jsize (signed 32-bit). Keeping n as jsize avoids
  // a signed/unsigned mismatch in the comparison below and, crucially,
  // passing a size_t through the "%d" format (undefined behavior on LP64
  // platforms, where size_t is 64 bits).
  const jsize n = env->GetArrayLength(op_handles);
  if (env->GetArrayLength(indices) != n) {
    throwException(env, kIllegalArgumentException,
                   "mismatch in number of Operations (%d) and output indices "
                   "(%d) provided",
                   n, env->GetArrayLength(indices));
    return;
  }
  std::unique_ptr<TF_Output[]> o(new TF_Output[n]);
  jlong* oph = env->GetLongArrayElements(op_handles, nullptr);
  jint* idx = env->GetIntArrayElements(indices, nullptr);
  bool ok = true;
  for (jsize i = 0; i < n && ok; ++i) {
    ok = resolveOutput(env, oph[i], idx[i], &o[i]);
  }
  // JNI_ABORT: the arrays were only read, no need to copy back.
  env->ReleaseIntArrayElements(indices, idx, JNI_ABORT);
  env->ReleaseLongArrayElements(op_handles, oph, JNI_ABORT);
  if (!ok) return;
  TF_AddInputList(d, o.get(), n);
}
// Adds a control dependency (execution-order edge, no data flow) from the
// given operation to the operation under construction.
JNIEXPORT void JNICALL
Java_org_tensorflow_GraphOperationBuilder_addControlInput(JNIEnv* env,
                                                          jclass clazz,
                                                          jlong handle,
                                                          jlong op_handle) {
  if (op_handle == 0) {
    throwException(env, kIllegalStateException,
                   "control input is not valid, "
                   "perhaps the Graph containing it has been closed()?");
    return;
  }
  TF_Operation* control = reinterpret_cast<TF_Operation*>(op_handle);
  TF_OperationDescription* d = requireHandle(env, handle);
  if (d == nullptr) return;
  TF_AddControlInput(d, control);
}
// Sets the device placement string (e.g. "/cpu:0") for the operation
// under construction.
JNIEXPORT void JNICALL Java_org_tensorflow_GraphOperationBuilder_setDevice(
    JNIEnv* env, jclass clazz, jlong handle, jstring device) {
  TF_OperationDescription* d = requireHandle(env, handle);
  if (d == nullptr) return;
  const char* cdevice = env->GetStringUTFChars(device, nullptr);
  TF_SetDevice(d, cdevice);
  env->ReleaseStringUTFChars(device, cdevice);
}
// Sets a string-valued attribute. The value arrives as raw bytes so that
// arbitrary (non-UTF-8) byte strings round-trip unchanged.
JNIEXPORT void JNICALL Java_org_tensorflow_GraphOperationBuilder_setAttrString(
    JNIEnv* env, jclass clazz, jlong handle, jstring name, jbyteArray value) {
  static_assert(sizeof(jbyte) == 1,
                "Require Java byte to be represented as a single byte");
  TF_OperationDescription* d = requireHandle(env, handle);
  if (d == nullptr) return;
  const char* cname = env->GetStringUTFChars(name, nullptr);
  jbyte* cvalue = env->GetByteArrayElements(value, nullptr);
  TF_SetAttrString(d, cname, cvalue, env->GetArrayLength(value));
  // JNI_ABORT: value was only read; no copy-back needed.
  env->ReleaseByteArrayElements(value, cvalue, JNI_ABORT);
  env->ReleaseStringUTFChars(name, cname);
}
// Generates a JNI entry point setAttr<Name>(handle, name, value) that
// forwards a scalar attribute to TF_SetAttr<Name>, converting the Java
// value to the corresponding C type.
#define DEFINE_SET_ATTR_SCALAR(name, jtype, ctype)                         \
  JNIEXPORT void JNICALL                                                   \
      Java_org_tensorflow_GraphOperationBuilder_setAttr##name(             \
          JNIEnv* env, jclass clazz, jlong handle, jstring name,           \
          jtype value) {                                                   \
    static_assert(                                                         \
        sizeof(ctype) >= sizeof(jtype),                                    \
        "Information loss when converting between Java and C types");      \
    TF_OperationDescription* d = requireHandle(env, handle);               \
    if (d == nullptr) return;                                              \
    const char* cname = env->GetStringUTFChars(name, nullptr);             \
    TF_SetAttr##name(d, cname, static_cast<ctype>(value));                 \
    env->ReleaseStringUTFChars(name, cname);                               \
  }
// Generates the list variant setAttr<Name>List(handle, name, values),
// copying the Java array into a C array of the target type before calling
// TF_SetAttr<Name>List.
#define DEFINE_SET_ATTR_LIST(name, jname, jtype, ctype)             \
  JNIEXPORT void JNICALL                                            \
      Java_org_tensorflow_GraphOperationBuilder_setAttr##name##List(    \
          JNIEnv* env, jclass clazz, jlong handle, jstring name,    \
          jtype##Array value) {                                     \
    TF_OperationDescription* d = requireHandle(env, handle);        \
    if (d == nullptr) return;                                       \
    const char* cname = env->GetStringUTFChars(name, nullptr);      \
    /* Make a copy of the array to paper over any differences */    \
    /* in byte representations of the jtype and ctype */            \
    /* For example, jint vs TF_DataType. */                         \
    /* If this copy turns out to be a problem in practice */        \
    /* can avoid it for many types. */                              \
    const int n = env->GetArrayLength(value);                       \
    std::unique_ptr<ctype[]> cvalue(new ctype[n]);                  \
    jtype* elems = env->Get##jname##ArrayElements(value, nullptr);  \
    for (int i = 0; i < n; ++i) {                                   \
      cvalue[i] = static_cast<ctype>(elems[i]);                     \
    }                                                               \
    TF_SetAttr##name##List(d, cname, cvalue.get(), n);              \
    env->Release##jname##ArrayElements(value, elems, JNI_ABORT);    \
    env->ReleaseStringUTFChars(name, cname);                        \
  }
// Convenience: emit both the scalar and the list entry point for a type.
#define DEFINE_SET_ATTR(name, jname, jtype, ctype) \
  DEFINE_SET_ATTR_SCALAR(name, jtype, ctype)       \
  DEFINE_SET_ATTR_LIST(name, jname, jtype, ctype)
DEFINE_SET_ATTR(Int, Long, jlong, int64_t);
DEFINE_SET_ATTR(Float, Float, jfloat, float);
DEFINE_SET_ATTR(Bool, Boolean, jboolean, unsigned char);
DEFINE_SET_ATTR(Type, Int, jint, TF_DataType);
#undef DEFINE_SET_ATTR
#undef DEFINE_SET_ATTR_LIST
#undef DEFINE_SET_ATTR_SCALAR
// Sets a tensor-valued attribute from an existing Tensor handle.
JNIEXPORT void JNICALL Java_org_tensorflow_GraphOperationBuilder_setAttrTensor(
    JNIEnv* env, jclass clazz, jlong handle, jstring name,
    jlong tensor_handle) {
  TF_OperationDescription* d = requireHandle(env, handle);
  if (d == nullptr) return;
  TF_Tensor* t = requireTensor(env, tensor_handle);
  if (t == nullptr) return;
  const char* cname = env->GetStringUTFChars(name, nullptr);
  TF_Status* status = TF_NewStatus();
  TF_SetAttrTensor(d, cname, t, status);
  // Throws on failure; cleanup below runs either way.
  throwExceptionIfNotOK(env, status);
  TF_DeleteStatus(status);
  env->ReleaseStringUTFChars(name, cname);
}
// Sets a list-of-tensors attribute from an array of Tensor handles.
JNIEXPORT void JNICALL
Java_org_tensorflow_GraphOperationBuilder_setAttrTensorList(
    JNIEnv* env, jclass clazz, jlong handle, jstring name,
    jlongArray tensor_handles) {
  TF_OperationDescription* d = requireHandle(env, handle);
  if (d == nullptr) return;
  const int n = env->GetArrayLength(tensor_handles);
  std::unique_ptr<TF_Tensor*[]> tensors(new TF_Tensor*[n]);
  jlong* jhandles = env->GetLongArrayElements(tensor_handles, nullptr);
  bool ok = true;
  for (int i = 0; i < n && ok; ++i) {
    tensors[i] = requireTensor(env, jhandles[i]);
    // requireTensor throws on a closed handle; ExceptionCheck detects it.
    ok = !env->ExceptionCheck();
  }
  env->ReleaseLongArrayElements(tensor_handles, jhandles, JNI_ABORT);
  if (!ok) return;
  const char* cname = env->GetStringUTFChars(name, nullptr);
  TF_Status* status = TF_NewStatus();
  TF_SetAttrTensorList(d, cname, tensors.get(), n, status);
  throwExceptionIfNotOK(env, status);
  TF_DeleteStatus(status);
  env->ReleaseStringUTFChars(name, cname);
}
// Sets a shape-valued attribute. A negative num_dims denotes an unknown
// rank, in which case no dimension array is passed (cvalue stays null).
JNIEXPORT void JNICALL Java_org_tensorflow_GraphOperationBuilder_setAttrShape(
    JNIEnv* env, jclass clazz, jlong handle, jstring name, jlongArray shape,
    jint num_dims) {
  TF_OperationDescription* d = requireHandle(env, handle);
  if (d == nullptr) return;
  std::unique_ptr<int64_t[]> cvalue;
  // num_dims and env->GetArrayLength(shape) are assumed to be consistent.
  // i.e., either num_dims < 0 or num_dims == env->GetArrayLength(shape).
  if (num_dims > 0) {
    cvalue.reset(new int64_t[num_dims]);
    jlong* elems = env->GetLongArrayElements(shape, nullptr);
    for (int i = 0; i < num_dims; ++i) {
      cvalue[i] = static_cast<int64_t>(elems[i]);
    }
    env->ReleaseLongArrayElements(shape, elems, JNI_ABORT);
  }
  const char* cname = env->GetStringUTFChars(name, nullptr);
  TF_SetAttrShape(d, cname, cvalue.get(), static_cast<int>(num_dims));
  env->ReleaseStringUTFChars(name, cname);
}
// Sets a list-of-shapes attribute. All shapes arrive flattened in a
// single `shapes` array; `num_dims[i]` gives the rank of shape i, and the
// loop below rebuilds per-shape pointers (cdims) into the flat copy.
JNIEXPORT void JNICALL
Java_org_tensorflow_GraphOperationBuilder_setAttrShapeList(
    JNIEnv* env, jclass clazz, jlong handle, jstring name, jlongArray shapes,
    jintArray num_dims) {
  TF_OperationDescription* d = requireHandle(env, handle);
  if (d == nullptr) return;
  std::unique_ptr<int64_t[]> cshapes;
  std::unique_ptr<int64_t*[]> cdims;
  std::unique_ptr<int[]> cnum_dims;
  const int num_dims_length = env->GetArrayLength(num_dims);
  if (num_dims_length > 0) {
    const int shapes_length = env->GetArrayLength(shapes);
    cshapes.reset(new int64_t[shapes_length]);
    cdims.reset(new int64_t*[num_dims_length]);
    cnum_dims.reset(new int[num_dims_length]);
    jlong* shapes_elems =
        static_cast<jlong*>(env->GetPrimitiveArrayCritical(shapes, nullptr));
    // shapes_length << 3 == shapes_length * sizeof(jlong) (8 bytes).
    std::memcpy(cshapes.get(), shapes_elems, shapes_length << 3);
    env->ReleasePrimitiveArrayCritical(shapes, shapes_elems, JNI_ABORT);
    int64_t* cshapes_ptr = cshapes.get();
    jint* num_dims_elems =
        static_cast<jint*>(env->GetPrimitiveArrayCritical(num_dims, nullptr));
    for (int i = 0; i < num_dims_length; ++i) {
      cnum_dims[i] = static_cast<int>(num_dims_elems[i]);
      cdims[i] = cshapes_ptr;
      // A non-positive rank (unknown rank) consumes no entries.
      if (cnum_dims[i] > 0) {
        cshapes_ptr += cnum_dims[i];
      }
    }
    env->ReleasePrimitiveArrayCritical(num_dims, num_dims_elems, JNI_ABORT);
  }
  const char* cname = env->GetStringUTFChars(name, nullptr);
  TF_SetAttrShapeList(d, cname, cdims.get(), cnum_dims.get(), num_dims_length);
  env->ReleaseStringUTFChars(name, cname);
}
// Sets a list-of-strings attribute. Each element is a byte[] so that
// arbitrary byte strings round-trip; all element arrays stay pinned until
// after the TF_SetAttrStringList call.
JNIEXPORT void JNICALL
Java_org_tensorflow_GraphOperationBuilder_setAttrStringList(
    JNIEnv* env, jclass object, jlong handle, jstring name,
    jobjectArray values) {
  TF_OperationDescription* d = requireHandle(env, handle);
  if (d == nullptr) return;
  const char* cname = env->GetStringUTFChars(name, nullptr);
  int num_values = env->GetArrayLength(values);
  static_assert(sizeof(jbyte) == 1,
                "Require Java byte to be represented as a single byte");
  std::unique_ptr<jbyteArray[]> jarrays(new jbyteArray[num_values]);
  std::unique_ptr<jbyte*[]> jvalues(new jbyte*[num_values]);
  std::unique_ptr<void*[]> cvalues(new void*[num_values]);
  std::unique_ptr<size_t[]> lengths(new size_t[num_values]);
  for (int i = 0; i < num_values; ++i) {
    jbyteArray v =
        static_cast<jbyteArray>(env->GetObjectArrayElement(values, i));
    jarrays[i] = v;
    jvalues[i] = env->GetByteArrayElements(v, nullptr);
    cvalues[i] = jvalues[i];
    lengths[i] = static_cast<size_t>(env->GetArrayLength(v));
  }
  TF_SetAttrStringList(d, cname, cvalues.get(), lengths.get(), num_values);
  for (int i = 0; i < num_values; ++i) {
    env->ReleaseByteArrayElements(jarrays[i], jvalues[i], JNI_ABORT);
  }
  env->ReleaseStringUTFChars(name, cname);
}
| 6,121 |
1,615 | //
// MLNLayoutNode.h
//
//
// Created by MoMo on 2018/10/24.
//
#import <UIKit/UIKit.h>
#import "MLNViewConst.h"
#define isLayoutNodeWidthNeedMerge(NODE) (NODE.widthType == MLNLayoutMeasurementTypeMatchParent &&\
(NODE.supernode.mergedWidthType == MLNLayoutMeasurementTypeWrapContent || \
NODE.supernode.isHorizontalMaxMode))
#define isLayoutNodeHeightNeedMerge(NODE) (NODE.heightType == MLNLayoutMeasurementTypeMatchParent &&\
(NODE.supernode.mergedHeightType == MLNLayoutMeasurementTypeWrapContent || \
NODE.supernode.isVerticalMaxMode))
NS_ASSUME_NONNULL_BEGIN
// Lifecycle of a layout node within a measure/layout pass.
typedef enum : NSUInteger {
    MLNLayoutNodeStatusIdle = 0, // By default.
    MLNLayoutNodeStatusNeedLayout, // marked dirty; must be re-measured/laid out
    MLNLayoutNodeStatusHasNewLayout, // measured; new frame not yet applied to the view
    MLNLayoutNodeStatusUp2Date, // frame applied; nothing to do
} MLNLayoutNodeStatus;
// How the node's frame is produced: by the auto-layout engine, or taken
// directly from the native view's frame.
typedef enum : NSUInteger {
    MLNLayoutStrategySimapleAuto = 0, // By default. (sic: "Simaple" kept for ABI/source compat)
    MLNLayoutStrategyNativeFrame,
} MLNLayoutStrategy;
// Layout node backing a UIView in the MLN layout system. It holds the
// measured geometry (position/size, margins, padding, gravity, weight)
// for its target view and forms a tree mirroring the view hierarchy.
// NOTE(review): one node per laid-out view is assumed — confirm against
// the layout engine that consumes these nodes.
@interface MLNLayoutNode : NSObject
//*******
//****** Absolute
//*****
@property (nonatomic, assign, readonly) CGFloat x;
@property (nonatomic, assign, readonly) CGFloat y;
@property (nonatomic, assign, readonly) CGFloat width;
@property (nonatomic, assign, readonly) CGFloat height;
//*******
//****** MaxSize
//*****
@property (nonatomic, assign) CGFloat maxWidth;
@property (nonatomic, assign) CGFloat maxHeight;
@property (nonatomic, assign) CGFloat minWidth;
@property (nonatomic, assign) CGFloat minHeight;
//*******
//****** Gravity
//*****
@property (nonatomic, assign) enum MLNGravity gravity;
//*******
//****** Margin
//*****
@property (nonatomic, assign) CGFloat marginTop;
@property (nonatomic, assign) CGFloat marginBottom;
@property (nonatomic, assign) CGFloat marginLeft;
@property (nonatomic, assign) CGFloat marginRight;
//*******
//****** Padding
//*****
@property (nonatomic, assign) CGFloat paddingTop;
@property (nonatomic, assign) CGFloat paddingBottom;
@property (nonatomic, assign) CGFloat paddingLeft;
@property (nonatomic, assign) CGFloat paddingRight;
@property (nonatomic, assign, getter=isPaddingNeedUpdated) BOOL paddingNeedUpdated;// default is YES.
// Marks the padding as consumed by the current layout pass.
- (void)paddingUpdated;
//*******
//****** Measure
//***** (results of the most recent measure pass, before being applied)
@property (nonatomic, assign) CGFloat measuredX;
@property (nonatomic, assign) CGFloat measuredY;
@property (nonatomic, assign) CGFloat measuredWidth;
@property (nonatomic, assign) CGFloat measuredHeight;
@property (nonatomic, assign) CGFloat lastMeasuredMaxWidth;
@property (nonatomic, assign) CGFloat lastMeasuredMaxHeight;
@property (nonatomic, assign) CGFloat lastGravityZoneWidth;
@property (nonatomic, assign) CGFloat lastGravityZoneHeight;
//*******
//****** Offset
//*****
@property (nonatomic, assign) CGFloat offsetX;
@property (nonatomic, assign) CGFloat offsetY;
@property (nonatomic, assign) CGFloat offsetWidth;
@property (nonatomic, assign) CGFloat offsetHeight;
//*******
//****** anchorPoint
//*****
@property (nonatomic, assign, readonly) CGPoint anchorPoint;
//*******
//****** State
//*****
@property (nonatomic, assign, readonly) MLNLayoutStrategy layoutStrategy;
@property (nonatomic, assign, readonly) MLNLayoutNodeStatus status;
@property (nonatomic, assign, getter=isWrapContent) BOOL wrapContent;
@property (nonatomic, assign) MLNLayoutMeasurementType widthType;
@property (nonatomic, assign) MLNLayoutMeasurementType heightType;
@property (nonatomic, assign, readonly) MLNLayoutMeasurementType mergedWidthType;
@property (nonatomic, assign, readonly) MLNLayoutMeasurementType mergedHeightType;
@property (nonatomic, assign, getter=isEnable) BOOL enable;
@property (nonatomic, assign, getter=isRoot) BOOL root;
@property (nonatomic, assign) CGFloat priority;
@property (nonatomic, assign) BOOL isVerticalMaxMode;
@property (nonatomic, assign) BOOL isHorizontalMaxMode;
@property (nonatomic, assign, getter=isGone) BOOL gone;
- (BOOL)isDirty;
- (BOOL)hasNewLayout;
- (void)changeLayoutStrategyTo:(MLNLayoutStrategy)layoutStrategy;
//*******
//****** weight
//***** (LinearLayout-style proportional sizing)
@property (nonatomic, assign) int weight;
@property (nonatomic, assign) CGFloat widthProportion;
@property (nonatomic, assign) CGFloat heightProportion;
@property (nonatomic, assign) BOOL isWidthExcatly;
@property (nonatomic, assign) BOOL isHeightExcatly;
- (CGFloat)calculateWidthBaseOnWeightWithMaxWidth:(CGFloat)maxWidth;
- (CGFloat)calculateHeightBaseOnWeightWithMaxHeight:(CGFloat)maxHeight;
//*******
//****** Node
//*****
@property (nonatomic, weak) MLNLayoutNode *supernode;
@property (nonatomic, strong, nullable) MLNLayoutNode *overlayNode;
//*******
//****** Root Node
//*****
@property (nonatomic, weak) MLNLayoutNode *rootnode;
//*******
//****** View
//*****
@property (nonatomic, weak, readonly) UIView *targetView;
//*******
//****** Initialization
//*****
- (instancetype)initWithTargetView:(nullable UIView *)targetView NS_DESIGNATED_INITIALIZER;
//*******
//****** Node Tree
//*****
@property (nonatomic, assign) NSUInteger idx;
- (BOOL)isContainer;
- (void)removeFromSupernode;
//*******
//****** Measure Size
//*****
- (void)mergeMeasurementTypes;
- (CGSize)measureSizeWithMaxWidth:(CGFloat)maxWidth maxHeight:(CGFloat)maxHeight;
- (CGFloat)myMaxWidthWithMaxWidth:(CGFloat)maxWidth;
- (CGFloat)myMaxHeightWithMaxHeight:(CGFloat)maxHeight;
- (void)measureSizeLightMatchParentWithMaxWidth:(CGFloat)maxWidth maxHeight:(CGFloat)maxHeight;
- (CGFloat)measurePriority;
- (void)forceUseMatchParentForWidthMeasureType;
- (void)forceUseMatchParentForHeightMeasureType;
//*******
//****** Layout
//*****
- (void)changeX:(CGFloat)x;
- (void)changeY:(CGFloat)y;
- (void)changeWidth:(CGFloat)width;
- (void)changeHeight:(CGFloat)height;
- (void)changeAnchorPoint:(CGPoint)point;
- (void)updateTargetViewFrameIfNeed;
- (void)needLayout;
- (void)needLayoutAndSpread;
- (void)needUpdateLayout;
- (void)updatedLayout;
- (void)requestLayout;
- (void)layoutOverlayNode;
//*******
//****** bind and unbind
//*****
- (void)bindSuper:(MLNLayoutNode *)supernode;
- (void)unbind;
@end
NS_ASSUME_NONNULL_END
| 2,428 |
421 |
//<Snippet1>
#using <System.Xml.dll>
#using <System.dll>
using namespace System;
using namespace System::IO;
using namespace System::Xml;
using namespace System::Xml::Serialization;
// Sample serializable type: the SoapAttributeAttribute applied below is
// replaced at runtime via SoapAttributeOverrides in Run (see CreateOverrideSerializer).
public ref class Group
{
public:
   // This attribute will be overridden.
   [SoapAttributeAttribute(Namespace="http://www.cpandl.com")]
   String^ GroupName;
};
// Demonstrates overriding SOAP serialization attributes at runtime:
// serializes a Group with the GroupName field renamed to "TeamName" and
// moved into a different namespace.
public ref class Run
{
public:
   void SerializeOverride( String^ filename )
   {
      // Create an instance of the XmlSerializer class
      // that overrides the serialization.
      XmlSerializer^ overRideSerializer = CreateOverrideSerializer();
      // Writing the file requires a TextWriter.
      TextWriter^ writer = gcnew StreamWriter( filename );
      // Create an instance of the class that will be serialized.
      Group^ myGroup = gcnew Group;
      // Set the Object* properties.
      myGroup->GroupName = ".NET";
      // Serialize the class, and close the TextWriter.
      overRideSerializer->Serialize( writer, myGroup );
      writer->Close();
   }
private:
   // Builds an XmlSerializer whose attribute overrides replace the
   // SoapAttributeAttribute declared on Group::GroupName.
   XmlSerializer^ CreateOverrideSerializer()
   {
      SoapAttributeOverrides^ mySoapAttributeOverrides = gcnew SoapAttributeOverrides;
      SoapAttributes^ mySoapAttributes = gcnew SoapAttributes;
      // Create a new SoapAttributeAttribute to the
      // one applied to the Group class. The resulting XML
      // stream will use the new namespace and attribute name.
      SoapAttributeAttribute^ mySoapAttribute = gcnew SoapAttributeAttribute;
      mySoapAttribute->AttributeName = "TeamName";
      // Change the Namespace.
      mySoapAttribute->Namespace = "http://www.cohowinery.com";
      mySoapAttributes->SoapAttribute = mySoapAttribute;
      mySoapAttributeOverrides->Add( Group::typeid, "GroupName", mySoapAttributes );
      XmlTypeMapping^ myMapping = (gcnew SoapReflectionImporter( mySoapAttributeOverrides ))->ImportTypeMapping( Group::typeid );
      XmlSerializer^ ser = gcnew XmlSerializer( myMapping );
      return ser;
   }
};
int main()
{
   // Write the overridden serialization of a Group to SoapOveride.xml.
   Run^ test = gcnew Run;
   test->SerializeOverride( "SoapOveride.xml" );
}
//<?xml version="1.0" encoding="utf-8" ?>
// <Group xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
//xmlns:xsd="http://www.w3.org/2001/XMLSchema" n1:TeamName=".NET"
//xmlns:n1="http://www.cohowinery.com" />
//</Snippet1>
| 928 |
635 | <gh_stars>100-1000
/*
* Copyright 2018 - 2019 <NAME> (i-net software)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package de.inetsoftware.jwebassembly.runtime;
import java.util.ArrayList;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import de.inetsoftware.jwebassembly.ScriptEngine;
import de.inetsoftware.jwebassembly.WasmRule;
/**
* @author <NAME>
*/
@RunWith( Parameterized.class )
public abstract class AbstractBaseTest {
    // Rule that compiles the test class to WebAssembly and executes it.
    private final WasmRule wasm;
    // JavaScript engine used to run the produced WebAssembly module.
    private final ScriptEngine script;
    // Name of the method under test inside the compiled module.
    private final String method;
    // Arguments passed to the method under test.
    private final Object[] params;
    /**
     * Create a parameterized test case.
     *
     * @param wasm the rule that compiles and runs the WebAssembly module
     * @param script the engine to execute the module with
     * @param method the name of the method to invoke
     * @param params the arguments for the method invocation
     */
    protected AbstractBaseTest( WasmRule wasm, ScriptEngine script, String method, Object[] params ) {
        this.wasm = wasm;
        this.script = script;
        this.method = method;
        this.params = params;
    }
    /**
     * Add one parameter set for the Parameterized runner.
     * Note: the WasmRule is not part of the tuple; subclasses are expected
     * to supply it in their constructor.
     *
     * @param list the collected parameter sets
     * @param script the engine to run with
     * @param method the method name
     * @param params the method arguments
     */
    protected static void addParam( ArrayList<Object[]> list, ScriptEngine script, String method, Object ...params ) {
        list.add( new Object[]{script, method, params} );
    }
    /**
     * Get the ScriptEngine with which the test is running.
     *
     * @return the engine
     */
    protected ScriptEngine getScriptEngine() {
        return script;
    }
    /**
     * Get the name of the method that is currently tested
     * @return the name
     */
    protected String getMethod() {
        return method;
    }
    /**
     * Prepare the rule for the configured engine before every test.
     *
     * @throws Exception on compile or setup errors
     */
    @Before
    public void before() throws Exception {
        wasm.before( script );
    }
    /** Run the configured method with the configured parameters. */
    @Test
    public void test() {
        wasm.test( script, method, params );
    }
}
| 718 |
1,350 | // Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
// Code generated by Microsoft (R) AutoRest Code Generator.
package com.azure.resourcemanager.cognitiveservices.implementation;
import com.azure.resourcemanager.cognitiveservices.fluent.models.AccountSkuListResultInner;
import com.azure.resourcemanager.cognitiveservices.models.AccountSku;
import com.azure.resourcemanager.cognitiveservices.models.AccountSkuListResult;
import java.util.Collections;
import java.util.List;
/**
 * Read-only wrapper exposing an {@code AccountSkuListResultInner} through the
 * {@code AccountSkuListResult} model interface.
 */
public final class AccountSkuListResultImpl implements AccountSkuListResult {
    // Auto-generated inner model holding the actual REST payload.
    // Made final: it is assigned once in the constructor and never reassigned.
    private final AccountSkuListResultInner innerObject;

    // Entry point back into the service client; retained for parity with
    // other generated wrapper classes.
    private final com.azure.resourcemanager.cognitiveservices.CognitiveServicesManager serviceManager;

    AccountSkuListResultImpl(
        AccountSkuListResultInner innerObject,
        com.azure.resourcemanager.cognitiveservices.CognitiveServicesManager serviceManager) {
        this.innerObject = innerObject;
        this.serviceManager = serviceManager;
    }

    /**
     * Returns the SKU list from the inner model.
     *
     * @return an unmodifiable view of the inner list, or an empty list when the payload had none
     */
    public List<AccountSku> value() {
        List<AccountSku> inner = this.innerModel().value();
        return inner != null ? Collections.unmodifiableList(inner) : Collections.emptyList();
    }

    /** @return the wrapped auto-generated inner model */
    public AccountSkuListResultInner innerModel() {
        return this.innerObject;
    }

    private com.azure.resourcemanager.cognitiveservices.CognitiveServicesManager manager() {
        return this.serviceManager;
    }
}
| 503 |
505 | package de.rieckpil.blog;
import org.junit.jupiter.api.Test;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.test.autoconfigure.jdbc.JdbcTest;
import org.springframework.jdbc.core.JdbcTemplate;
import javax.sql.DataSource;
import static org.junit.jupiter.api.Assertions.assertNotNull;
@JdbcTest
public class JdbcAccessTest {

  // Embedded DataSource auto-configured by @JdbcTest.
  @Autowired
  private DataSource dataSource;

  // JdbcTemplate auto-configured on top of the embedded DataSource.
  @Autowired
  private JdbcTemplate jdbcTemplate;

  /**
   * Smoke test: verifies the JDBC slice context wires both beans.
   * NOTE(review): the name suggests a query for books, but no query is
   * executed here — consider renaming or adding a real query assertion.
   */
  @Test
  public void shouldReturnBooks() {
    assertNotNull(dataSource);
    assertNotNull(jdbcTemplate);
  }
}
| 211 |
830 | <gh_stars>100-1000
r"""
=================================
Wasserstein 1D with PyTorch
=================================
In this small example, we consider the following minization problem:
.. math::
\mu^* = \min_\mu W(\mu,\nu)
where :math:`\nu` is a reference 1D measure. The problem is handled
by a projected gradient descent method, where the gradient is computed
by pyTorch automatic differentiation. The projection on the simplex
ensures that the iterate will remain on the probability simplex.
This example illustrates both `wasserstein_1d` function and backend use within
the POT framework.
"""
# Author: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
#
# License: MIT License
import numpy as np
import matplotlib.pylab as pl
import matplotlib as mpl
import torch
from ot.lp import wasserstein_1d
from ot.datasets import make_1D_gauss as gauss
from ot.utils import proj_simplex
red = np.array(mpl.colors.to_rgb('red'))
blue = np.array(mpl.colors.to_rgb('blue'))
n = 100 # nb bins
# bin positions
x = np.arange(n, dtype=np.float64)
# Gaussian distributions
a = gauss(n, m=20, s=5) # m= mean, s= std
b = gauss(n, m=60, s=10)
# enforce sum to one on the support
a = a / a.sum()
b = b / b.sum()
device = "cuda" if torch.cuda.is_available() else "cpu"
# use pyTorch for our data
x_torch = torch.tensor(x).to(device=device)
a_torch = torch.tensor(a).to(device=device).requires_grad_(True)
b_torch = torch.tensor(b).to(device=device)
lr = 1e-6
nb_iter_max = 800
loss_iter = []
pl.figure(1, figsize=(8, 4))
pl.plot(x, a, 'b', label='Source distribution')
pl.plot(x, b, 'r', label='Target distribution')
for i in range(nb_iter_max):
# Compute the Wasserstein 1D with torch backend
loss = wasserstein_1d(x_torch, x_torch, a_torch, b_torch, p=2)
# record the corresponding loss value
loss_iter.append(loss.clone().detach().cpu().numpy())
loss.backward()
# performs a step of projected gradient descent
with torch.no_grad():
grad = a_torch.grad
a_torch -= a_torch.grad * lr # step
a_torch.grad.zero_()
a_torch.data = proj_simplex(a_torch) # projection onto the simplex
# plot one curve every 10 iterations
if i % 10 == 0:
mix = float(i) / nb_iter_max
pl.plot(x, a_torch.clone().detach().cpu().numpy(), c=(1 - mix) * blue + mix * red)
pl.legend()
pl.title('Distribution along the iterations of the projected gradient descent')
pl.show()
pl.figure(2)
pl.plot(range(nb_iter_max), loss_iter, lw=3)
pl.title('Evolution of the loss along iterations', fontsize=16)
pl.show()
# %%
# Wasserstein barycenter
# ---------
# In this example, we consider the following Wasserstein barycenter problem
# $$ \\eta^* = \\min_\\eta\;\;\; (1-t)W(\\mu,\\eta) + tW(\\eta,\\nu)$$
# where :math:`\\mu` and :math:`\\nu` are reference 1D measures, and :math:`t`
# is a parameter :math:`\in [0,1]`. The problem is handled by a project gradient
# descent method, where the gradient is computed by pyTorch automatic differentiation.
# The projection on the simplex ensures that the iterate will remain on the
# probability simplex.
#
# This example illustrates both `wasserstein_1d` function and backend use within the
# POT framework.
device = "cuda" if torch.cuda.is_available() else "cpu"
# use pyTorch for our data
x_torch = torch.tensor(x).to(device=device)
a_torch = torch.tensor(a).to(device=device)
b_torch = torch.tensor(b).to(device=device)
bary_torch = torch.tensor((a + b).copy() / 2).to(device=device).requires_grad_(True)
lr = 1e-6
nb_iter_max = 2000
loss_iter = []
# instant of the interpolation
t = 0.5
for i in range(nb_iter_max):
# Compute the Wasserstein 1D with torch backend
loss = (1 - t) * wasserstein_1d(x_torch, x_torch, a_torch.detach(), bary_torch, p=2) + t * wasserstein_1d(x_torch, x_torch, b_torch, bary_torch, p=2)
# record the corresponding loss value
loss_iter.append(loss.clone().detach().cpu().numpy())
loss.backward()
# performs a step of projected gradient descent
with torch.no_grad():
grad = bary_torch.grad
bary_torch -= bary_torch.grad * lr # step
bary_torch.grad.zero_()
bary_torch.data = proj_simplex(bary_torch) # projection onto the simplex
pl.figure(3, figsize=(8, 4))
pl.plot(x, a, 'b', label='Source distribution')
pl.plot(x, b, 'r', label='Target distribution')
pl.plot(x, bary_torch.clone().detach().cpu().numpy(), c='green', label='W barycenter')
pl.legend()
pl.title('Wasserstein barycenter computed by gradient descent')
pl.show()
pl.figure(4)
pl.plot(range(nb_iter_max), loss_iter, lw=3)
pl.title('Evolution of the loss along iterations', fontsize=16)
pl.show()
| 1,764 |
765 | /*
* Copyright (c) 2014 The University of Wisconsin
*
* Copyright (c) 2006 INRIA (Institut National de Recherche en
* Informatique et en Automatique / French National Research Institute
* for Computer Science and Applied Mathematics)
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met: redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer;
* redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution;
* neither the name of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/* @file
* Implementation of a TAGE branch predictor
*/
#include "cpu/pred/tage.hh"
#include "base/intmath.hh"
#include "base/logging.hh"
#include "base/random.hh"
#include "base/trace.hh"
#include "debug/Fetch.hh"
#include "debug/Tage.hh"
namespace gem5
{
namespace branch_prediction
{
// Constructor: thin wrapper that delegates all prediction logic to the
// TAGEBase object supplied via the parameter struct.
// Fix: the parameter declaration had been corrupted by an HTML-entity
// artifact ("&para" rendered as '¶'), which did not compile; restored
// to "const TAGEParams &params".
TAGE::TAGE(const TAGEParams &params) : BPredUnit(params), tage(params.tage)
{
}
// PREDICTOR UPDATE
// Commit-time (or squash-time) update of the predictor state for one branch.
// When `squashed` is true the global history is restored/recomputed and no
// table update happens; otherwise stats and tables are updated for
// conditional branches, followed by the non-speculative history update.
// Note: bp_history is consumed (deleted) on the non-squash path only.
void
TAGE::update(ThreadID tid, Addr branch_pc, bool taken, void* bp_history,
             bool squashed, const StaticInstPtr & inst, Addr corrTarget)
{
    assert(bp_history);
    TageBranchInfo *bi = static_cast<TageBranchInfo*>(bp_history);
    TAGEBase::BranchInfo *tage_bi = bi->tageBranchInfo;
    if (squashed) {
        // This restores the global history, then update it
        // and recomputes the folded histories.
        tage->squash(tid, taken, tage_bi, corrTarget);
        return;
    }
    // Small random value used by TAGE's probabilistic allocation policy.
    int nrand = random_mt.random<int>() & 3;
    if (bi->tageBranchInfo->condBranch) {
        DPRINTF(Tage, "Updating tables for branch:%lx; taken?:%d\n",
                branch_pc, taken);
        tage->updateStats(taken, bi->tageBranchInfo);
        tage->condBranchUpdate(tid, branch_pc, taken, tage_bi, nrand,
                               corrTarget, bi->tageBranchInfo->tagePred);
    }
    // optional non speculative update of the histories
    tage->updateHistories(tid, branch_pc, taken, tage_bi, false, inst,
                          corrTarget);
    delete bi;
}
// Discard the per-branch bookkeeping for a squashed branch.
void
TAGE::squash(ThreadID tid, void *bp_history)
{
    TageBranchInfo *info = static_cast<TageBranchInfo*>(bp_history);
    DPRINTF(Tage, "Deleting branch info: %lx\n", info->tageBranchInfo->branchPC);
    delete info;
}
// Allocate per-branch bookkeeping, hand it back via `b`, and return the
// TAGE direction prediction for this PC.
bool
TAGE::predict(ThreadID tid, Addr branch_pc, bool cond_branch, void* &b)
{
    TageBranchInfo *info = new TageBranchInfo(*tage);
    b = static_cast<void*>(info);
    return tage->tagePredict(tid, branch_pc, cond_branch, info->tageBranchInfo);
}
// Conditional-branch lookup: predict, then speculatively update the
// histories with the predicted direction.
bool
TAGE::lookup(ThreadID tid, Addr branch_pc, void* &bp_history)
{
    const bool taken = predict(tid, branch_pc, true, bp_history);
    TageBranchInfo *info = static_cast<TageBranchInfo*>(bp_history);
    DPRINTF(Tage, "Lookup branch: %lx; predict:%d\n", branch_pc, taken);
    tage->updateHistories(tid, branch_pc, taken, info->tageBranchInfo, true);
    return taken;
}
// Forward a BTB-miss notification to the underlying TAGEBase predictor.
void
TAGE::btbUpdate(ThreadID tid, Addr branch_pc, void* &bp_history)
{
    TageBranchInfo *info = static_cast<TageBranchInfo*>(bp_history);
    tage->btbUpdate(tid, branch_pc, info->tageBranchInfo);
}
// Unconditional branch: allocate bookkeeping via predict() and update the
// histories as always-taken.
void
TAGE::uncondBranch(ThreadID tid, Addr br_pc, void* &bp_history)
{
    DPRINTF(Tage, "UnConditionalBranch: %lx\n", br_pc);
    predict(tid, br_pc, false, bp_history);
    TageBranchInfo *info = static_cast<TageBranchInfo*>(bp_history);
    tage->updateHistories(tid, br_pc, true, info->tageBranchInfo, true);
}
} // namespace branch_prediction
} // namespace gem5
| 1,705 |
777 | <reponame>breandan/graphviz-java<filename>graphviz-java/src/test/java/guru/nidi/graphviz/attribute/validate/ColorListDatatypeTest.java
/*
* Copyright © 2015 <NAME> (<EMAIL>)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package guru.nidi.graphviz.attribute.validate;
import org.junit.jupiter.api.Test;
/**
 * Validation tests for the Graphviz {@code colorList} attribute datatype
 * (colors separated by ':' with optional ';factor' weights).
 */
class ColorListDatatypeTest extends DatatypeTestBase {
    ColorListDatatypeTest() {
        super(new ColorListDatatype());
    }

    /** Well-formed color lists: hex colors, weight factors, multiple entries. */
    @Test
    void colorListOk() {
        assertOk("#12af 44");
        assertOk("#12af 44;.5");
        assertOk("#12af 44:blu");
        assertOk("#12af 44;1:blu;0");
    }

    /** Malformed lists: bad colors, bad/missing/out-of-range factors, factor sum > 1. */
    @Test
    void colorListNok() {
        assertMessage("has the invalid color value '#12'.", "#12;1");
        assertMessage("has the invalid color factor 'a' in '#121314;a'.", "#121314;a");
        assertMessage("is missing color factor after ';' in '#121314;'.", "#121314;");
        assertMessage("has a color factor '2' not between 0 and 1 in '#121314;2'.", "#121314;2");
        assertMessage("has a sum of factors 2.0 > 1 in 'blu;1:red;1'.", "blu;1:red;1");
    }
}
| 592 |
412 | /* FUNCTION: __CPROVER_danger_execute */
/* Synthesis parameters; each may be overridden from the command line. */
#ifndef __CPROVER_cegis_number_of_vars
#define __CPROVER_cegis_number_of_vars 2
#endif
#ifndef __CPROVER_cegis_number_of_consts
#define __CPROVER_cegis_number_of_consts 1
#endif
#ifndef __CPROVER_cegis_number_of_ops
#define __CPROVER_cegis_number_of_ops 3
#endif
#ifndef __CPROVER_cegis_max_solution_size
#define __CPROVER_cegis_max_solution_size 1
#endif
/* Operand pool: pointers to the values an instruction may read
 * (constants first, then variables, then earlier instruction results). */
const void *__CPROVER_cegis_OPS[__CPROVER_cegis_number_of_ops];
/* Destination slot for the result of each instruction. */
void *__CPROVER_cegis_RESULT_OPS[__CPROVER_cegis_max_solution_size];
typedef unsigned char opcodet;
typedef unsigned char opt;
/* One three-address instruction of a candidate program:
 * an opcode plus up to three operand indices into __CPROVER_cegis_OPS. */
struct __CPROVER_cegis_instructiont
{
  opcodet opcode;
  opt op0;
  opt op1;
  opt op2;
};
/* Highest legal opcode (see the dispatch tree in __CPROVER_danger_execute). */
#define __CPROVER_cegis_max_instruction 24u
/* Interpreter for candidate programs explored by the CEGIS danger-invariant
 * synthesiser.  Each instruction reads up to three operands from
 * __CPROVER_cegis_OPS and writes its result to the matching slot of
 * __CPROVER_cegis_RESULT_OPS.  The __CPROVER_assume constraints prune the
 * search space (valid operand indices, canonical operand ordering for
 * commutative ops, no division by zero).
 * NOTE: the __CPROVER_cegis_opcode_* labels are load-bearing — the
 * synthesiser maps traces back to instructions through them; do not rename
 * or remove them. */
void __CPROVER_danger_execute(struct __CPROVER_cegis_instructiont *program,
                              unsigned char size)
{
  for (unsigned char i = 0; i < size; ++i)
  {
/* Shorthand for the current instruction's opcode (never #undef'd on purpose). */
#define opcode program[i].opcode
    __CPROVER_assume(opcode <= __CPROVER_cegis_max_instruction);
    const unsigned int op0_id=program[i].op0;
    const unsigned int op1_id=program[i].op1;
    const unsigned int op2_id=program[i].op2;
    /* Operands may reference constants, variables, or results of
     * instructions executed before this one. */
    const unsigned int max_op_index=__CPROVER_cegis_number_of_vars + i;
    __CPROVER_assume(op0_id < max_op_index && op1_id < max_op_index && op2_id < max_op_index
        && (op0_id >= __CPROVER_cegis_number_of_consts || op1_id >= __CPROVER_cegis_number_of_consts || op2_id >= __CPROVER_cegis_number_of_consts)
        && (opcode > 5u || op0_id <= op1_id) && (opcode < 21u || !op1_id)
        && (opcode == 9u || !op2_id)
        && (opcode != 9u || op0_id != op2_id || op1_id <= op2_id));
    const unsigned int * const op0_ptr=__CPROVER_cegis_OPS[op0_id];
    const unsigned int * const op1_ptr=__CPROVER_cegis_OPS[op1_id];
    const unsigned int * const op2_ptr=__CPROVER_cegis_OPS[op2_id];
    __CPROVER_assume(op0_ptr && op1_ptr && op2_ptr); // No null pointers in op array
    const unsigned int op0=*op0_ptr;
    const unsigned int op1=*op1_ptr;
    __CPROVER_assume((opcode != 19 && opcode != 20) || op1); // Avoid div by 0.
    const unsigned int op2=*op2_ptr;
/* Signed views of the operands for the signed-comparison/shift opcodes. */
#define sop0 ((int) op0)
#define sop1 ((int) op1)
#define sop2 ((int) op2)
    unsigned int result;
    /* Binary-search dispatch over opcodes 0..24. */
    if (opcode < 15)
      if (opcode < 7)
        if (opcode < 3)
          if (opcode < 1)
            __CPROVER_cegis_opcode_0: result=op0 + op1;
          else if (opcode < 2)
            __CPROVER_cegis_opcode_1: result=op0 * op1;
          else
            __CPROVER_cegis_opcode_2: result=op0 &op1;
        else
          if (opcode < 5)
            if (opcode < 4)
              __CPROVER_cegis_opcode_3: result=op0 | op1;
            else
              __CPROVER_cegis_opcode_4: result=op0 ^ op1;
          else if (opcode < 6)
            __CPROVER_cegis_opcode_5: result=op0 != op1;
          else
            __CPROVER_cegis_opcode_6: result=!op0 || op1;
      else
        if (opcode < 11)
          if (opcode < 9)
            if (opcode < 8)
            {
              /* opcode 7: unsigned minimum */
              __CPROVER_cegis_opcode_first_7: result=op0 < op1;
              if (result) result=op0;
              else __CPROVER_cegis_opcode_last_7: result=op1;
            }
            else
            {
              /* opcode 8: unsigned maximum */
              __CPROVER_cegis_opcode_first_8: result=op0 > op1;
              if (result) result=op0;
              else __CPROVER_cegis_opcode_last_8: result=op1;
            }
          else if (opcode < 10)
          {
            /* opcode 9: ternary select (the only opcode using op2) */
            __CPROVER_cegis_opcode_first_9: if (op0) result=op1;
            else __CPROVER_cegis_opcode_last_9: result=op2;
          }
          else
            __CPROVER_cegis_opcode_10: result=op0 - op1;
        else
          if (opcode < 13)
            if (opcode < 12)
            {
              /* opcode 11: shift left (shift amount taken modulo 32) */
              __CPROVER_cegis_opcode_first_11: result=op1;
              //result%=sizeof(op0);
              result%=32u;
              __CPROVER_cegis_opcode_last_11: result=op0 << result;
            }
            else
            {
              /* opcode 12: logical shift right */
              __CPROVER_cegis_opcode_first_12: result=op1;
              //result%=sizeof(op0);
              result%=32u;
              __CPROVER_cegis_opcode_last_12: result=op0 >> result;
            }
          else if (opcode < 14)
          {
            /* opcode 13: logical shift right (duplicate of 12) */
            __CPROVER_cegis_opcode_first_13: result=op1;
            //result%=sizeof(op0);
            result%=32u;
            __CPROVER_cegis_opcode_last_13: result=op0 >> result;
          }
          else
          {
            /* opcode 14: arithmetic shift right (signed op0) */
            __CPROVER_cegis_opcode_first_14: result=op1;
            //result%=sizeof(op0);
            result%=32u;
            __CPROVER_cegis_opcode_last_14: result=sop0 >> result;
          }
    else if (opcode < 19)
      if (opcode < 17)
        if (opcode < 16)
          __CPROVER_cegis_opcode_15: result=op0 <= op1;
        else
          __CPROVER_cegis_opcode_16: result=op0 < op1;
      else if (opcode < 18)
        __CPROVER_cegis_opcode_17: result=sop0 <= sop1;
      else
        __CPROVER_cegis_opcode_18: result=sop0 < sop1;
    else if (opcode < 23)
      if (opcode < 21)
        if (opcode < 20)
          __CPROVER_cegis_opcode_19: result=op0 / op1;
        else
          __CPROVER_cegis_opcode_20: result=op0 % op1;
      else if (opcode < 22)
        __CPROVER_cegis_opcode_21: result=-op0;
      else
        __CPROVER_cegis_opcode_22: result=~op0;
    else if (opcode < 24)
      //__CPROVER_cegis_opcode_23: result=0u;
      __CPROVER_cegis_opcode_23: result=sop0 == -1;
    else
      __CPROVER_cegis_opcode_24: result=op0;
      //__CPROVER_cegis_opcode_24: result=sop0 != -1;
    *(unsigned int *)__CPROVER_cegis_RESULT_OPS[i]=result;
  }
}
| 2,698 |
2,151 | // Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef CHROMEOS_SERVICES_DEVICE_SYNC_DEVICE_SYNC_IMPL_H_
#define CHROMEOS_SERVICES_DEVICE_SYNC_DEVICE_SYNC_IMPL_H_
#include "base/macros.h"
#include "base/memory/ref_counted.h"
#include "base/memory/weak_ptr.h"
#include "chromeos/services/device_sync/public/mojom/device_sync.mojom.h"
#include "components/cryptauth/cryptauth_enrollment_manager.h"
#include "components/cryptauth/cryptauth_gcm_manager.h"
#include "components/cryptauth/remote_device_provider.h"
#include "components/signin/core/browser/account_info.h"
#include "mojo/public/cpp/bindings/binding_set.h"
#include "mojo/public/cpp/bindings/interface_ptr_set.h"
#include "services/preferences/public/cpp/pref_service_factory.h"
#include "services/preferences/public/mojom/preferences.mojom.h"
class PrefService;
namespace base {
class Clock;
} // namespace base
namespace cryptauth {
class CryptAuthClientFactory;
class CryptAuthDeviceManager;
class GcmDeviceInfoProvider;
class SoftwareFeatureManager;
} // namespace cryptauth
namespace gcm {
class GCMDriver;
} // namespace gcm
namespace identity {
class IdentityManager;
} // namespace identity
namespace net {
class URLRequestContextGetter;
} // namespace net
namespace service_manager {
class Connector;
} // namespace service_manager
namespace chromeos {
namespace device_sync {
// Concrete DeviceSync implementation. When DeviceSyncImpl is constructed, it
// starts an initialization flow with the following steps:
// (1) Verify that the primary user is logged in with a valid account ID.
// (2) Connect to the Prefs Service associated with that account.
// (3) Instantiate classes which communicate with the CryptAuth back-end.
// (4) Check enrollment state; if not yet enrolled, enroll the device.
// (5) When enrollment is valid, listen for device sync updates.
class DeviceSyncImpl : public mojom::DeviceSync,
                       public cryptauth::CryptAuthEnrollmentManager::Observer,
                       public cryptauth::RemoteDeviceProvider::Observer {
 public:
  // Factory for DeviceSyncImpl instances; tests can swap in a fake factory
  // via SetInstanceForTesting().
  class Factory {
   public:
    static std::unique_ptr<DeviceSyncImpl> NewInstance(
        identity::IdentityManager* identity_manager,
        gcm::GCMDriver* gcm_driver,
        service_manager::Connector* connector,
        cryptauth::GcmDeviceInfoProvider* gcm_device_info_provider,
        scoped_refptr<net::URLRequestContextGetter> url_request_context);
    static void SetInstanceForTesting(Factory* test_factory);
    virtual ~Factory();
    virtual std::unique_ptr<DeviceSyncImpl> BuildInstance(
        identity::IdentityManager* identity_manager,
        gcm::GCMDriver* gcm_driver,
        service_manager::Connector* connector,
        cryptauth::GcmDeviceInfoProvider* gcm_device_info_provider,
        scoped_refptr<net::URLRequestContextGetter> url_request_context);

   private:
    static Factory* test_factory_instance_;
  };

  ~DeviceSyncImpl() override;

  // Binds a request to this implementation. Should be called each time that the
  // service receives a request.
  void BindRequest(mojom::DeviceSyncRequest request);

 protected:
  // mojom::DeviceSync:
  void AddObserver(mojom::DeviceSyncObserverPtr observer,
                   AddObserverCallback callback) override;
  void ForceEnrollmentNow(ForceEnrollmentNowCallback callback) override;
  void ForceSyncNow(ForceSyncNowCallback callback) override;
  void GetLocalDeviceMetadata(GetLocalDeviceMetadataCallback callback) override;
  void GetSyncedDevices(GetSyncedDevicesCallback callback) override;
  void SetSoftwareFeatureState(
      const std::string& device_public_key,
      cryptauth::SoftwareFeature software_feature,
      bool enabled,
      bool is_exclusive,
      SetSoftwareFeatureStateCallback callback) override;
  void FindEligibleDevices(cryptauth::SoftwareFeature software_feature,
                           FindEligibleDevicesCallback callback) override;
  void GetDebugInfo(GetDebugInfoCallback callback) override;

  // cryptauth::CryptAuthEnrollmentManager::Observer:
  void OnEnrollmentFinished(bool success) override;

  // cryptauth::RemoteDeviceProvider::Observer:
  void OnSyncDeviceListChanged() override;

 private:
  friend class DeviceSyncServiceTest;

  // Stages of the initialization flow described in the class comment,
  // in the order they occur.
  enum class Status {
    FETCHING_ACCOUNT_INFO,
    CONNECTING_TO_USER_PREFS,
    WAITING_FOR_ENROLLMENT,
    READY
  };

  // Wrapper around preferences code. This class is necessary so that tests can
  // override this functionality to use a fake PrefService rather than a real
  // connection to the Preferences service.
  class PrefConnectionDelegate {
   public:
    virtual ~PrefConnectionDelegate();

    virtual scoped_refptr<PrefRegistrySimple> CreatePrefRegistry();
    virtual void ConnectToPrefService(
        service_manager::Connector* connector,
        scoped_refptr<PrefRegistrySimple> pref_registry,
        prefs::ConnectCallback callback);
  };

  DeviceSyncImpl(
      identity::IdentityManager* identity_manager,
      gcm::GCMDriver* gcm_driver,
      service_manager::Connector* connector,
      cryptauth::GcmDeviceInfoProvider* gcm_device_info_provider,
      scoped_refptr<net::URLRequestContextGetter> url_request_context,
      base::Clock* clock,
      std::unique_ptr<PrefConnectionDelegate> pref_connection_delegate);

  // Initialization-flow steps (see class comment for the full sequence).
  void ProcessPrimaryAccountInfo(const AccountInfo& primary_account_info);
  void ConnectToPrefStore();
  void OnConnectedToPrefService(std::unique_ptr<PrefService> pref_service);
  void InitializeCryptAuthManagementObjects();
  void CompleteInitializationAfterSuccessfulEnrollment();

  // Returns the synced device whose public key matches, if any.
  base::Optional<cryptauth::RemoteDevice> GetSyncedDeviceWithPublicKey(
      const std::string& public_key) const;

  // Success/error continuations for the SetSoftwareFeatureState and
  // FindEligibleDevices mojo calls.
  void OnSetSoftwareFeatureStateSuccess(
      const base::RepeatingCallback<void(const base::Optional<std::string>&)>&
          callback);
  void OnSetSoftwareFeatureStateError(
      const base::RepeatingCallback<void(const base::Optional<std::string>&)>&
          callback,
      const std::string& error);
  void OnFindEligibleDevicesSuccess(
      const base::RepeatingCallback<
          void(const base::Optional<std::string>&,
               mojom::FindEligibleDevicesResponsePtr)>& callback,
      const std::vector<cryptauth::ExternalDeviceInfo>& eligible_devices,
      const std::vector<cryptauth::IneligibleDevice>& ineligible_devices);
  void OnFindEligibleDevicesError(
      const base::RepeatingCallback<
          void(const base::Optional<std::string>&,
               mojom::FindEligibleDevicesResponsePtr)>& callback,
      const std::string& error);

  void SetPrefConnectionDelegateForTesting(
      std::unique_ptr<PrefConnectionDelegate> pref_connection_delegate);

  // Raw pointers below are unowned dependencies injected at construction.
  identity::IdentityManager* identity_manager_;
  gcm::GCMDriver* gcm_driver_;
  service_manager::Connector* connector_;
  cryptauth::GcmDeviceInfoProvider* gcm_device_info_provider_;
  scoped_refptr<net::URLRequestContextGetter> url_request_context_;
  base::Clock* clock_;
  std::unique_ptr<PrefConnectionDelegate> pref_connection_delegate_;
  Status status_;
  AccountInfo primary_account_info_;
  std::unique_ptr<PrefService> pref_service_;
  // CryptAuth management objects, created once prefs are connected.
  std::unique_ptr<cryptauth::CryptAuthGCMManager> cryptauth_gcm_manager_;
  std::unique_ptr<cryptauth::CryptAuthClientFactory> cryptauth_client_factory_;
  std::unique_ptr<cryptauth::CryptAuthEnrollmentManager>
      cryptauth_enrollment_manager_;
  std::unique_ptr<cryptauth::CryptAuthDeviceManager> cryptauth_device_manager_;
  std::unique_ptr<cryptauth::RemoteDeviceProvider> remote_device_provider_;
  std::unique_ptr<cryptauth::SoftwareFeatureManager> software_feature_manager_;
  mojo::InterfacePtrSet<mojom::DeviceSyncObserver> observers_;
  mojo::BindingSet<mojom::DeviceSync> bindings_;
  base::WeakPtrFactory<DeviceSyncImpl> weak_ptr_factory_;

  DISALLOW_COPY_AND_ASSIGN(DeviceSyncImpl);
};
} // namespace device_sync
} // namespace chromeos
#endif // CHROMEOS_SERVICES_DEVICE_SYNC_DEVICE_SYNC_IMPL_H_
| 2,663 |
659 | from test.apps.openapi.schema import OpenAPIVersion
import pytest
from _pytest.main import ExitCode
import schemathesis
from schemathesis import models
@pytest.fixture(autouse=True)
def unregister_hooks():
    # Auto-applied to every test in this module: once the test body
    # completes, drop any schemathesis hooks it registered so hook state
    # cannot leak into later tests.
    yield
    schemathesis.hooks.unregister_all()
@pytest.mark.operations("success")
def test_custom_cli_handlers(testdir, cli, schema_url, app):
# When `after_init_cli_run_handlers` redefines handlers
module = testdir.make_importable_pyfile(
hook="""
import click
import schemathesis
from schemathesis.cli.handlers import EventHandler
from schemathesis.runner import events
class SimpleHandler(EventHandler):
def handle_event(self, context, event):
if isinstance(event, events.Finished):
click.echo("Done!")
@schemathesis.hooks.register
def after_init_cli_run_handlers(
context,
handlers,
execution_context
):
handlers[:] = [SimpleHandler()]
"""
)
result = cli.main("--pre-run", module.purebasename, "run", schema_url)
# Then CLI should run successfully
assert result.exit_code == ExitCode.OK, result.stdout
# And the output should contain only the input from the new handler
assert result.stdout.strip() == "Done!"
@pytest.mark.parametrize("openapi_version", (OpenAPIVersion("3.0"),))
@pytest.mark.operations("success")
def test_before_call(testdir, cli, cli_args):
    """The `before_call` hook runs before each request and may mutate the case."""
    # When the `before_call` hook is registered
    module = testdir.make_importable_pyfile(
        hook="""
    import schemathesis

    note = print  # To avoid linting error

    @schemathesis.hooks.register
    def before_call(context, case):
        note("\\nBefore!")
        case.query = {"q": "42"}
    """
    )
    result = cli.main("--pre-run", module.purebasename, "run", *cli_args)
    assert result.exit_code == ExitCode.OK, result.stdout
    # Then it should be called before each `case.call`
    assert "Before!" in result.stdout.splitlines()
@pytest.mark.parametrize("openapi_version", (OpenAPIVersion("3.0"),))
@pytest.mark.operations("success")
def test_after_call(testdir, cli, cli_args):
    """The `after_call` hook sees the response and may rewrite its payload."""
    # When the `after_call` hook is registered
    # And it modifies the response and making it incorrect
    module = testdir.make_importable_pyfile(
        hook="""
    import schemathesis
    import requests

    @schemathesis.hooks.register
    def after_call(context, case, response):
        data = b'{"wrong": 42}'
        if isinstance(response, requests.Response):
            response._content = data
        else:
            response.set_data(data)
    """
    )
    result = cli.main("--pre-run", module.purebasename, "run", *cli_args, "-c", "all")
    # Then the tests should fail
    assert result.exit_code == ExitCode.TESTS_FAILED, result.stdout
    assert 'Response payload: `{"wrong": 42}`' in result.stdout.splitlines()
@pytest.mark.parametrize("openapi_version", (OpenAPIVersion("3.0"),))
@pytest.mark.operations("success")
def test_process_call_kwargs(testdir, cli, cli_args, mocker, app_type):
    """`process_call_kwargs` can inject extra keyword arguments into the call.

    The hook source below branches on `case.app`: WSGI calls take
    `follow_redirects`, real-network (requests) calls take `allow_redirects`.
    """
    # When the `process_call_kwargs` hook is registered
    # And it modifies `kwargs` by adding a new key there
    module = testdir.make_importable_pyfile(
        hook="""
    import schemathesis
    import requests

    @schemathesis.hooks.register
    def process_call_kwargs(context, case, kwargs):
        if case.app is not None:
            kwargs["follow_redirects"] = False
        else:
            kwargs["allow_redirects"] = False
    """
    )
    # Spy on the call path matching the app type to inspect the final kwargs.
    if app_type == "real":
        spy = mocker.spy(models.Case, "call")
    else:
        spy = mocker.spy(models.Case, "call_wsgi")
    result = cli.main("--pre-run", module.purebasename, "run", *cli_args)
    assert result.exit_code == ExitCode.OK, result.stdout
    if app_type == "real":
        assert spy.call_args[1]["allow_redirects"] is False
    else:
        assert spy.call_args[1]["follow_redirects"] is False
| 1,493 |
325 | /* SPDX-License-Identifier: BSD-3-Clause
*
* Copyright(c) 2017 Intel Corporation. All rights reserved.
*
* Author: <NAME> <<EMAIL>>
*/
/**
* \file arch/xtensa/include/arch/schedule/task.h
* \brief Arch task header file
* \authors <NAME> <<EMAIL>>
*/
/* This arch header may only be pulled in through sof/schedule/task.h:
 * its own guard is defined only when the parent header's guard is already
 * present; any direct include trips the #error below. */
#ifdef __SOF_SCHEDULE_TASK_H__

#ifndef __ARCH_SCHEDULE_TASK_H__
#define __ARCH_SCHEDULE_TASK_H__

#endif /* __ARCH_SCHEDULE_TASK_H__ */

#else

#error "This file shouldn't be included from outside of sof/schedule/task.h"

#endif /* __SOF_SCHEDULE_TASK_H__ */
| 219 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.