| max_stars_count (int64, 301 to 224k) | text (string, lengths 6 to 1.05M) | token_count (int64, 3 to 727k) |
---|---|---|
47,880 | /*
* Copyright (C) 2011 The Guava Authors
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
* in compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
package com.google.common.primitives;
import static com.google.common.base.Preconditions.checkArgument;
import static com.google.common.base.Preconditions.checkNotNull;
import static com.google.common.base.Preconditions.checkPositionIndexes;
import com.google.common.annotations.Beta;
import com.google.common.annotations.GwtCompatible;
import com.google.errorprone.annotations.CanIgnoreReturnValue;
import java.math.BigInteger;
import java.util.Arrays;
import java.util.Comparator;
/**
* Static utility methods pertaining to {@code long} primitives that interpret values as
* <i>unsigned</i> (that is, any negative value {@code x} is treated as the positive value {@code
* 2^64 + x}). The methods for which signedness is not an issue are in {@link Longs}, as well as
* signed versions of methods for which signedness is an issue.
*
* <p>In addition, this class provides several static methods for converting a {@code long} to a
* {@code String} and a {@code String} to a {@code long} that treat the {@code long} as an unsigned
* number.
*
* <p>Users of these utilities must be <i>extremely careful</i> not to mix up signed and unsigned
* {@code long} values. When possible, it is recommended that the {@link UnsignedLong} wrapper class
* be used, at a small efficiency penalty, to enforce the distinction in the type system.
*
* <p>See the Guava User Guide article on <a
* href="https://github.com/google/guava/wiki/PrimitivesExplained#unsigned-support">unsigned
* primitive utilities</a>.
*
* @author <NAME>
* @author <NAME>
* @author <NAME>
* @since 10.0
*/
@Beta
@GwtCompatible
@ElementTypesAreNonnullByDefault
public final class UnsignedLongs {
private UnsignedLongs() {}
public static final long MAX_VALUE = -1L; // Equivalent to 2^64 - 1
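  // Illustrative values (not from the original source): MAX_VALUE is the all-ones bit pattern, so it
  // reads as -1L when treated as signed, yet compare(MAX_VALUE, 1L) > 0 under the unsigned ordering
  // defined below.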
/**
* A (self-inverse) bijection which converts the ordering on unsigned longs to the ordering on
* longs, that is, {@code a <= b} as unsigned longs if and only if {@code flip(a) <= flip(b)} as
* signed longs.
*/
private static long flip(long a) {
return a ^ Long.MIN_VALUE;
}
/**
* Compares the two specified {@code long} values, treating them as unsigned values between {@code
* 0} and {@code 2^64 - 1} inclusive.
*
* <p><b>Java 8 users:</b> use {@link Long#compareUnsigned(long, long)} instead.
*
* @param a the first unsigned {@code long} to compare
* @param b the second unsigned {@code long} to compare
* @return a negative value if {@code a} is less than {@code b}; a positive value if {@code a} is
* greater than {@code b}; or zero if they are equal
*/
public static int compare(long a, long b) {
return Longs.compare(flip(a), flip(b));
}
/**
* Returns the least value present in {@code array}, treating values as unsigned.
*
* @param array a <i>nonempty</i> array of unsigned {@code long} values
* @return the value present in {@code array} that is less than or equal to every other value in
* the array according to {@link #compare}
* @throws IllegalArgumentException if {@code array} is empty
*/
public static long min(long... array) {
checkArgument(array.length > 0);
long min = flip(array[0]);
for (int i = 1; i < array.length; i++) {
long next = flip(array[i]);
if (next < min) {
min = next;
}
}
return flip(min);
}
/**
* Returns the greatest value present in {@code array}, treating values as unsigned.
*
* @param array a <i>nonempty</i> array of unsigned {@code long} values
* @return the value present in {@code array} that is greater than or equal to every other value
* in the array according to {@link #compare}
* @throws IllegalArgumentException if {@code array} is empty
*/
public static long max(long... array) {
checkArgument(array.length > 0);
long max = flip(array[0]);
for (int i = 1; i < array.length; i++) {
long next = flip(array[i]);
if (next > max) {
max = next;
}
}
return flip(max);
}
/**
* Returns a string containing the supplied unsigned {@code long} values separated by {@code
* separator}. For example, {@code join("-", 1, 2, 3)} returns the string {@code "1-2-3"}.
*
* @param separator the text that should appear between consecutive values in the resulting string
* (but not at the start or end)
* @param array an array of unsigned {@code long} values, possibly empty
*/
public static String join(String separator, long... array) {
checkNotNull(separator);
if (array.length == 0) {
return "";
}
// For pre-sizing a builder, just get the right order of magnitude
StringBuilder builder = new StringBuilder(array.length * 5);
builder.append(toString(array[0]));
for (int i = 1; i < array.length; i++) {
builder.append(separator).append(toString(array[i]));
}
return builder.toString();
}
/**
* Returns a comparator that compares two arrays of unsigned {@code long} values <a
* href="http://en.wikipedia.org/wiki/Lexicographical_order">lexicographically</a>. That is, it
 * compares, using {@link #compare(long, long)}, the first pair of values that follow any common
* prefix, or when one array is a prefix of the other, treats the shorter array as the lesser. For
* example, {@code [] < [1L] < [1L, 2L] < [2L] < [1L << 63]}.
*
* <p>The returned comparator is inconsistent with {@link Object#equals(Object)} (since arrays
* support only identity equality), but it is consistent with {@link Arrays#equals(long[],
* long[])}.
*/
public static Comparator<long[]> lexicographicalComparator() {
return LexicographicalComparator.INSTANCE;
}
enum LexicographicalComparator implements Comparator<long[]> {
INSTANCE;
@Override
public int compare(long[] left, long[] right) {
int minLength = Math.min(left.length, right.length);
for (int i = 0; i < minLength; i++) {
if (left[i] != right[i]) {
return UnsignedLongs.compare(left[i], right[i]);
}
}
return left.length - right.length;
}
@Override
public String toString() {
return "UnsignedLongs.lexicographicalComparator()";
}
}
/**
* Sorts the array, treating its elements as unsigned 64-bit integers.
*
* @since 23.1
*/
public static void sort(long[] array) {
checkNotNull(array);
sort(array, 0, array.length);
}
/**
* Sorts the array between {@code fromIndex} inclusive and {@code toIndex} exclusive, treating its
* elements as unsigned 64-bit integers.
*
* @since 23.1
*/
public static void sort(long[] array, int fromIndex, int toIndex) {
checkNotNull(array);
checkPositionIndexes(fromIndex, toIndex, array.length);
for (int i = fromIndex; i < toIndex; i++) {
array[i] = flip(array[i]);
}
Arrays.sort(array, fromIndex, toIndex);
for (int i = fromIndex; i < toIndex; i++) {
array[i] = flip(array[i]);
}
}
/**
* Sorts the elements of {@code array} in descending order, interpreting them as unsigned 64-bit
* integers.
*
* @since 23.1
*/
public static void sortDescending(long[] array) {
checkNotNull(array);
sortDescending(array, 0, array.length);
}
/**
* Sorts the elements of {@code array} between {@code fromIndex} inclusive and {@code toIndex}
* exclusive in descending order, interpreting them as unsigned 64-bit integers.
*
* @since 23.1
*/
public static void sortDescending(long[] array, int fromIndex, int toIndex) {
checkNotNull(array);
checkPositionIndexes(fromIndex, toIndex, array.length);
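    // x ^ Long.MAX_VALUE flips every bit except the sign bit (it equals flip(~x)), so an ascending
    // signed sort of the transformed values corresponds to a descending unsigned sort of the originals.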
for (int i = fromIndex; i < toIndex; i++) {
array[i] ^= Long.MAX_VALUE;
}
Arrays.sort(array, fromIndex, toIndex);
for (int i = fromIndex; i < toIndex; i++) {
array[i] ^= Long.MAX_VALUE;
}
}
/**
* Returns dividend / divisor, where the dividend and divisor are treated as unsigned 64-bit
* quantities.
*
* <p><b>Java 8 users:</b> use {@link Long#divideUnsigned(long, long)} instead.
*
* @param dividend the dividend (numerator)
* @param divisor the divisor (denominator)
* @throws ArithmeticException if divisor is 0
*/
public static long divide(long dividend, long divisor) {
if (divisor < 0) { // i.e., divisor >= 2^63:
if (compare(dividend, divisor) < 0) {
return 0; // dividend < divisor
} else {
return 1; // dividend >= divisor
}
}
// Optimization - use signed division if dividend < 2^63
if (dividend >= 0) {
return dividend / divisor;
}
/*
* Otherwise, approximate the quotient, check, and correct if necessary. Our approximation is
 * guaranteed to be either exact or one less than the correct value. This follows from the fact that
* floor(floor(x)/i) == floor(x/i) for any real x and integer i != 0. The proof is not quite
* trivial.
*/
long quotient = ((dividend >>> 1) / divisor) << 1;
long rem = dividend - quotient * divisor;
return quotient + (compare(rem, divisor) >= 0 ? 1 : 0);
}
/**
* Returns dividend % divisor, where the dividend and divisor are treated as unsigned 64-bit
* quantities.
*
* <p><b>Java 8 users:</b> use {@link Long#remainderUnsigned(long, long)} instead.
*
* @param dividend the dividend (numerator)
* @param divisor the divisor (denominator)
* @throws ArithmeticException if divisor is 0
* @since 11.0
*/
public static long remainder(long dividend, long divisor) {
if (divisor < 0) { // i.e., divisor >= 2^63:
if (compare(dividend, divisor) < 0) {
return dividend; // dividend < divisor
} else {
return dividend - divisor; // dividend >= divisor
}
}
// Optimization - use signed modulus if dividend < 2^63
if (dividend >= 0) {
return dividend % divisor;
}
/*
* Otherwise, approximate the quotient, check, and correct if necessary. Our approximation is
* guaranteed to be either exact or one less than the correct value. This follows from the fact
* that floor(floor(x)/i) == floor(x/i) for any real x and integer i != 0. The proof is not
* quite trivial.
*/
long quotient = ((dividend >>> 1) / divisor) << 1;
long rem = dividend - quotient * divisor;
return rem - (compare(rem, divisor) >= 0 ? divisor : 0);
}
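  // Worked example (illustrative): with dividend = 0xFFFFFFFFFFFFFFFFL (i.e. 2^64 - 1, stored as -1L)
  // and divisor = 10, divide(...) returns 1844674407370955161L and remainder(...) returns 5L.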
/**
* Returns the unsigned {@code long} value represented by the given decimal string.
*
* <p><b>Java 8 users:</b> use {@link Long#parseUnsignedLong(String)} instead.
*
* @throws NumberFormatException if the string does not contain a valid unsigned {@code long}
* value
* @throws NullPointerException if {@code string} is null (in contrast to {@link
* Long#parseLong(String)})
*/
@CanIgnoreReturnValue
public static long parseUnsignedLong(String string) {
return parseUnsignedLong(string, 10);
}
/**
* Returns the unsigned {@code long} value represented by a string with the given radix.
*
* <p><b>Java 8 users:</b> use {@link Long#parseUnsignedLong(String, int)} instead.
*
* @param string the string containing the unsigned {@code long} representation to be parsed.
* @param radix the radix to use while parsing {@code string}
* @throws NumberFormatException if the string does not contain a valid unsigned {@code long} with
* the given radix, or if {@code radix} is not between {@link Character#MIN_RADIX} and {@link
* Character#MAX_RADIX}.
* @throws NullPointerException if {@code string} is null (in contrast to {@link
* Long#parseLong(String)})
*/
@CanIgnoreReturnValue
public static long parseUnsignedLong(String string, int radix) {
checkNotNull(string);
if (string.length() == 0) {
throw new NumberFormatException("empty string");
}
if (radix < Character.MIN_RADIX || radix > Character.MAX_RADIX) {
throw new NumberFormatException("illegal radix: " + radix);
}
int maxSafePos = ParseOverflowDetection.maxSafeDigits[radix] - 1;
long value = 0;
for (int pos = 0; pos < string.length(); pos++) {
int digit = Character.digit(string.charAt(pos), radix);
if (digit == -1) {
throw new NumberFormatException(string);
}
if (pos > maxSafePos && ParseOverflowDetection.overflowInParse(value, digit, radix)) {
throw new NumberFormatException("Too large for unsigned long: " + string);
}
value = (value * radix) + digit;
}
return value;
}
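  // Illustrative parses: parseUnsignedLong("18446744073709551615") and
  // parseUnsignedLong("ffffffffffffffff", 16) both return -1L, the bit pattern of 2^64 - 1.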
/**
* Returns the unsigned {@code long} value represented by the given string.
*
* <p>Accepts a decimal, hexadecimal, or octal number given by specifying the following prefix:
*
* <ul>
* <li>{@code 0x}<i>HexDigits</i>
* <li>{@code 0X}<i>HexDigits</i>
* <li>{@code #}<i>HexDigits</i>
* <li>{@code 0}<i>OctalDigits</i>
* </ul>
*
* @throws NumberFormatException if the string does not contain a valid unsigned {@code long}
* value
* @since 13.0
*/
@CanIgnoreReturnValue
public static long decode(String stringValue) {
ParseRequest request = ParseRequest.fromString(stringValue);
try {
return parseUnsignedLong(request.rawValue, request.radix);
} catch (NumberFormatException e) {
NumberFormatException decodeException =
new NumberFormatException("Error parsing value: " + stringValue);
decodeException.initCause(e);
throw decodeException;
}
}
/*
* We move the static constants into this class so ProGuard can inline UnsignedLongs entirely
* unless the user is actually calling a parse method.
*/
private static final class ParseOverflowDetection {
private ParseOverflowDetection() {}
// calculated as 0xffffffffffffffff / radix
static final long[] maxValueDivs = new long[Character.MAX_RADIX + 1];
static final int[] maxValueMods = new int[Character.MAX_RADIX + 1];
static final int[] maxSafeDigits = new int[Character.MAX_RADIX + 1];
static {
BigInteger overflow = new BigInteger("10000000000000000", 16);
for (int i = Character.MIN_RADIX; i <= Character.MAX_RADIX; i++) {
maxValueDivs[i] = divide(MAX_VALUE, i);
maxValueMods[i] = (int) remainder(MAX_VALUE, i);
maxSafeDigits[i] = overflow.toString(i).length() - 1;
}
}
/**
* Returns true if (current * radix) + digit is a number too large to be represented by an
* unsigned long. This is useful for detecting overflow while parsing a string representation of
 * a number. Does not verify whether the supplied radix is valid; passing an invalid radix will give
* undefined results or an ArrayIndexOutOfBoundsException.
*/
static boolean overflowInParse(long current, int digit, int radix) {
if (current >= 0) {
if (current < maxValueDivs[radix]) {
return false;
}
if (current > maxValueDivs[radix]) {
return true;
}
// current == maxValueDivs[radix]
return (digit > maxValueMods[radix]);
}
// current < 0: high bit is set
return true;
}
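    // Illustrative values for radix 10 (derived from the static initializer above):
    // maxValueDivs[10] == 1844674407370955161L, maxValueMods[10] == 5 and maxSafeDigits[10] == 19,
    // so the overflow check only kicks in once a decimal string has at least 20 digits.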
}
/**
* Returns a string representation of x, where x is treated as unsigned.
*
* <p><b>Java 8 users:</b> use {@link Long#toUnsignedString(long)} instead.
*/
public static String toString(long x) {
return toString(x, 10);
}
/**
* Returns a string representation of {@code x} for the given radix, where {@code x} is treated as
* unsigned.
*
* <p><b>Java 8 users:</b> use {@link Long#toUnsignedString(long, int)} instead.
*
* @param x the value to convert to a string.
* @param radix the radix to use while working with {@code x}
* @throws IllegalArgumentException if {@code radix} is not between {@link Character#MIN_RADIX}
* and {@link Character#MAX_RADIX}.
*/
public static String toString(long x, int radix) {
checkArgument(
radix >= Character.MIN_RADIX && radix <= Character.MAX_RADIX,
"radix (%s) must be between Character.MIN_RADIX and Character.MAX_RADIX",
radix);
if (x == 0) {
// Simply return "0"
return "0";
} else if (x > 0) {
return Long.toString(x, radix);
} else {
char[] buf = new char[64];
int i = buf.length;
if ((radix & (radix - 1)) == 0) {
// Radix is a power of two so we can avoid division.
int shift = Integer.numberOfTrailingZeros(radix);
int mask = radix - 1;
do {
buf[--i] = Character.forDigit(((int) x) & mask, radix);
x >>>= shift;
} while (x != 0);
} else {
// Separate off the last digit using unsigned division. That will leave
// a number that is nonnegative as a signed integer.
long quotient;
if ((radix & 1) == 0) {
// Fast path for the usual case where the radix is even.
quotient = (x >>> 1) / (radix >>> 1);
} else {
quotient = divide(x, radix);
}
long rem = x - quotient * radix;
buf[--i] = Character.forDigit((int) rem, radix);
x = quotient;
// Simple modulo/division approach
while (x > 0) {
buf[--i] = Character.forDigit((int) (x % radix), radix);
x /= radix;
}
}
// Generate string
return new String(buf, i, buf.length - i);
}
}
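  // Illustrative conversions: toString(-1L) returns "18446744073709551615" and
  // toString(-1L, 16) returns "ffffffffffffffff".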
}
| 6,496 |
17,703 | #pragma once
#include "envoy/upstream/retry.h"
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "retry_host_predicate.h"
namespace Envoy {
namespace Upstream {
using ::testing::NiceMock;
class TestRetryHostPredicateFactory : public RetryHostPredicateFactory {
public:
RetryHostPredicateSharedPtr createHostPredicate(const Protobuf::Message&, uint32_t) override {
return std::make_shared<NiceMock<MockRetryHostPredicate>>();
}
std::string name() const override { return "envoy.test_host_predicate"; }
ProtobufTypes::MessagePtr createEmptyConfigProto() override {
// Using Struct instead of a custom per-filter empty config proto
// This is only allowed in tests.
return ProtobufTypes::MessagePtr{new Envoy::ProtobufWkt::Struct()};
}
};
} // namespace Upstream
} // namespace Envoy
| 273 |
4,606 | # pylint: disable=unused-argument
from dagster import In, job, op, root_input_manager
def read_dataframe_from_table(**_kwargs):
pass
# start_marker
@op(ins={"dataframe": In(root_manager_key="my_root_manager")})
def my_op(dataframe):
"""Do some stuff"""
@root_input_manager
def table1_loader(_):
return read_dataframe_from_table(name="table1")
@job(resource_defs={"my_root_manager": table1_loader})
def my_job():
my_op()
# end_marker
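# Hypothetical invocation (not part of the original snippet): executing the job in-process routes the
# "dataframe" input of my_op through table1_loader.
#
#   result = my_job.execute_in_process()
#   assert result.success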
| 178 |
903 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.lucene.codecs;
import java.io.Closeable;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.lucene.index.DocIDMerger;
import org.apache.lucene.index.FieldInfo;
import org.apache.lucene.index.MergeState;
import org.apache.lucene.index.NumericDocValues;
import org.apache.lucene.index.SegmentWriteState;
/**
* Abstract API that consumes normalization values. Concrete implementations of this actually do
* "something" with the norms (write it into the index in a specific format).
*
* <p>The lifecycle is:
*
* <ol>
* <li>NormsConsumer is created by {@link NormsFormat#normsConsumer(SegmentWriteState)}.
* <li>{@link #addNormsField} is called for each field with normalization values. The API is a
* "pull" rather than "push", and the implementation is free to iterate over the values
* multiple times ({@link Iterable#iterator()}).
* <li>After all fields are added, the consumer is {@link #close}d.
* </ol>
*
* @lucene.experimental
*/
public abstract class NormsConsumer implements Closeable {
/** Sole constructor. (For invocation by subclass constructors, typically implicit.) */
protected NormsConsumer() {}
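  // Hypothetical sketch (not part of Lucene) of the "pull" pattern described above: a concrete
  // consumer's addNormsField implementation typically iterates the per-document values itself, e.g.
  //   NumericDocValues norms = normsProducer.getNorms(field);
  //   for (int doc = norms.nextDoc(); doc != DocIdSetIterator.NO_MORE_DOCS; doc = norms.nextDoc()) {
  //     writeNorm(doc, norms.longValue()); // writeNorm is an assumed format-specific helper
  //   }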
/**
* Writes normalization values for a field.
*
* @param field field information
* @param normsProducer NormsProducer of the numeric norm values
* @throws IOException if an I/O error occurred.
*/
public abstract void addNormsField(FieldInfo field, NormsProducer normsProducer)
throws IOException;
/**
* Merges in the fields from the readers in <code>mergeState</code>. The default implementation
* calls {@link #mergeNormsField} for each field, filling segments with missing norms for the
* field with zeros. Implementations can override this method for more sophisticated merging
* (bulk-byte copying, etc).
*/
public void merge(MergeState mergeState) throws IOException {
for (NormsProducer normsProducer : mergeState.normsProducers) {
if (normsProducer != null) {
normsProducer.checkIntegrity();
}
}
for (FieldInfo mergeFieldInfo : mergeState.mergeFieldInfos) {
if (mergeFieldInfo.hasNorms()) {
mergeNormsField(mergeFieldInfo, mergeState);
}
}
}
/** Tracks state of one numeric sub-reader that we are merging */
private static class NumericDocValuesSub extends DocIDMerger.Sub {
private final NumericDocValues values;
public NumericDocValuesSub(MergeState.DocMap docMap, NumericDocValues values) {
super(docMap);
this.values = values;
assert values.docID() == -1;
}
@Override
public int nextDoc() throws IOException {
return values.nextDoc();
}
}
/**
* Merges the norms from <code>toMerge</code>.
*
* <p>The default implementation calls {@link #addNormsField}, passing an Iterable that merges and
* filters deleted documents on the fly.
*/
public void mergeNormsField(final FieldInfo mergeFieldInfo, final MergeState mergeState)
throws IOException {
// TODO: try to share code with default merge of DVConsumer by passing MatchAllBits ?
addNormsField(
mergeFieldInfo,
new NormsProducer() {
@Override
public NumericDocValues getNorms(FieldInfo fieldInfo) throws IOException {
if (fieldInfo != mergeFieldInfo) {
throw new IllegalArgumentException("wrong fieldInfo");
}
List<NumericDocValuesSub> subs = new ArrayList<>();
assert mergeState.docMaps.length == mergeState.docValuesProducers.length;
for (int i = 0; i < mergeState.docValuesProducers.length; i++) {
NumericDocValues norms = null;
NormsProducer normsProducer = mergeState.normsProducers[i];
if (normsProducer != null) {
FieldInfo readerFieldInfo = mergeState.fieldInfos[i].fieldInfo(mergeFieldInfo.name);
if (readerFieldInfo != null && readerFieldInfo.hasNorms()) {
norms = normsProducer.getNorms(readerFieldInfo);
}
}
if (norms != null) {
subs.add(new NumericDocValuesSub(mergeState.docMaps[i], norms));
}
}
final DocIDMerger<NumericDocValuesSub> docIDMerger =
DocIDMerger.of(subs, mergeState.needsIndexSort);
return new NumericDocValues() {
private int docID = -1;
private NumericDocValuesSub current;
@Override
public int docID() {
return docID;
}
@Override
public int nextDoc() throws IOException {
current = docIDMerger.next();
if (current == null) {
docID = NO_MORE_DOCS;
} else {
docID = current.mappedDocID;
}
return docID;
}
@Override
public int advance(int target) throws IOException {
throw new UnsupportedOperationException();
}
@Override
public boolean advanceExact(int target) throws IOException {
throw new UnsupportedOperationException();
}
@Override
public long cost() {
return 0;
}
@Override
public long longValue() throws IOException {
return current.values.longValue();
}
};
}
@Override
public void checkIntegrity() {}
@Override
public void close() {}
});
}
}
| 2,543 |
347 | <reponame>hbraha/ovirt-engine<gh_stars>100-1000
package org.ovirt.engine.ui.webadmin.section.main.view;
import java.util.HashMap;
import java.util.Map;
import org.ovirt.engine.core.common.businessentities.Quota;
import org.ovirt.engine.core.common.businessentities.QuotaCluster;
import org.ovirt.engine.core.common.businessentities.QuotaStorage;
import org.ovirt.engine.core.common.utils.SizeConverter;
import org.ovirt.engine.core.searchbackend.QuotaConditionFieldAutoCompleter;
import org.ovirt.engine.ui.common.idhandler.ElementIdHandler;
import org.ovirt.engine.ui.common.presenter.FragmentParams;
import org.ovirt.engine.ui.common.uicommon.model.MainModelProvider;
import org.ovirt.engine.ui.common.widget.renderer.DiskSizeRenderer;
import org.ovirt.engine.ui.common.widget.table.column.AbstractLinkColumn;
import org.ovirt.engine.ui.common.widget.table.column.AbstractTextColumn;
import org.ovirt.engine.ui.uicommonweb.models.quota.QuotaListModel;
import org.ovirt.engine.ui.uicommonweb.place.WebAdminApplicationPlaces;
import org.ovirt.engine.ui.webadmin.ApplicationConstants;
import org.ovirt.engine.ui.webadmin.ApplicationMessages;
import org.ovirt.engine.ui.webadmin.gin.AssetProvider;
import org.ovirt.engine.ui.webadmin.section.main.presenter.MainQuotaPresenter;
import org.ovirt.engine.ui.webadmin.widget.table.column.AbstractQuotaPercentColumn;
import org.ovirt.engine.ui.webadmin.widget.table.column.QuotaDcStatusColumn;
import com.google.gwt.cell.client.FieldUpdater;
import com.google.gwt.core.client.GWT;
import com.google.gwt.i18n.client.NumberFormat;
import com.google.inject.Inject;
public class MainQuotaView extends AbstractMainWithDetailsTableView<Quota, QuotaListModel> implements MainQuotaPresenter.ViewDef {
private static final NumberFormat decimalFormat = NumberFormat.getDecimalFormat();
private static final DiskSizeRenderer<Number> diskSizeRenderer =
new DiskSizeRenderer<>(SizeConverter.SizeUnit.GiB);
interface ViewIdHandler extends ElementIdHandler<MainQuotaView> {
ViewIdHandler idHandler = GWT.create(ViewIdHandler.class);
}
private static final ApplicationConstants constants = AssetProvider.getConstants();
private static final ApplicationMessages messages = AssetProvider.getMessages();
@Inject
public MainQuotaView(MainModelProvider<Quota, QuotaListModel> modelProvider) {
super(modelProvider);
ViewIdHandler.idHandler.generateAndSetIds(this);
initTable();
initWidget(getTable());
}
void initTable() {
getTable().enableColumnResizing();
QuotaDcStatusColumn dcStatusColumn = new QuotaDcStatusColumn();
dcStatusColumn.setContextMenuTitle(constants.dcStatusQuota());
getTable().addColumn(dcStatusColumn, constants.empty(), "30px"); //$NON-NLS-1$
AbstractTextColumn<Quota> nameColumn = new AbstractLinkColumn<Quota>(new FieldUpdater<Quota, String>() {
@Override
public void update(int index, Quota quota, String value) {
Map<String, String> parameters = new HashMap<>();
parameters.put(FragmentParams.NAME.getName(), quota.getName());
parameters.put(FragmentParams.DATACENTER.getName(), quota.getStoragePoolName());
//The link was clicked, now fire an event to switch to details.
getPlaceTransitionHandler().handlePlaceTransition(
WebAdminApplicationPlaces.quotaClusterSubTabPlace, parameters);
}
}) {
@Override
public String getValue(Quota object) {
return object.getQuotaName() == null ? "" : object.getQuotaName(); //$NON-NLS-1$
}
};
nameColumn.makeSortable(QuotaConditionFieldAutoCompleter.NAME);
getTable().addColumn(nameColumn, constants.nameQuota(), "120px"); //$NON-NLS-1$
AbstractTextColumn<Quota> dataCenterColumn = new AbstractTextColumn<Quota>() {
@Override
public String getValue(Quota object) {
return object.getStoragePoolName() == null ? "" : object.getStoragePoolName();
}
};
dataCenterColumn.makeSortable(QuotaConditionFieldAutoCompleter.STORAGEPOOLNAME);
getTable().addColumn(dataCenterColumn, constants.dcQuota(), "120px"); //$NON-NLS-1$
AbstractTextColumn<Quota> descriptionColumn = new AbstractTextColumn<Quota>() {
@Override
public String getValue(Quota object) {
return object.getDescription() == null ? "" : object.getDescription(); //$NON-NLS-1$
}
};
descriptionColumn.makeSortable(QuotaConditionFieldAutoCompleter.DESCRIPTION);
getTable().addColumn(descriptionColumn, constants.descriptionQuota(), "120px"); //$NON-NLS-1$
getTable().addColumn(new AbstractQuotaPercentColumn<Quota>() {
@Override
protected Integer getProgressValue(Quota object) {
int value;
long allocated = 0;
long used = 0;
if (object.getGlobalQuotaCluster() != null) {
allocated = object.getGlobalQuotaCluster().getMemSizeMB();
used = object.getGlobalQuotaCluster().getMemSizeMBUsage();
} else {
for (QuotaCluster quotaCluster : object.getQuotaClusters()) {
if (!QuotaCluster.UNLIMITED_MEM.equals(quotaCluster.getMemSizeMB())) {
allocated += quotaCluster.getMemSizeMB() != null ? quotaCluster.getMemSizeMB() : 0;
used += quotaCluster.getMemSizeMBUsage();
} else {
allocated = QuotaCluster.UNLIMITED_MEM;
break;
}
}
}
if (allocated == 0) {
return 0;
}
value = (int)(((double)used/allocated) * 100);
return allocated < 0 ? -1 : value > 100 ? 100 : value;
}
},
constants.usedMemoryQuota(), "145px"); //$NON-NLS-1$
getTable().addColumn(new AbstractTextColumn<Quota>() {
@Override
public String getValue(Quota object) {
int value;
long allocated = 0;
long used = 0;
if (object.getGlobalQuotaCluster() != null) {
allocated = object.getGlobalQuotaCluster().getMemSizeMB();
used = object.getGlobalQuotaCluster().getMemSizeMBUsage();
} else {
for (QuotaCluster quotaCluster : object.getQuotaClusters()) {
if (!QuotaCluster.UNLIMITED_MEM.equals(quotaCluster.getMemSizeMB())) {
allocated += quotaCluster.getMemSizeMB() != null ? quotaCluster.getMemSizeMB() : 0;
used += quotaCluster.getMemSizeMBUsage();
} else {
allocated = QuotaCluster.UNLIMITED_MEM;
break;
}
}
}
value = (int)(allocated-used);
String returnVal;
if (allocated < 0) {
returnVal = constants.unlimited();
} else if (value <= 0){
returnVal = messages.mebibytes("0"); //$NON-NLS-1$
} else if (value <= 5*1024) {
returnVal = messages.mebibytes(String.valueOf(value));
} else {
returnVal = messages.gibibytes(decimalFormat.format((double)value/1024));
}
return returnVal;
}
}, constants.freeMemory(), "100px"); //$NON-NLS-1$
getTable().addColumn(new AbstractQuotaPercentColumn<Quota>() {
@Override
protected Integer getProgressValue(Quota object) {
int value;
int allocated = 0;
int used = 0;
if (object.getGlobalQuotaCluster() != null) {
allocated =
object.getGlobalQuotaCluster().getVirtualCpu() != null ? object.getGlobalQuotaCluster()
.getVirtualCpu() : 0;
used = object.getGlobalQuotaCluster().getVirtualCpuUsage();
} else {
for (QuotaCluster quotaCluster : object.getQuotaClusters()) {
if (!QuotaCluster.UNLIMITED_VCPU.equals(quotaCluster.getVirtualCpu())) {
allocated += quotaCluster.getVirtualCpu();
used += quotaCluster.getVirtualCpuUsage();
} else {
allocated = QuotaCluster.UNLIMITED_VCPU;
break;
}
}
}
if (allocated == 0) {
return 0;
}
value = (int)(((double)used/allocated) * 100);
return allocated < 0 ? -1 : value > 100 ? 100 : value;
}
},
constants.runningCpuQuota(), "135px"); //$NON-NLS-1$
getTable().addColumn(new AbstractTextColumn<Quota>() {
@Override
public String getValue(Quota object) {
int value;
int allocated = 0;
int used = 0;
if (object.getGlobalQuotaCluster() != null) {
allocated =
object.getGlobalQuotaCluster().getVirtualCpu() != null ? object.getGlobalQuotaCluster()
.getVirtualCpu() : 0;
used = object.getGlobalQuotaCluster().getVirtualCpuUsage();
} else {
for (QuotaCluster quotaCluster : object.getQuotaClusters()) {
if (!QuotaCluster.UNLIMITED_VCPU.equals(quotaCluster.getVirtualCpu())) {
allocated += quotaCluster.getVirtualCpu();
used += quotaCluster.getVirtualCpuUsage();
} else {
allocated = QuotaCluster.UNLIMITED_VCPU;
break;
}
}
}
value = allocated - used;
String returnVal;
if (allocated < 0) {
returnVal = constants.unlimited();
} else if (value <= 0) {
returnVal = "0"; //$NON-NLS-1$
} else {
returnVal = value + ""; //$NON-NLS-1$
}
return returnVal;
}
}, constants.freeVcpu(), "100px"); //$NON-NLS-1$
getTable().addColumn(new AbstractQuotaPercentColumn<Quota>() {
@Override
protected Integer getProgressValue(Quota object) {
int value;
double allocated = 0;
double used = 0;
if (object.getGlobalQuotaStorage() != null) {
allocated = object.getGlobalQuotaStorage().getStorageSizeGB();
used = object.getGlobalQuotaStorage().getStorageSizeGBUsage();
} else {
for (QuotaStorage quotaStorage : object.getQuotaStorages()) {
if (!QuotaStorage.UNLIMITED.equals(quotaStorage.getStorageSizeGB())) {
allocated += quotaStorage.getStorageSizeGB();
used += quotaStorage.getStorageSizeGBUsage();
} else {
allocated = QuotaStorage.UNLIMITED;
break;
}
}
}
if (allocated == 0) {
return 0;
}
value = (int)((used/allocated) * 100);
return allocated < 0 ? -1 : value > 100 ? 100 : value;
}
},
constants.usedStorageQuota(), "155px"); //$NON-NLS-1$
getTable().addColumn(new AbstractTextColumn<Quota>() {
@Override
public String getValue(Quota object) {
double value;
double allocated = 0;
double used = 0;
if (object.getGlobalQuotaStorage() != null) {
allocated = object.getGlobalQuotaStorage().getStorageSizeGB();
used = object.getGlobalQuotaStorage().getStorageSizeGBUsage();
} else {
for (QuotaStorage quotaStorage : object.getQuotaStorages()) {
if (!QuotaStorage.UNLIMITED.equals(quotaStorage.getStorageSizeGB())) {
allocated += quotaStorage.getStorageSizeGB();
used += quotaStorage.getStorageSizeGBUsage();
} else {
allocated = QuotaStorage.UNLIMITED;
break;
}
}
}
value = allocated - used;
String returnVal;
if (allocated < 0) {
returnVal = constants.unlimited();
} else if (value <= 0) {
returnVal = messages.gibibytes("0"); //$NON-NLS-1$
} else {
returnVal = diskSizeRenderer.render(value);
}
return returnVal;
}
}, constants.freeStorage(), "100px"); //$NON-NLS-1$
}
}
| 6,967 |
313 | {"status_id":6217380061511680,"text":"Otowa jev tumca iru gi egleru hup gambuol balcaha fuk uwpupu nem vugogzuh. #wotjuwu","user":{"user_id":1841614712471552,"name":"<NAME>","screen_name":"@faba","created_at":1159617933,"followers_count":1,"friends_count":21,"favourites_count":22},"created_at":1359492675,"favorite_count":84,"retweet_count":374,"entities":{"hashtags":[{"text":"#wotjuwu","indices":[5,14]}]},"in_reply_to_status_id":null} | 175 |
2,360 | <filename>var/spack/repos/builtin/packages/py-msgpack-numpy/package.py
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyMsgpackNumpy(PythonPackage):
"""This package provides encoding and decoding routines
that enable the serialization and deserialization of
numerical and array data types provided by numpy using the
highly efficient msgpack format. Serialization of Python's
native complex data types is also supported."""
homepage = "https://github.com/lebedov/msgpack-numpy"
pypi = "msgpack-numpy/msgpack-numpy-0.4.7.1.tar.gz"
version('0.4.7.1', sha256='7eaf51acf82d7c467d21aa71df94e1c051b2055e54b755442051b474fa7cf5e1')
version('0.4.7', sha256='8e975dd7dd9eb13cbf5e8cd90af1f12af98706bbeb7acfcbd8d558fd005a85d7')
version('0.4.6', sha256='ef3c5fe3d6cbab5c9db97de7062681c18f82d32a37177aaaf58b483d0336f135')
version('0.4.5', sha256='4e88a4147db70f69dce1556317291e04e5107ee7b93ea300f92f1187120da7ec')
version('0.4.4.3', sha256='c7db37ce01e268190568cf66a6a65d1ad81e3bcfa55dd824103c9b324608a44e')
version('0.4.4.2', sha256='ac3db232710070ac64d8e1c5123550a1c1fef45d77b6789d2170cbfd2ec711f3')
version('0.4.4.1', sha256='b7641ccf9f0f4e91a533e8c7be5e34d3f12ff877480879b252113d65c510eeef')
depends_on('py-setuptools', type='build')
depends_on('[email protected]:', type=('build', 'run'))
depends_on('[email protected]:', type=('build', 'run'))
| 712 |
3,212 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.nifi.nar;
import org.aspectj.lang.ProceedingJoinPoint;
import org.aspectj.lang.annotation.Around;
import org.aspectj.lang.annotation.Aspect;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import static java.nio.file.StandardCopyOption.REPLACE_EXISTING;
/**
* AspectJ aspect to handle native library loading by absolute path in multiple classloaders.
*
* The aspect intercepts the {@link System#load(String)} / {@link Runtime#load(String)} calls and creates a copy of the native library
* in the system temp folder with a unique name, then passes this new path to the original load() method.
* In this way, different classloaders will load different native libraries and the "Native Library ... already loaded in another classloader"
* error can be avoided.
*
* To put it into effect, the AspectJ agent needs to be configured in bootstrap.conf (see the necessary config there, commented out by default).
*
* This aspect handles the native library loading when the library is being loaded by its absolute path.
* For loading a native library by its logical name, see {@link AbstractNativeLibHandlingClassLoader}.
*/
@Aspect
public class LoadNativeLibAspect {
private final Logger logger = LoggerFactory.getLogger(getClass());
@Around("call(void java.lang.System.load(String)) || call(void java.lang.Runtime.load(String))")
public void around(ProceedingJoinPoint joinPoint) throws Throwable {
String origLibPathStr = (String) joinPoint.getArgs()[0];
if (origLibPathStr == null || origLibPathStr.isEmpty()) {
logger.info("Native library path specified as null or empty string, proceeding normally");
joinPoint.proceed();
return;
}
Path origLibPath = Paths.get(origLibPathStr);
if (!Files.exists(origLibPath)) {
logger.info("Native library does not exist, proceeding normally");
joinPoint.proceed();
return;
}
String libFileName = origLibPath.getFileName().toString();
ClassLoader contextClassLoader = Thread.currentThread().getContextClassLoader();
String prefix = contextClassLoader.getClass().getName() + "@" + contextClassLoader.hashCode() + "_";
String suffix = "_" + libFileName;
Path tempLibPath = Files.createTempFile(prefix, suffix);
Files.copy(origLibPath, tempLibPath, REPLACE_EXISTING);
logger.info("Loading native library via absolute path (original lib: {}, copied lib: {}", origLibPath, tempLibPath);
joinPoint.proceed(new Object[]{tempLibPath.toString()});
}
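    // Illustrative outcome (names assumed): loading /opt/libs/libfoo.so from a context classloader of
    // class org.apache.nifi.nar.NarClassLoader with hashCode 439904756 produces a copy such as
    // ${java.io.tmpdir}/org.apache.nifi.nar.NarClassLoader@439904756_1234567890_libfoo.so, and that
    // copy is what ultimately reaches System.load() / Runtime.load().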
}
| 1,078 |
2,151 | /*
* Copyright (c) 1999
* Silicon Graphics Computer Systems, Inc.
*
* Copyright (c) 1999
* <NAME>
*
* This material is provided "as is", with absolutely no warranty expressed
* or implied. Any use is at your own risk.
*
* Permission to use or copy this software for any purpose is hereby granted
* without fee, provided the above notices are retained on all copies.
* Permission to modify the code and to distribute modified code is granted,
* provided the above notices are retained, and a notice that the code was
* modified is included with the above copyright notice.
*
*/
#include "stlport_prefix.h"
#include <cstdio>
#include <locale>
#include <istream>
#include "c_locale.h"
#include "acquire_release.h"
_STLP_BEGIN_NAMESPACE
_STLP_MOVE_TO_PRIV_NAMESPACE
// default "C" values for month and day names
const char default_dayname[][14] = {
"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat",
"Sunday", "Monday", "Tuesday", "Wednesday", "Thursday",
"Friday", "Saturday"};
const char default_monthname[][24] = {
"Jan", "Feb", "Mar", "Apr", "May", "Jun",
"Jul", "Aug", "Sep", "Oct", "Nov", "Dec",
"January", "February", "March", "April", "May", "June",
"July", "August", "September", "October", "November", "December"};
#ifndef _STLP_NO_WCHAR_T
const wchar_t default_wdayname[][14] = {
L"Sun", L"Mon", L"Tue", L"Wed", L"Thu", L"Fri", L"Sat",
L"Sunday", L"Monday", L"Tuesday", L"Wednesday", L"Thursday",
L"Friday", L"Saturday"};
const wchar_t default_wmonthname[][24] = {
L"Jan", L"Feb", L"Mar", L"Apr", L"May", L"Jun",
L"Jul", L"Aug", L"Sep", L"Oct", L"Nov", L"Dec",
L"January", L"February", L"March", L"April", L"May", L"June",
L"July", L"August", L"September", L"October", L"November", L"December"};
#endif
#if defined (__BORLANDC__)
_Time_Info time_init<char>::_M_timeinfo;
# ifndef _STLP_NO_WCHAR_T
_WTime_Info time_init<wchar_t>::_M_timeinfo;
# endif
#endif
// _Init_time_info: initialize table with
// "C" values (note these are not defined in the C standard, so this
// is somewhat arbitrary).
static void _Init_timeinfo_base(_Time_Info_Base& table) {
table._M_time_format = "%H:%M:%S";
table._M_date_format = "%m/%d/%y";
table._M_date_time_format = "%m/%d/%y";
}
static void _Init_timeinfo(_Time_Info& table) {
int i;
for (i = 0; i < 14; ++i)
table._M_dayname[i] = default_dayname[i];
for (i = 0; i < 24; ++i)
table._M_monthname[i] = default_monthname[i];
table._M_am_pm[0] = "AM";
table._M_am_pm[1] = "PM";
_Init_timeinfo_base(table);
}
#ifndef _STLP_NO_WCHAR_T
static void _Init_timeinfo(_WTime_Info& table) {
int i;
for (i = 0; i < 14; ++i)
table._M_dayname[i] = default_wdayname[i];
for (i = 0; i < 24; ++i)
table._M_monthname[i] = default_wmonthname[i];
table._M_am_pm[0] = L"AM";
table._M_am_pm[1] = L"PM";
_Init_timeinfo_base(table);
}
#endif
static void _Init_timeinfo_base(_Time_Info_Base& table, _Locale_time * time) {
table._M_time_format = _Locale_t_fmt(time);
if ( table._M_time_format == "%T" ) {
table._M_time_format = "%H:%M:%S";
} else if ( table._M_time_format == "%r" ) {
table._M_time_format = "%I:%M:%S %p";
} else if ( table._M_time_format == "%R" ) {
table._M_time_format = "%H:%M";
}
table._M_date_format = _Locale_d_fmt(time);
table._M_date_time_format = _Locale_d_t_fmt(time);
table._M_long_date_format = _Locale_long_d_fmt(time);
table._M_long_date_time_format = _Locale_long_d_t_fmt(time);
}
static void _Init_timeinfo(_Time_Info& table, _Locale_time * time) {
int i;
for (i = 0; i < 7; ++i)
table._M_dayname[i] = _Locale_abbrev_dayofweek(time, i);
for (i = 0; i < 7; ++i)
table._M_dayname[i+7] = _Locale_full_dayofweek(time, i);
for (i = 0; i < 12; ++i)
table._M_monthname[i] = _Locale_abbrev_monthname(time, i);
for (i = 0; i < 12; ++i)
table._M_monthname[i+12] = _Locale_full_monthname(time, i);
table._M_am_pm[0] = _Locale_am_str(time);
table._M_am_pm[1] = _Locale_pm_str(time);
_Init_timeinfo_base(table, time);
}
#ifndef _STLP_NO_WCHAR_T
static void _Init_timeinfo(_WTime_Info& table, _Locale_time * time) {
wchar_t buf[128];
int i;
for (i = 0; i < 7; ++i)
table._M_dayname[i] = _WLocale_abbrev_dayofweek(time, i, _STLP_ARRAY_AND_SIZE(buf));
for (i = 0; i < 7; ++i)
table._M_dayname[i+7] = _WLocale_full_dayofweek(time, i, _STLP_ARRAY_AND_SIZE(buf));
for (i = 0; i < 12; ++i)
table._M_monthname[i] = _WLocale_abbrev_monthname(time, i, _STLP_ARRAY_AND_SIZE(buf));
for (i = 0; i < 12; ++i)
table._M_monthname[i+12] = _WLocale_full_monthname(time, i, _STLP_ARRAY_AND_SIZE(buf));
table._M_am_pm[0] = _WLocale_am_str(time, _STLP_ARRAY_AND_SIZE(buf));
table._M_am_pm[1] = _WLocale_pm_str(time, _STLP_ARRAY_AND_SIZE(buf));
_Init_timeinfo_base(table, time);
}
#endif
template <class _Ch, class _TimeInfo>
void __subformat(_STLP_BASIC_IOSTRING(_Ch) &buf, const ctype<_Ch>& ct,
const string& format, const _TimeInfo& table, const tm* t) {
const char * cp = format.data();
const char * cp_end = cp + format.size();
while (cp != cp_end) {
if (*cp == '%') {
char mod = 0;
++cp;
if (*cp == '#') {
mod = *cp; ++cp;
}
__write_formatted_timeT(buf, ct, *cp++, mod, table, t);
} else
buf.append(1, *cp++);
}
}
static void __append(__iostring &buf, const string& name)
{ buf.append(name.data(), name.data() + name.size()); }
static void __append(__iowstring &buf, const wstring& name)
{ buf.append(name.data(), name.data() + name.size()); }
static void __append(__iostring &buf, char *first, char *last, const ctype<char>& /* ct */)
{ buf.append(first, last); }
static void __append(__iowstring &buf, char *first, char *last, const ctype<wchar_t>& ct) {
wchar_t _wbuf[64];
ct.widen(first, last, _wbuf);
buf.append(_wbuf, _wbuf + (last - first));
}
#if defined (__GNUC__)
/* The number of days from the first day of the first ISO week of this
year to the year day YDAY with week day WDAY. ISO weeks start on
Monday; the first ISO week has the year's first Thursday. YDAY may
be as small as YDAY_MINIMUM. */
# define __ISO_WEEK_START_WDAY 1 /* Monday */
# define __ISO_WEEK1_WDAY 4 /* Thursday */
# define __YDAY_MINIMUM (-366)
# define __TM_YEAR_BASE 1900
static int
__iso_week_days(int yday, int wday) {
/* Add enough to the first operand of % to make it nonnegative. */
int big_enough_multiple_of_7 = (-__YDAY_MINIMUM / 7 + 2) * 7;
return (yday
- (yday - wday + __ISO_WEEK1_WDAY + big_enough_multiple_of_7) % 7
+ __ISO_WEEK1_WDAY - __ISO_WEEK_START_WDAY);
}
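/* Worked example (illustrative): for 2023-01-01, yday == 0 and wday == 0 (Sunday), so
   __iso_week_days(0, 0) == -1; the negative result means the day belongs to the last ISO week of the
   previous year. For 2023-01-04 (yday == 3, wday == 3), the function returns 2, giving ISO week
   2 / 7 + 1 == 1. */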
# define __is_leap(year)\
((year) % 4 == 0 && ((year) % 100 != 0 || (year) % 400 == 0))
#endif
#define __hour12(hour) \
(((hour) % 12 == 0) ? (12) : (hour) % 12)
#if !defined (_STLP_USE_SAFE_STRING_FUNCTIONS)
# define _STLP_SPRINTF sprintf
#else
# define _STLP_SPRINTF sprintf_s
#endif
template <class _Ch, class _TimeInfo>
void _STLP_CALL __write_formatted_timeT(_STLP_BASIC_IOSTRING(_Ch) &buf,
const ctype<_Ch>& ct,
char format, char modifier,
const _TimeInfo& table, const tm* t) {
char _buf[64];
char *_bend;
switch (format) {
case 'a':
__append(buf, table._M_dayname[t->tm_wday]);
break;
case 'A':
__append(buf, table._M_dayname[t->tm_wday + 7]);
break;
case 'b':
__append(buf, table._M_monthname[t->tm_mon]);
break;
case 'B':
__append(buf, table._M_monthname[t->tm_mon + 12]);
break;
case 'c':
__subformat(buf, ct, (modifier != '#') ? table._M_date_time_format
: table._M_long_date_time_format, table, t);
break;
case 'd':
_STLP_SPRINTF(_buf, (modifier != '#') ? "%.2ld" : "%ld", (long)t->tm_mday);
__append(buf, _buf, ((long)t->tm_mday < 10L && modifier == '#') ? _buf + 1 : _buf + 2, ct);
break;
case 'e':
_STLP_SPRINTF(_buf, "%2ld", (long)t->tm_mday);
__append(buf, _buf, _buf + 2, ct);
break;
case 'H':
_STLP_SPRINTF(_buf, (modifier != '#') ? "%.2ld" : "%ld", (long)t->tm_hour);
__append(buf, _buf, ((long)t->tm_hour < 10L && modifier == '#') ? _buf + 1 : _buf + 2, ct);
break;
case 'I':
_STLP_SPRINTF(_buf, (modifier != '#') ? "%.2ld" : "%ld", (long)__hour12(t->tm_hour));
__append(buf, _buf, ((long)__hour12(t->tm_hour) < 10L && modifier == '#') ? _buf + 1 : _buf + 2, ct);
break;
case 'j':
_bend = __write_integer(_buf, 0, (long)((long)t->tm_yday + 1));
__append(buf, _buf, _bend, ct);
break;
case 'm':
_STLP_SPRINTF(_buf, (modifier != '#') ? "%.2ld" : "%ld", (long)t->tm_mon + 1);
__append(buf, _buf, ((long)(t->tm_mon + 1) < 10L && modifier == '#') ? _buf + 1 : _buf + 2, ct);
break;
case 'M':
_STLP_SPRINTF(_buf, (modifier != '#') ? "%.2ld" : "%ld", (long)t->tm_min);
__append(buf, _buf, ((long)t->tm_min < 10L && modifier == '#') ? _buf + 1 : _buf + 2, ct);
break;
case 'p':
__append(buf, table._M_am_pm[t->tm_hour / 12]);
break;
case 'S': // pad with zeros
_STLP_SPRINTF(_buf, (modifier != '#') ? "%.2ld" : "%ld", (long)t->tm_sec);
__append(buf, _buf, ((long)t->tm_sec < 10L && modifier == '#') ? _buf + 1 : _buf + 2, ct);
break;
case 'U':
_bend = __write_integer(_buf, 0, long((t->tm_yday - t->tm_wday + 7) / 7));
__append(buf, _buf, _bend, ct);
break;
case 'w':
_bend = __write_integer(_buf, 0, (long)t->tm_wday);
__append(buf, _buf, _bend, ct);
break;
case 'W':
_bend = __write_integer(_buf, 0,
(long)(t->tm_wday == 0 ? (t->tm_yday + 1) / 7 :
(t->tm_yday + 8 - t->tm_wday) / 7));
__append(buf, _buf, _bend, ct);
break;
case 'x':
__subformat(buf, ct, (modifier != '#') ? table._M_date_format
: table._M_long_date_format, table, t);
break;
case 'X':
__subformat(buf, ct, table._M_time_format, table, t);
break;
case 'y':
_bend = __write_integer(_buf, 0, (long)((long)(t->tm_year + 1900) % 100));
__append(buf, _buf, _bend, ct);
break;
case 'Y':
_bend = __write_integer(_buf, 0, (long)((long)t->tm_year + 1900));
__append(buf, _buf, _bend, ct);
break;
case '%':
buf.append(1, ct.widen('%'));
break;
#if defined (__GNUC__)
// fbp : at least on SUN
# if defined (_STLP_UNIX) && !defined (__linux__)
# define __USE_BSD 1
# endif
/*********************************************
* JGS, handle various extensions *
*********************************************/
case 'h': /* POSIX.2 extension */
// same as 'b', abbrev month name
__append(buf, table._M_monthname[t->tm_mon]);
break;
case 'C': /* POSIX.2 extension */
// same as 'd', the day
_STLP_SPRINTF(_buf, "%2ld", (long)t->tm_mday);
__append(buf, _buf, _buf + 2, ct);
break;
case 'D': /* POSIX.2 extension */
// same as 'x'
__subformat(buf, ct, table._M_date_format, table, t);
break;
case 'k': /* GNU extension */
_STLP_SPRINTF(_buf, "%2ld", (long)t->tm_hour);
__append(buf, _buf, _buf + 2, ct);
break;
case 'l': /* GNU extension */
_STLP_SPRINTF(_buf, "%2ld", (long)t->tm_hour % 12);
__append(buf, _buf, _buf + 2, ct);
break;
case 'n': /* POSIX.2 extension */
buf.append(1, ct.widen('\n'));
break;
case 'R': /* GNU extension */
__subformat(buf, ct, "%H:%M", table, t);
break;
case 'r': /* POSIX.2 extension */
__subformat(buf, ct, "%I:%M:%S %p", table, t);
break;
case 'T': /* POSIX.2 extension. */
__subformat(buf, ct, "%H:%M:%S", table, t);
break;
case 't': /* POSIX.2 extension. */
buf.append(1, ct.widen('\t'));
break;
case 'u': /* POSIX.2 extension. */
_bend = __write_integer(_buf, 0, long((t->tm_wday - 1 + 7)) % 7 + 1);
__append(buf, _buf, _bend, ct);
break;
case 's': {
time_t __t = mktime(__CONST_CAST(tm*, t));
_bend = __write_integer(_buf, 0, (long)__t );
__append(buf, _buf, _bend, ct);
break;
}
case 'g': /* GNU extension */
case 'G': {
int year = t->tm_year + __TM_YEAR_BASE;
int days = __iso_week_days (t->tm_yday, t->tm_wday);
if (days < 0) {
/* This ISO week belongs to the previous year. */
year--;
days = __iso_week_days (t->tm_yday + (365 + __is_leap (year)), t->tm_wday);
}
else {
int d = __iso_week_days (t->tm_yday - (365 + __is_leap (year)), t->tm_wday);
if (0 <= d) {
/* This ISO week belongs to the next year. */
++year;
days = d;
}
}
long val;
switch (format) {
case 'g':
val = (long)(year % 100 + 100) % 100;
break;
case 'G':
val = (long)year;
break;
default:
val = (long)days / 7 + 1;
break;
}
_bend = __write_integer(_buf, 0, val);
__append(buf, _buf, _bend, ct);
break;
}
# if defined (_STLP_USE_GLIBC)
case 'z': /* GNU extension. */
if (t->tm_isdst < 0)
break;
{
int diff;
# if defined (__USE_BSD) || defined (__BEOS__)
diff = t->tm_gmtoff;
# else
diff = t->__tm_gmtoff;
# endif
if (diff < 0) {
buf.append(1, ct.widen('-'));
diff = -diff;
} else
buf.append(1, ct.widen('+'));
diff /= 60;
_STLP_SPRINTF(_buf, "%.4d", (diff / 60) * 100 + diff % 60);
__append(buf, _buf, _buf + 4, ct);
break;
}
# endif /* __GLIBC__ */
#endif /* __GNUC__ */
default:
break;
}
}
void _STLP_CALL __write_formatted_time(__iostring &buf, const ctype<char>& ct,
char format, char modifier,
const _Time_Info& table, const tm* t)
{ __write_formatted_timeT(buf, ct, format, modifier, table, t); }
void _STLP_CALL __write_formatted_time(__iowstring &buf, const ctype<wchar_t>& ct,
char format, char modifier,
const _WTime_Info& table, const tm* t)
{ __write_formatted_timeT(buf, ct, format, modifier, table, t); }
static time_base::dateorder __get_date_order(_Locale_time* time) {
const char * fmt = _Locale_d_fmt(time);
char first, second, third;
while (*fmt != 0 && *fmt != '%') ++fmt;
if (*fmt == 0)
return time_base::no_order;
first = *++fmt;
while (*fmt != 0 && *fmt != '%') ++fmt;
if (*fmt == 0)
return time_base::no_order;
second = *++fmt;
while (*fmt != 0 && *fmt != '%') ++fmt;
if (*fmt == 0)
return time_base::no_order;
third = *++fmt;
switch (first) {
case 'd':
return (second == 'm' && third == 'y') ? time_base::dmy
: time_base::no_order;
case 'm':
return (second == 'd' && third == 'y') ? time_base::mdy
: time_base::no_order;
case 'y':
switch (second) {
case 'd':
return third == 'm' ? time_base::ydm : time_base::no_order;
case 'm':
return third == 'd' ? time_base::ymd : time_base::no_order;
default:
return time_base::no_order;
}
default:
return time_base::no_order;
}
}
time_init<char>::time_init()
: _M_dateorder(time_base::no_order)
{ _Init_timeinfo(_M_timeinfo); }
time_init<char>::time_init(const char* __name) {
if (!__name)
locale::_M_throw_on_null_name();
int __err_code;
char buf[_Locale_MAX_SIMPLE_NAME];
_Locale_time *__time = __acquire_time(__name, buf, 0, &__err_code);
if (!__time)
locale::_M_throw_on_creation_failure(__err_code, __name, "time");
_Init_timeinfo(this->_M_timeinfo, __time);
_M_dateorder = __get_date_order(__time);
__release_time(__time);
}
time_init<char>::time_init(_Locale_time *__time) {
_Init_timeinfo(this->_M_timeinfo, __time);
_M_dateorder = __get_date_order(__time);
}
#ifndef _STLP_NO_WCHAR_T
time_init<wchar_t>::time_init()
: _M_dateorder(time_base::no_order)
{ _Init_timeinfo(_M_timeinfo); }
time_init<wchar_t>::time_init(const char* __name) {
if (!__name)
locale::_M_throw_on_null_name();
int __err_code;
char buf[_Locale_MAX_SIMPLE_NAME];
_Locale_time *__time = __acquire_time(__name, buf, 0, &__err_code);
if (!__time)
locale::_M_throw_on_creation_failure(__err_code, __name, "time");
_Init_timeinfo(this->_M_timeinfo, __time);
_M_dateorder = __get_date_order(__time);
__release_time(__time);
}
time_init<wchar_t>::time_init(_Locale_time *__time) {
_Init_timeinfo(this->_M_timeinfo, __time);
_M_dateorder = __get_date_order(__time);
}
#endif
_STLP_MOVE_TO_STD_NAMESPACE
#if !defined (_STLP_NO_FORCE_INSTANTIATE)
template class time_get<char, istreambuf_iterator<char, char_traits<char> > >;
template class time_put<char, ostreambuf_iterator<char, char_traits<char> > >;
# ifndef _STLP_NO_WCHAR_T
template class time_get<wchar_t, istreambuf_iterator<wchar_t, char_traits<wchar_t> > >;
template class time_put<wchar_t, ostreambuf_iterator<wchar_t, char_traits<wchar_t> > >;
# endif
#endif
_STLP_END_NAMESPACE
| 8,324 |
3,834 | <reponame>cmarincia/enso
package org.enso.base.text;
/** A char sequence which allows accessing a slice of another char sequence without copying. */
class StringSlice implements CharSequence {
private final CharSequence text;
private final int subStart, subEnd;
/** Constructs a slice of the given text. */
public StringSlice(CharSequence text, int start, int end) {
this.text = text;
this.subStart = start;
this.subEnd = end;
}
@Override
public int length() {
return subEnd - subStart;
}
@Override
public char charAt(int index) {
return text.charAt(subStart + index);
}
@Override
public CharSequence subSequence(int start, int end) {
return new StringSlice(text, subStart + start, subStart + end);
}
@Override
public String toString() {
return text.subSequence(subStart, subEnd).toString();
}
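  // Illustrative usage: new StringSlice("Hello World", 6, 11).toString() returns "World"; the
  // underlying characters are only copied when toString() is called.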
}
| 282 |
1,306 | HANDLE_OPCODE(OP_THROW /*vAA*/)
{
Object* obj;
/*
* We don't create an exception here, but the process of searching
* for a catch block can do class lookups and throw exceptions.
* We need to update the saved PC.
*/
EXPORT_PC();
vsrc1 = INST_AA(inst);
ILOGV("|throw v%d (%p)", vsrc1, (void*)GET_REGISTER(vsrc1));
obj = (Object*) GET_REGISTER(vsrc1);
if (!checkForNull(obj)) {
/* will throw a null pointer exception */
LOGVV("Bad exception");
} else {
/* use the requested exception */
dvmSetException(self, obj);
}
GOTO_exceptionThrown();
}
OP_END
| 349 |
730 | //
// UIColor+HO.h
// HighlightObjC
//
// Created by cyan on 30/12/2016.
// Copyright © 2016 cyan. All rights reserved.
//
#import <UIKit/UIKit.h>
@interface UIColor (HO)
+ (UIColor *)colorWithCSS:(NSString *)css;
@end
| 92 |
875 | <filename>src/test/org/apache/sqoop/testcategories/sqooptest/SqoopTest.java
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.sqoop.testcategories.sqooptest;
/**
* SqoopTest includes UnitTest, IntegrationTest and ManualTest.
*
* UnitTest:
* A unit test shall test one class at a time having it's dependencies mocked.
* A unit test shall not start a mini cluster nor an embedded database and it shall not use a JDBC driver.
*
* IntegrationTest:
* An integration test shall test if independently developed classes work together correctly.
* An integration test checks a whole scenario and thus may start mini clusters or embedded databases and may connect to external resources like RDBMS instances.
*
* ManualTest:
* Deprecated category, shall not be used nor extended.
*/
public interface SqoopTest {
}
| 396 |
2,671 | s = set([1,2,3])
t = set([3,4,5])
a = s.symmetric_difference(t)
b = t.symmetric_difference(s)
print a
print a == b
print a == set([1,2,4,5])
| 72 |
918 | <reponame>mvachhani/incubator-gobblin
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gobblin.runtime.job_catalog;
import java.net.URI;
import java.util.Collection;
import java.util.List;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.google.common.base.Optional;
import com.google.common.util.concurrent.AbstractIdleService;
import org.apache.gobblin.metrics.MetricContext;
import org.apache.gobblin.metrics.Tag;
import org.apache.gobblin.runtime.api.JobCatalog;
import org.apache.gobblin.runtime.api.JobCatalogListener;
import org.apache.gobblin.runtime.api.JobSpec;
import org.apache.gobblin.runtime.api.JobSpecNotFoundException;
/**
* A JobCatalog decorator that caches all JobSpecs in memory.
*
*/
public class CachingJobCatalog extends AbstractIdleService implements JobCatalog {
protected final JobCatalog _fallback;
protected final InMemoryJobCatalog _cache;
protected final Logger _log;
public CachingJobCatalog(JobCatalog fallback, Optional<Logger> log) {
_log = log.isPresent() ? log.get() : LoggerFactory.getLogger(getClass());
_fallback = fallback;
_cache = new InMemoryJobCatalog(log);
_fallback.addListener(new FallbackCatalogListener());
}
/** {@inheritDoc} */
@Override
public Collection<JobSpec> getJobs() {
return _cache.getJobs();
}
/** {@inheritDoc} */
@Override
public JobSpec getJobSpec(URI uri) throws JobSpecNotFoundException {
try {
return _cache.getJobSpec(uri);
}
catch (RuntimeException e) {
return _fallback.getJobSpec(uri);
}
}
@Override
protected void startUp() {
_cache.startAsync();
try {
_cache.awaitRunning(2, TimeUnit.SECONDS);
} catch (TimeoutException te) {
throw new RuntimeException("Failed to start " + CachingJobCatalog.class.getName(), te);
}
}
@Override
protected void shutDown() {
_cache.stopAsync();
try {
_cache.awaitTerminated(2, TimeUnit.SECONDS);
} catch (TimeoutException te) {
throw new RuntimeException("Failed to stop " + CachingJobCatalog.class.getName(), te);
}
}
/** {@inheritDoc} */
@Override
public void addListener(JobCatalogListener jobListener) {
_cache.addListener(jobListener);
}
/** {@inheritDoc} */
@Override
public void removeListener(JobCatalogListener jobListener) {
_cache.removeListener(jobListener);
}
/** Refreshes the cache if the underlying fallback catalog changes. */
private class FallbackCatalogListener implements JobCatalogListener {
@Override
public void onAddJob(JobSpec addedJob) {
_cache.put(addedJob);
}
@Override
public void onDeleteJob(URI deletedJobURI, String deletedJobVersion) {
_cache.remove(deletedJobURI);
}
@Override
public void onUpdateJob(JobSpec updatedJob) {
_cache.put(updatedJob);
}
}
@Override
public void registerWeakJobCatalogListener(JobCatalogListener jobListener) {
_cache.registerWeakJobCatalogListener(jobListener);
}
@Override public MetricContext getMetricContext() {
return _fallback.getMetricContext();
}
@Override public boolean isInstrumentationEnabled() {
return _fallback.isInstrumentationEnabled();
}
@Override public List<Tag<?>> generateTags(org.apache.gobblin.configuration.State state) {
return _fallback.generateTags(state);
}
@Override public void switchMetricContext(List<Tag<?>> tags) {
_fallback.switchMetricContext(tags);
}
@Override public void switchMetricContext(MetricContext context) {
_fallback.switchMetricContext(context);
}
@Override public StandardMetrics getMetrics() {
return _fallback.getMetrics();
}
}
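// Hedged usage sketch (assumption, not part of the original file): the decorator wraps a
// slower catalog and serves reads from the in-memory copy where possible, e.g.:
//   JobCatalog catalog = new CachingJobCatalog(fallbackCatalog, Optional.absent());
//   catalog.startAsync();
//   JobSpec spec = catalog.getJobSpec(jobUri);  // fallbackCatalog and jobUri are placeholders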
| 1,447 |
2,015 | <reponame>vktr/PicoTorrent<gh_stars>1000+
#pragma once
#include <wx/wxprec.h>
#ifndef WX_PRECOMP
#include <wx/wx.h>
#endif
namespace pt
{
namespace UI
{
namespace Dialogs
{
class AddTrackerDialog : public wxDialog
{
public:
AddTrackerDialog(wxWindow* parent, wxWindowID id);
virtual ~AddTrackerDialog();
int GetTier();
std::string GetUrl();
void SetTier(int tier);
private:
wxTextCtrl* m_url;
wxTextCtrl* m_tier;
};
}
}
}
| 236 |
622 | <gh_stars>100-1000
/*
* Copyright (C) 2012-2016 Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.facebook.nifty.ssl;
import org.apache.tomcat.jni.SSL;
import org.jboss.netty.handler.ssl.OpenSslEngine;
import javax.net.ssl.SSLEngine;
import javax.net.ssl.SSLException;
import javax.net.ssl.SSLSession;
import javax.security.cert.CertificateException;
import javax.security.cert.X509Certificate;
import java.lang.reflect.Field;
/**
* This class provides a method to extract properties of the SSL session
* from an engine.
* Netty's OpenSSL engine class does not implement getSession() fully, thus
* we have to extract the properties that we need ourselves.
*/
public class OpenSslSessionHelper {
private static Field sslField;
static {
try {
sslField = OpenSslEngine.class.getDeclaredField("ssl");
sslField.setAccessible(true);
}
catch (Throwable t) {
// Ignore.
}
}
public static SslSession getSession(SSLEngine sslEngine) throws SSLException {
if (!(sslEngine instanceof OpenSslEngine)) {
throw new IllegalArgumentException("ssl engine not openssl engine");
}
OpenSslEngine engine = (OpenSslEngine) sslEngine;
if (sslField == null) {
throw new SSLException("SSL field is null");
}
try {
long sslPtr = (long) sslField.get(engine);
if (sslPtr == 0) {
throw new SSLException("SSL not initialized");
}
String alpn = SSL.getAlpnSelected(sslPtr);
String npn = SSL.getNextProtoNegotiated(sslPtr);
String version = SSL.getVersion(sslPtr);
String cipher = SSL.getCipherForSSL(sslPtr);
long establishedTime = SSL.getTime(sslPtr);
// TODO: return the entire chain.
// tc-native thinks that the chain is null, so we supply only the
// leaf cert.
byte[] cert = SSL.getPeerCertificate(sslPtr);
X509Certificate certificate = null;
if (cert != null) {
certificate = X509Certificate.getInstance(cert);
}
return new SslSession(alpn, npn, version, cipher, establishedTime, certificate);
}
catch (IllegalAccessException e) {
throw new SSLException(e);
}
catch (CertificateException e) {
throw new SSLException(e);
}
}
}
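// Hedged usage sketch (assumption, not part of the original file): once the handshake on a
// Netty OpenSSL channel has completed, its session properties can be extracted as
//   SslSession session = OpenSslSessionHelper.getSession(engine);  // engine: the channel's OpenSslEngine
// The SslSession accessor names are not shown in this file, so none are assumed here.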
| 1,186 |
350 | //=================================================================================================
// SAMPLE Single Include
//-------------------------------------------------------------------------------------------------
// Identical to 'SampleBasic' except for how the NetImgui code is integrated to the project.
//
// Other samples use the 'NetImgui_Api.h' and link against a pre compiled 'NetImgui' library.
//
// This sample uses the define 'NETIMGUI_IMPLEMENTATION' before including 'NetImgui_Api.h'.
// This tells 'NetImgui_Api.h' to also include every 'NetImgui' source file,
// removing the need to link against the library.
//
// Note: Another (more conventional) way of compiling 'NetImgui' with your code,
// is to include its source files directly in your project. This single header include
// approach was added for potential convenience, minimizing changes to a project.
//=================================================================================================
// Defining this value before '#include <NetImgui_Api.h>', also load all 'NetImgui' client sources
// It should only be done in 1 source file (to avoid duplicate symbols at link time);
// other locations can still include 'NetImgui_Api.h', but without using the define
#define NETIMGUI_IMPLEMENTATION
#include <NetImgui_Api.h>
#include "..\Common\Sample.h"
namespace SampleClient
{
//=================================================================================================
//
//=================================================================================================
bool Client_Startup()
{
if( !NetImgui::Startup() )
return false;
// Can have more ImGui initialization here, like loading extra fonts.
// ...
return true;
}
//=================================================================================================
//
//=================================================================================================
void Client_Shutdown()
{
NetImgui::Shutdown(true);
}
//=================================================================================================
// Function used by the sample, to draw all ImGui Content
//=================================================================================================
ImDrawData* Client_Draw()
{
//---------------------------------------------------------------------------------------------
// (1) Start a new Frame.
//---------------------------------------------------------------------------------------------
ImGui::NewFrame();
//-----------------------------------------------------------------------------------------
// (2) Draw ImGui Content
//-----------------------------------------------------------------------------------------
ClientUtil_ImGuiContent_Common("SampleSingleInclude"); //Note: Connection to remote server done in there
ImGui::SetNextWindowPos(ImVec2(32,48), ImGuiCond_Once);
ImGui::SetNextWindowSize(ImVec2(400,400), ImGuiCond_Once);
if( ImGui::Begin("Sample Single Include", nullptr) )
{
ImGui::TextColored(ImVec4(0.1, 1, 0.1, 1), "Basic demonstration of netImgui code integration.");
ImGui::TextWrapped("Create a basic Window with some text.");
ImGui::NewLine();
ImGui::TextWrapped("Identical to SampleBasic, the only difference is how the client code was included in the project.");
ImGui::NewLine();
ImGui::TextColored(ImVec4(0.1, 1, 0.1, 1), "Where are we drawing: ");
ImGui::SameLine();
ImGui::TextUnformatted(NetImgui::IsDrawingRemote() ? "Remote Draw" : "Local Draw");
ImGui::NewLine();
ImGui::TextColored(ImVec4(0.1, 1, 0.1, 1), "Filler content");
ImGui::TextWrapped("Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.");
}
ImGui::End();
//---------------------------------------------------------------------------------------------
// (3) Finish the frame
//---------------------------------------------------------------------------------------------
ImGui::Render();
//---------------------------------------------------------------------------------------------
// (4) Return content to draw by local renderer. Stop drawing locally when remote connected
//---------------------------------------------------------------------------------------------
return !NetImgui::IsConnected() ? ImGui::GetDrawData() : nullptr;
}
} // namespace SampleClient
| 1,141 |
562 | #include <iostream>
#include "oatpp-postgresql/Types.hpp"
int main() {
{
oatpp::postgresql::Uuid uuid;
}
return 0;
}
| 66 |
14,668 | <gh_stars>1000+
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/metrics/bucket_ranges.h"
#include <stdint.h>
#include "testing/gtest/include/gtest/gtest.h"
namespace base {
namespace {
TEST(BucketRangesTest, NormalSetup) {
BucketRanges ranges(5);
ASSERT_EQ(5u, ranges.size());
ASSERT_EQ(4u, ranges.bucket_count());
for (int i = 0; i < 5; ++i) {
EXPECT_EQ(0, ranges.range(i));
}
EXPECT_EQ(0u, ranges.checksum());
ranges.set_range(3, 100);
EXPECT_EQ(100, ranges.range(3));
}
TEST(BucketRangesTest, Equals) {
// Compare empty ranges.
BucketRanges ranges1(3);
BucketRanges ranges2(3);
BucketRanges ranges3(5);
EXPECT_TRUE(ranges1.Equals(&ranges2));
EXPECT_FALSE(ranges1.Equals(&ranges3));
EXPECT_FALSE(ranges2.Equals(&ranges3));
// Compare full filled ranges.
ranges1.set_range(0, 0);
ranges1.set_range(1, 1);
ranges1.set_range(2, 2);
ranges1.set_checksum(100);
ranges2.set_range(0, 0);
ranges2.set_range(1, 1);
ranges2.set_range(2, 2);
ranges2.set_checksum(100);
EXPECT_TRUE(ranges1.Equals(&ranges2));
// Checksum does not match.
ranges1.set_checksum(99);
EXPECT_FALSE(ranges1.Equals(&ranges2));
ranges1.set_checksum(100);
// Range does not match.
ranges1.set_range(1, 3);
EXPECT_FALSE(ranges1.Equals(&ranges2));
}
TEST(BucketRangesTest, Checksum) {
BucketRanges ranges(3);
ranges.set_range(0, 0);
ranges.set_range(1, 1);
ranges.set_range(2, 2);
ranges.ResetChecksum();
EXPECT_EQ(289217253u, ranges.checksum());
ranges.set_range(2, 3);
EXPECT_FALSE(ranges.HasValidChecksum());
ranges.ResetChecksum();
EXPECT_EQ(2843835776u, ranges.checksum());
EXPECT_TRUE(ranges.HasValidChecksum());
}
} // namespace
} // namespace base
| 761 |
1,694 | <gh_stars>1000+
package org.stagemonitor.benchmark.profiler;
public class ClassOptimalPerformanceProfied {
public int method1() {
OptimalPerformanceProfilerMock.start();
try {
return method2(1) + method3() + method5();
} finally {
OptimalPerformanceProfilerMock.stop("public int ClassToProfile.method1()");
}
}
private int method2(int i) {
OptimalPerformanceProfilerMock.start();
try {
return 1 + i;
} finally {
OptimalPerformanceProfilerMock.stop("public int ClassToProfile.method2(int i)");
}
}
private int method3() {
OptimalPerformanceProfilerMock.start();
try {
return method4();
} finally {
OptimalPerformanceProfilerMock.stop("public int ClassToProfile.method3()");
}
}
private int method4() {
OptimalPerformanceProfilerMock.start();
try {
return 4;
} finally {
OptimalPerformanceProfilerMock.stop("public int ClassToProfile.method4()");
}
}
private int method5() {
OptimalPerformanceProfilerMock.start();
try {
return method6() + method7();
} finally {
OptimalPerformanceProfilerMock.stop("public int ClassToProfile.method5()");
}
}
private int method6() {
OptimalPerformanceProfilerMock.start();
try {
return 6;
} finally {
OptimalPerformanceProfilerMock.stop("public int ClassToProfile.method6()");
}
}
private int method7() {
OptimalPerformanceProfilerMock.start();
try {
return method8();
} finally {
OptimalPerformanceProfilerMock.stop("public int ClassToProfile.method7()");
}
}
private int method8() {
OptimalPerformanceProfilerMock.start();
try {
return method9();
} finally {
OptimalPerformanceProfilerMock.stop("public int ClassToProfile.method8()");
}
}
private int method9() {
OptimalPerformanceProfilerMock.start();
try {
return 9;
} finally {
OptimalPerformanceProfilerMock.stop("public int ClassToProfile.method9()");
}
}
}
| 694 |
6,036 | // Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once
#include "core/providers/cuda/cuda_kernel.h"
#include "orttraining/core/graph/horovod_adapters.h"
#include "orttraining/core/graph/optimizer_config.h"
namespace onnxruntime {
namespace cuda {
class HorovodAllReduce final : public CudaKernel {
public:
HorovodAllReduce(const OpKernelInfo& info) : CudaKernel(info) {
unique_name = "AllReduceNode_" + info.node().Name();
int64_t reduce_op;
// bugbug
int64_t adasum_type = training::AdasumReductionType::None;
info.GetAttrOrDefault("reduce_op", &reduce_op, static_cast<int64_t>(hvd::ReduceOp::SUM));
info.GetAttrOrDefault("reduce_algo", &adasum_type, static_cast<int64_t>(training::AdasumReductionType::None));
reduce_op_ = GetReduceOp(reduce_op);
adasum_type_ = static_cast<training::AdasumReductionType>(adasum_type);
}
Status ComputeInternal(OpKernelContext* context) const override;
private:
std::string unique_name;
hvd::ReduceOp reduce_op_;
training::AdasumReductionType adasum_type_;
};
class HorovodBarrier final : public CudaKernel {
public:
HorovodBarrier(const OpKernelInfo& info) : CudaKernel(info) {
// bugbug
int64_t adasum_type = training::AdasumReductionType::None;
info.GetAttrOrDefault("reduce_algo", &adasum_type, static_cast<int64_t>(training::AdasumReductionType::None));
adasum_type_ = static_cast<training::AdasumReductionType>(adasum_type);
}
Status ComputeInternal(OpKernelContext* context) const override;
private:
training::AdasumReductionType adasum_type_;
};
} // namespace cuda
} // namespace onnxruntime
| 619 |
400 | <reponame>ygy203/xipki
/*
*
* Copyright (c) 2013 - 2020 <NAME>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.xipki.ca.server;
import org.bouncycastle.asn1.ASN1Integer;
import org.bouncycastle.asn1.x500.X500Name;
import org.bouncycastle.asn1.x509.Extensions;
import org.bouncycastle.asn1.x509.SubjectPublicKeyInfo;
import org.xipki.util.Args;
import java.util.Date;
/**
* Certificate template data.
*
* @author <NAME>
* @since 2.0.0
*/
public class CertTemplateData {
private final X500Name subject;
private final SubjectPublicKeyInfo publicKeyInfo;
private final Date notBefore;
private final Date notAfter;
private final String certprofileName;
private final boolean caGenerateKeypair;
private final Extensions extensions;
private final ASN1Integer certReqId;
public CertTemplateData(X500Name subject, SubjectPublicKeyInfo publicKeyInfo, Date notBefore,
Date notAfter, Extensions extensions, String certprofileName) {
this(subject, publicKeyInfo, notBefore, notAfter, extensions, certprofileName, null, false);
}
public CertTemplateData(X500Name subject, SubjectPublicKeyInfo publicKeyInfo,
Date notBefore, Date notAfter, Extensions extensions, String certprofileName,
ASN1Integer certReqId, boolean caGenerateKeypair) {
this.publicKeyInfo = publicKeyInfo;
this.subject = Args.notNull(subject, "subject");
this.certprofileName = Args.toNonBlankLower(certprofileName, "certprofileName");
this.extensions = extensions;
this.notBefore = notBefore;
this.notAfter = notAfter;
this.certReqId = certReqId;
this.caGenerateKeypair = caGenerateKeypair;
}
public X500Name getSubject() {
return subject;
}
public SubjectPublicKeyInfo getPublicKeyInfo() {
return publicKeyInfo;
}
public boolean isCaGenerateKeypair() {
return caGenerateKeypair;
}
public Date getNotBefore() {
return notBefore;
}
public Date getNotAfter() {
return notAfter;
}
public String getCertprofileName() {
return certprofileName;
}
public Extensions getExtensions() {
return extensions;
}
public ASN1Integer getCertReqId() {
return certReqId;
}
}
| 850 |
824 | import unittest
import numpy as np
import pytest
import torch
from pfrl.nn import empirical_normalization
class TestEmpiricalNormalization(unittest.TestCase):
def test_small_cpu(self):
self._test_small(gpu=-1)
@pytest.mark.gpu
def test_small_gpu(self):
self._test_small(gpu=0)
def _test_small(self, gpu):
en = empirical_normalization.EmpiricalNormalization(10)
if gpu >= 0:
device = "cuda:{}".format(gpu)
en.to(device)
else:
device = "cpu"
xs = []
for t in range(10):
x = np.random.normal(loc=4, scale=2, size=(t + 3, 10))
en(torch.tensor(x, device=device))
xs.extend(list(x))
xs = np.stack(xs)
true_mean = np.mean(xs, axis=0)
true_std = np.std(xs, axis=0)
np.testing.assert_allclose(en.mean.cpu().numpy(), true_mean, rtol=1e-4)
np.testing.assert_allclose(en.std.cpu().numpy(), true_std, rtol=1e-4)
@pytest.mark.slow
def test_large(self):
en = empirical_normalization.EmpiricalNormalization(10)
for _ in range(10000):
x = np.random.normal(loc=4, scale=2, size=(7, 10))
en(torch.tensor(x))
x = 2 * np.random.normal(loc=4, scale=2, size=(1, 10))
enx = en(torch.tensor(x), update=False)
np.testing.assert_allclose(en.mean.cpu().numpy(), 4, rtol=1e-1)
np.testing.assert_allclose(en.std.cpu().numpy(), 2, rtol=1e-1)
# Compare with the ground-truth normalization
np.testing.assert_allclose((x - 4) / 2, enx, rtol=1e-1)
# Test inverse
np.testing.assert_allclose(x, en.inverse(torch.tensor(enx)), rtol=1e-4)
def test_batch_axis(self):
shape = (2, 3, 4)
for batch_axis in range(3):
en = empirical_normalization.EmpiricalNormalization(
shape=shape[:batch_axis] + shape[batch_axis + 1 :],
batch_axis=batch_axis,
)
for _ in range(10):
x = np.random.rand(*shape)
en(torch.tensor(x))
def test_until(self):
en = empirical_normalization.EmpiricalNormalization(7, until=20)
last_mean = None
last_std = None
for t in range(15):
en(torch.tensor(np.random.rand(2, 7) + t))
if 1 <= t < 10:
self.assertFalse(
np.allclose(en.mean.cpu().numpy(), last_mean, rtol=1e-4)
)
self.assertFalse(np.allclose(en.std.cpu().numpy(), last_std, rtol=1e-4))
elif t >= 10:
np.testing.assert_allclose(en.mean.cpu().numpy(), last_mean, rtol=1e-4)
np.testing.assert_allclose(en.std.cpu().numpy(), last_std, rtol=1e-4)
last_mean = en.mean
last_std = en.std
| 1,480 |
2,978 | /*
* Copyright Strimzi authors.
* License: Apache License 2.0 (see the file LICENSE or http://apache.org/licenses/LICENSE-2.0.html).
*/
package io.strimzi.operator.topic;
import io.vertx.core.Promise;
import io.vertx.junit5.Checkpoint;
import io.vertx.junit5.VertxExtension;
import io.vertx.junit5.VertxTestContext;
import org.junit.jupiter.api.Assumptions;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;
import java.util.Collections;
import java.util.concurrent.ThreadLocalRandom;
import static org.hamcrest.CoreMatchers.instanceOf;
import static org.hamcrest.CoreMatchers.is;
import static org.hamcrest.CoreMatchers.nullValue;
import static org.hamcrest.MatcherAssert.assertThat;
@ExtendWith(VertxExtension.class)
public abstract class TopicStoreTestBase {
protected TopicStore store;
protected abstract boolean canRunTest();
@Test
public void testCrud(VertxTestContext context) {
Assumptions.assumeTrue(canRunTest());
Checkpoint async = context.checkpoint();
String topicName = "my_topic_" + ThreadLocalRandom.current().nextInt(Integer.MAX_VALUE);
Topic topic = new Topic.Builder(topicName, 2,
(short) 3, Collections.singletonMap("foo", "bar")).build();
Promise<Void> failedCreateCompleted = Promise.promise();
// Create the topic
store.create(topic)
.onComplete(context.succeeding())
// Read the topic
.compose(v -> store.read(new TopicName(topicName)))
.onComplete(context.succeeding(readTopic -> context.verify(() -> {
// assert topics equal
assertThat(readTopic.getTopicName(), is(topic.getTopicName()));
assertThat(readTopic.getNumPartitions(), is(topic.getNumPartitions()));
assertThat(readTopic.getNumReplicas(), is(topic.getNumReplicas()));
assertThat(readTopic.getConfig(), is(topic.getConfig()));
})))
// try to create it again: assert an error
.compose(v -> store.create(topic))
.onComplete(context.failing(e -> context.verify(() -> {
assertThat(e, instanceOf(TopicStore.EntityExistsException.class));
failedCreateCompleted.complete();
})));
Topic updatedTopic = new Topic.Builder(topic)
.withNumPartitions(3)
.withConfigEntry("fruit", "apple")
.build();
failedCreateCompleted.future()
// update my_topic
.compose(v -> store.update(updatedTopic))
.onComplete(context.succeeding())
// re-read it and assert equal
.compose(v -> store.read(new TopicName(topicName)))
.onComplete(context.succeeding(rereadTopic -> context.verify(() -> {
// assert topics equal
assertThat(rereadTopic.getTopicName(), is(updatedTopic.getTopicName()));
assertThat(rereadTopic.getNumPartitions(), is(updatedTopic.getNumPartitions()));
assertThat(rereadTopic.getNumReplicas(), is(updatedTopic.getNumReplicas()));
assertThat(rereadTopic.getConfig(), is(updatedTopic.getConfig()));
})))
// delete it
.compose(v -> store.delete(updatedTopic.getTopicName()))
.onComplete(context.succeeding())
// assert we can't read it again
.compose(v -> store.read(new TopicName(topicName)))
.onComplete(context.succeeding(deletedTopic -> context.verify(() ->
assertThat(deletedTopic, is(nullValue()))))
)
// delete it again: assert an error
.compose(v -> store.delete(updatedTopic.getTopicName()))
.onComplete(context.failing(e -> context.verify(() -> {
assertThat(e, instanceOf(TopicStore.NoSuchEntityExistsException.class));
async.flag();
})));
}
}
| 1,754 |
342 | <reponame>None1637/osu-droid-1
package org.anddev.andengine.util.pool;
/**
* (c) 2010 <NAME>
* (c) 2011 Zynga Inc.
*
* @author <NAME>
* @since 23:16:25 - 31.08.2010
*/
public class EntityDetachRunnablePoolUpdateHandler extends RunnablePoolUpdateHandler<EntityDetachRunnablePoolItem> {
// ===========================================================
// Constants
// ===========================================================
// ===========================================================
// Fields
// ===========================================================
// ===========================================================
// Constructors
// ===========================================================
// ===========================================================
// Getter & Setter
// ===========================================================
// ===========================================================
// Methods for/from SuperClass/Interfaces
// ===========================================================
@Override
protected EntityDetachRunnablePoolItem onAllocatePoolItem() {
return new EntityDetachRunnablePoolItem();
}
// ===========================================================
// Methods
// ===========================================================
// ===========================================================
// Inner and Anonymous Classes
// ===========================================================
}
| 296 |
1,538 | <filename>demos/speed/view.c
/*
* SPEED - by <NAME>, 1999
*
* Viewport functions (3d projection, wireframe guide rendering, etc).
*/
#include <math.h>
#include <allegro5/allegro.h>
#include <allegro5/allegro_font.h>
#include <allegro5/allegro_primitives.h>
#include "speed.h"
#ifndef M_PI
#define M_PI 3.14159265358979323846
#endif
#define NUM_VIEWS 4
/* desired position of a viewport window */
typedef struct
{
float pos[4]; /* left, top, right, bottom */
} VIEWPOS[NUM_VIEWS];
/* current status of a viewport window */
typedef struct
{
float pos[4]; /* left, top, right, bottom */
float vel[4]; /* rate of change of the above */
} VIEWINFO[NUM_VIEWS];
/* viewport positioning macros */
#define OFF_TL {{ -0.1, -0.1, -0.1, -0.1 }}
#define OFF_TR {{ 1.1, -0.1, 1.1, -0.1 }}
#define OFF_BL {{ -0.1, 1.1, -0.1, 1.1 }}
#define OFF_BR {{ 1.1, 1.1, 1.1, 1.1 }}
#define QTR_TL {{ 0, 0, 0.5, 0.5 }}
#define QTR_TR {{ 0.5, 0, 1.0, 0.5 }}
#define QTR_BL {{ 0, 0.5, 0.5, 1.0 }}
#define QTR_BR {{ 0.5, 0.5, 1.0, 1.0 }}
#define BIG_TL {{ 0, 0, 0.7, 0.7 }}
#define BIG_TR {{ 0.3, 0, 1.0, 0.7 }}
#define BIG_BL {{ 0, 0.3, 0.7, 1.0 }}
#define BIG_BR {{ 0.3, 0.3, 1.0, 1.0 }}
#define FULL {{ 0, 0, 1.0, 1.0 }}
/* list of viewport window positions */
static VIEWPOS viewpos[] =
{
{ FULL, OFF_TR, OFF_BL, OFF_BR }, /* 1 single */
{ OFF_TL, FULL, OFF_BL, OFF_BR }, /* 2 single */
{ BIG_TL, BIG_BR, OFF_BL, OFF_BR }, /* 12 multiple */
{ OFF_TL, OFF_TR, FULL, OFF_BR }, /* 3 single */
{ BIG_TL, OFF_TR, BIG_BR, OFF_BR }, /* 13 multiple */
{ OFF_TL, BIG_TR, BIG_BL, OFF_BR }, /* 23 multiple */
{ FULL, FULL, OFF_BL, OFF_BR }, /* 12 superimpose */
{ OFF_TL, OFF_TR, OFF_BL, FULL, }, /* 4 single */
{ BIG_TL, OFF_TR, OFF_BL, BIG_BR }, /* 14 multiple */
{ OFF_TL, FULL, FULL, OFF_BR }, /* 23 superimpose */
{ OFF_TL, BIG_TL, OFF_BL, BIG_BR }, /* 24 multiple */
{ OFF_TL, FULL, OFF_BL, FULL }, /* 24 superimpose */
{ QTR_TL, QTR_TR, QTR_BL, OFF_BR }, /* 123 multiple */
{ BIG_TL, OFF_TR, OFF_BL, BIG_BR }, /* 14 superimpose */
{ QTR_TL, OFF_TR, QTR_BL, QTR_BR }, /* 134 multiple */
{ FULL, OFF_TR, FULL, OFF_BR }, /* 13 superimpose */
{ OFF_TL, OFF_TR, BIG_TL, BIG_BR }, /* 34 multiple */
{ OFF_TL, QTR_TR, BIG_BL, QTR_BR }, /* 234 multiple */
{ OFF_TL, OFF_TR, FULL, FULL }, /* 34 superimpose */
{ FULL, QTR_TR, OFF_BL, QTR_BR }, /* 124 multiple */
{ FULL, FULL, OFF_BL, FULL }, /* 124 superimpose */
{ QTR_TL, QTR_TR, QTR_BL, QTR_BR }, /* 1234 multiple */
{ FULL, FULL, FULL, OFF_BR }, /* 123 superimpose */
{ FULL, OFF_TR, FULL, FULL }, /* 134 superimpose */
{ OFF_TL, FULL, FULL, FULL }, /* 234 superimpose */
{ FULL, FULL, FULL, FULL }, /* 1234 superimpose */
};
/* current viewport state */
static VIEWINFO viewinfo;
static int viewnum;
static float view_left, view_top, view_right, view_bottom;
/* returns a scaling factor for 2d graphics effects */
float view_size()
{
return ((view_right - view_left) + (view_bottom - view_top)) / 2;
}
/* initialises the view functions */
void init_view()
{
int i, j;
viewnum = 0;
for (i=0; i<4; i++) {
for (j=0; j<4; j++) {
viewinfo[i].pos[j] = 0;
viewinfo[i].vel[j] = 0;
}
}
}
/* closes down the view module */
void shutdown_view()
{
}
/* advances to the next view position */
int advance_view()
{
int cycled = FALSE;
viewnum++;
if (viewnum >= (int)(sizeof(viewpos)/sizeof(VIEWPOS))) {
viewnum = 0;
cycled = TRUE;
}
return cycled;
}
/* updates the view position */
void update_view()
{
float delta, vel;
int i, j;
for (i=0; i<4; i++) {
for (j=0; j<4; j++) {
delta = viewpos[viewnum][i].pos[j] - viewinfo[i].pos[j];
vel = viewinfo[i].vel[j];
vel *= 0.9;
delta = log(ABS(delta)+1.0) * SGN(delta) / 64.0;
vel += delta;
if ((ABS(delta) < 0.00001) && (ABS(vel) < 0.00001)) {
viewinfo[i].pos[j] = viewpos[viewnum][i].pos[j];
viewinfo[i].vel[j] = 0;
}
else {
viewinfo[i].pos[j] += vel;
viewinfo[i].vel[j] = vel;
}
}
}
}
/* flat projection function */
static int project_flat(float *f, int *i, int c)
{
while (c > 0) {
i[0] = view_left + f[0] * (view_right - view_left);
i[1] = view_top + f[1] * (view_bottom - view_top);
f += 2;
i += 2;
c -= 2;
}
return TRUE;
}
/* spherical coordinate projection function */
static int project_spherical(float *f, int *i, int c)
{
while (c > 0) {
float ang = f[0] * M_PI * 2.0;
float xsize = view_right - view_left;
float ysize = view_bottom - view_top;
float size = MIN(xsize, ysize) / 2.0;
float ff = (f[1] > 0.99) ? 0 : (1.0 - f[1] * 0.9);
float dx = cos(ang) * ff * size;
float dy = sin(ang) * ff * size;
i[0] = dx + (view_left + view_right) / 2.0;
i[1] = dy + (view_top + view_bottom) / 2.0;
f += 2;
i += 2;
c -= 2;
}
return TRUE;
}
/* inside of tube projection function */
static int project_tube(float *f, int *i, int c)
{
while (c > 0) {
float ang = f[0] * M_PI * 2.0 + M_PI / 2.0;
float xsize = view_right - view_left;
float ysize = view_bottom - view_top;
float size = MIN(xsize, ysize) / 2.0;
float x = cos(ang);
float y = sin(ang);
float z = 1.0 + (1.0 - f[1]) * 8.0;
i[0] = x/z * size + (view_left + view_right) / 2.0;
i[1] = y/z * size + (view_top + view_bottom) / 2.0;
f += 2;
i += 2;
c -= 2;
}
return TRUE;
}
/* outside of cylinder projection function */
static int project_cylinder(float *f, int *i, int c)
{
static MATRIX_f mtx;
static int virgin = TRUE;
if (virgin) {
MATRIX_f m1, m2;
get_z_rotate_matrix_f(&m1, -64);
qtranslate_matrix_f(&m1, 0, 1.75, 0);
get_scaling_matrix_f(&m2, 2.0, 1.0, 1.0);
matrix_mul_f(&m1, &m2, &mtx);
virgin = FALSE;
}
while (c > 0) {
float ang = (f[0] - player_pos()) * M_PI * 2.0;
float xsize = view_right - view_left;
float ysize = view_bottom - view_top;
float size = MIN(xsize, ysize) / 2.0;
float x = cos(ang);
float y = sin(ang);
float z = 1.0 + (1.0 - f[1]) * 4.0;
float xout, yout, zout;
apply_matrix_f(&mtx, x, y, z, &xout, &yout, &zout);
if (yout > 1.5)
return FALSE;
i[0] = xout/zout * size + (view_left + view_right) / 2.0;
i[1] = (yout/zout * 2 - 1) * size + (view_top + view_bottom) / 2.0;
f += 2;
i += 2;
c -= 2;
}
return TRUE;
}
/* draws the entire view */
void draw_view()
{
int SCREEN_W = al_get_display_width(screen);
int SCREEN_H = al_get_display_height(screen);
int (*project)(float *f, int *i, int c);
int r, g, b;
ALLEGRO_COLOR c;
int i, n, x, y;
float point[6];
int ipoint[6];
al_clear_to_color(makecol(0, 0, 0));
al_set_blender(ALLEGRO_ADD, ALLEGRO_ONE, ALLEGRO_ONE);
for (i=0; i<4; i++) {
view_left = viewinfo[i].pos[0] * SCREEN_W;
view_top = viewinfo[i].pos[1] * SCREEN_H;
view_right = viewinfo[i].pos[2] * SCREEN_W;
view_bottom = viewinfo[i].pos[3] * SCREEN_H;
if ((view_right > view_left) && (view_bottom > view_top) &&
(view_right > 0) && (view_bottom > 0) &&
(view_left < SCREEN_W) && (view_top < SCREEN_H)) {
switch (i) {
case 0:
/* flat projection, green */
project = project_flat;
r = 0;
g = 255;
b = 0;
break;
case 1:
/* spherical coordinates, yellow */
project = project_spherical;
r = 255;
g = 255;
b = 0;
break;
case 2:
/* inside a tube, blue */
project = project_tube;
r = 0;
g = 0;
b = 255;
break;
case 3:
/* surface of cylinder, red */
project = project_cylinder;
r = 255;
g = 0;
b = 0;
break;
default:
/* oops! */
assert(FALSE);
return;
}
if (!no_grid) {
c = makecol(r/5, g/5, b/5);
n = (low_detail) ? 8 : 16;
for (x=0; x<=n; x++) {
for (y=0; y<=n; y++) {
point[0] = (float)x / n;
point[1] = (float)y / n;
point[2] = (float)(x+1) / n;
point[3] = (float)y / n;
point[4] = (float)x / n;
point[5] = (float)(y+1) / n;
if (project(point, ipoint, 6)) {
if (x < n)
line(ipoint[0], ipoint[1], ipoint[2], ipoint[3], c);
if ((y < n) && ((x < n) || (i == 0)))
line(ipoint[0], ipoint[1], ipoint[4], ipoint[5], c);
}
}
}
}
draw_player(r, g, b, project);
draw_badguys(r, g, b, project);
draw_bullets(r, g, b, project);
draw_explode(r, g, b, project);
}
}
solid_mode();
draw_message();
textprintf(font_video, 4, 4, makecol(128, 128, 128), "Lives: %d", lives);
textprintf(font_video, 4, 16, makecol(128, 128, 128), "Score: %d", score);
textprintf(font_video, 4, 28, makecol(128, 128, 128), "Hiscore: %d", get_hiscore());
al_flip_display();
}
| 4,639 |
335 | {
"word": "Fritz",
"definitions": [
"(of a machine) stop working properly."
],
"parts-of-speech": "Noun"
} | 60 |
10,876 | <gh_stars>1000+
{
"name": "monkeys-audio",
"version-string": "5.70",
"port-version": 1,
"description": [
"Monkey's Audio is an excellent audio compression tool which has multiple advantages over traditional methods.",
"Audio files compressed with it end with .ape extension."
],
"homepage": "https://monkeysaudio.com",
"supports": "!(uwp | osx | linux)",
"features": {
"tools": {
"description": "Build monkeys-audio tools"
}
}
}
| 162 |
852 | #ifndef ECALPSEUDOSTRIPINPUTSAMPLE_H
#define ECALPSEUDOSTRIPINPUTSAMPLE_H
#include <ostream>
#include <cstdint>
/** \class EcalPseudoStripInputSample
*/
class EcalPseudoStripInputSample {
public:
EcalPseudoStripInputSample();
EcalPseudoStripInputSample(uint16_t data);
EcalPseudoStripInputSample(int pseudoStripInput, bool finegrain);
///Set data
void setValue(uint16_t data) { theSample = data; }
/// get the raw word
uint16_t raw() const { return theSample; }
/// get the pseudoStrip Input amplitude (12 bits)
int pseudoStripInput() const { return theSample & 0xFFF; }
/// get the fine-grain bit (1 bit, the 13-th)
bool fineGrain() const { return (theSample & 0x1000) != 0; }
/// for streaming
uint16_t operator()() { return theSample; }
private:
uint16_t theSample;
};
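// Hedged usage sketch (assumption, not part of the original header): decoding the 13-bit
// packing described above:
//   EcalPseudoStripInputSample s(0x1ABC);
//   s.pseudoStripInput();  // 0x0ABC, the 12-bit pseudostrip input amplitude
//   s.fineGrain();         // true, because bit 13 (mask 0x1000) is set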
std::ostream& operator<<(std::ostream& s, const EcalPseudoStripInputSample& samp);
#endif
| 332 |
1,564 | #!/usr/bin/env python
# Copyright (c) 2012 Stanford University
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR(S) DISCLAIM ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL AUTHORS BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""
This runs a LogCabin cluster and continually kills off the leader, timing how
long each leader election takes.
"""
from __future__ import print_function
from common import sh, captureSh, Sandbox, hosts
import re
import subprocess
import sys
import time
num_servers = 5
def same(seq):
for x in seq:
if x != seq[0]:
return False
return True
def await_stable_leader(sandbox, server_ids, after_term=0):
while True:
server_beliefs = {}
for server_id in server_ids:
server_beliefs[server_id] = {'leader': None,
'term': None,
'wake': None}
b = server_beliefs[server_id]
for line in open('debug/%d' % server_id):
m = re.search('All hail leader (\d+) for term (\d+)', line)
if m is not None:
b['leader'] = int(m.group(1))
b['term'] = int(m.group(2))
continue
m = re.search('Now leader for term (\d+)', line)
if m is not None:
b['leader'] = server_id
b['term'] = int(m.group(1))
continue
m = re.search('Running for election in term (\d+)', line)
if m is not None:
b['wake'] = int(m.group(1))
terms = [b['term'] for b in server_beliefs.values()]
leaders = [b['leader'] for b in server_beliefs.values()]
if same(terms) and terms[0] > after_term:
assert same(leaders), server_beliefs
return {'leader': leaders[0],
'term': terms[0],
'num_woken': sum([1 for b in server_beliefs.values() if b['wake'] > after_term])}
else:
time.sleep(.25)
sandbox.checkFailures()
with Sandbox() as sandbox:
sh('rm -f debug/*')
sh('mkdir -p debug')
server_ids = range(1, num_servers + 1)
servers = {}
def start(server_id):
host = hosts[server_id - 1]
command = 'build/LogCabin -i %d' % server_id
print('Starting LogCabin -i %d on %s' % (server_id, host[0]))
server = sandbox.rsh(host[0], command, bg=True,
stderr=open('debug/%d' % server_id, 'w'))
servers[server_id] = server
for server_id in server_ids:
start(server_id)
num_terms = []
num_woken = []
for i in range(100):
old = await_stable_leader(sandbox, server_ids)
print('Server %d is the leader in term %d' % (old['leader'], old['term']))
print('Killing server %d' % old['leader'])
sandbox.kill(servers[old['leader']])
servers.pop(old['leader'])
server_ids.remove(old['leader'])
new = await_stable_leader(sandbox, server_ids, after_term=old['term'])
print('Server %d is the leader in term %d' % (new['leader'], new['term']))
sandbox.checkFailures()
num_terms.append(new['term'] - old['term'])
print('Took %d terms to elect a new leader' % (new['term'] - old['term']))
num_woken.append(new['num_woken'])
print('%d servers woke up' % (new['num_woken']))
server_ids.append(old['leader'])
start(old['leader'])
num_terms.sort()
print('Num terms:',
file=sys.stderr)
print('\n'.join(['%d: %d' % (i + 1, term) for (i, term) in enumerate(num_terms)]),
file=sys.stderr)
num_woken.sort()
print('Num woken:',
file=sys.stderr)
print('\n'.join(['%d: %d' % (i + 1, n) for (i, n) in enumerate(num_woken)]),
file=sys.stderr)
| 2,040 |
7,482 | <filename>rt-thread/bsp/lpc824_blink/Libraries/peri_driver/uart/ring_buffer.h<gh_stars>1000+
/*
* @brief Common ring buffer support functions
*
* @note
* Copyright(C) NXP Semiconductors, 2012
* All rights reserved.
*
* @par
* Software that is described herein is for illustrative purposes only
* which provides customers with programming information regarding the
* LPC products. This software is supplied "AS IS" without any warranties of
* any kind, and NXP Semiconductors and its licensor disclaim any and
* all warranties, express or implied, including all implied warranties of
* merchantability, fitness for a particular purpose and non-infringement of
* intellectual property rights. NXP Semiconductors assumes no responsibility
* or liability for the use of the software, conveys no license or rights under any
* patent, copyright, mask work right, or any other intellectual property rights in
* or to any products. NXP Semiconductors reserves the right to make changes
* in the software without notification. NXP Semiconductors also makes no
* representation or warranty that such application will be suitable for the
* specified use without further testing or modification.
*
* @par
* Permission to use, copy, modify, and distribute this software and its
* documentation is hereby granted, under NXP Semiconductors' and its
* licensor's relevant copyrights in the software, without fee, provided that it
* is used in conjunction with NXP Semiconductors microcontrollers. This
* copyright, permission, and disclaimer notice must appear in all copies of
* this code.
*/
#ifndef __RING_BUFFER_H_
#define __RING_BUFFER_H_
#include <stdint.h>
#include <string.h>
#define RINGBUF_IRQ_SAFE
#ifdef RINGBUF_IRQ_SAFE
#include <cmsis.h>
#define INIT_CRITICAL() uint32_t priMask = __get_PRIMASK()
#define ENTER_CRITICAL() __set_PRIMASK(1)
#define LEAVE_CRITICAL() __set_PRIMASK(priMask)
#else
#define INIT_CRITICAL()
#define ENTER_CRITICAL()
#define LEAVE_CRITICAL()
#endif
typedef struct
{
uint8_t *pBuf;
uint32_t size;
uint32_t cnt;
uint32_t rNdx;
uint32_t wNdx;
} ring_buffer_t, RINGBUFF_T;
#ifndef MIN
#define MIN(x,y) ((x) < (y) ? (x) : (y))
#endif /* ifndef MIN */
/**
* @brief Create and initialize a ring buffer
* @param pRB : pointer to ring buffer instance
* @param pBuffer: pointer to the buffer for ring buffer data
* @param size: The size of buffer pointed by pBuffer
* @return >=0:Success ; <0:Failed
*/
int32_t RingBuf_Init(ring_buffer_t *pRB, uint8_t *pBuffer, uint32_t size);
/**
* @brief Write new data to buffer
* @param pRB : pointer to the ring buffer instance
* @param pcData: point to data array that will be written to ring buffer
* @param dataBytes: bytes to write
* @return >=0:Bytes written ; <0:Failed
* @remark This function updates the ring buffer
*/
int32_t RingBuf_Write(ring_buffer_t* pRB, const uint8_t *pcData, uint32_t dataBytes);
/**
 * @brief Write 1 new byte of data to the buffer
* @param pRB : pointer to the ring buffer instance
* @param pcData: point to data byte that will be written to ring buffer
* @return 1:success; otherwise failed
* @remark This function updates the ring buffer. Optimized for byte-by-byte write
*/
int32_t RingBuf_Write1Byte(ring_buffer_t* pRB, const uint8_t *pcData);
/**
* @brief Read (copy and remove) data from ring buffer
* @param pRB : pointer to the ring buffer instance
* @param pData : pointer to data array that receives read data
* @param dataBytes: bytes to copy
* @return >=0:Bytes read ; <0:Failed
* @remark This function updates the ring buffer.
*/
int32_t RingBuf_Read(ring_buffer_t* pRB, uint8_t *pData, uint32_t dataBytes);
/**
 * @brief Read (copy and remove) the oldest byte of data from the buffer
* @param pRB : pointer to the ring buffer instance
* @param pData: point to data byte that will receive the oldest byte
* @return 1:success ; otherwise failed
* @remark This function updates the ring buffer. Optimized for byte-by-byte read
*/
int32_t RingBuf_Read1Byte(ring_buffer_t* pRB, uint8_t *pData);
/**
* @brief Copy but does NOT remove data from ring buffer
* @param pRB : pointer to the ring buffer instance
* @param pData : pointer to data array that receives read data
* @param dataBytes: bytes to read
* @return >=0:Read bytes ; <0:Failed
*/
int32_t RingBuf_Copy(ring_buffer_t* pRB, uint8_t *pData, uint32_t dataBytes);
/**
 * @brief Get data pointer to oldest byte in ring buffer, and contiguous byte count
 * @param pRB : pointer to the ring buffer instance
 * @param ppData : pointer to pointer variable that will be updated to point to oldest byte
 * @param contiguous_bytes: Contiguous bytes until roll back
 * @return >=0:Contiguous bytes until roll back or whole data (if roll back won't happen) ; <0:Failed
 * @remark Use this function if performance is critical since it does NOT copy data
* Use RingBuf_Free() to free (remove) data after use
*/
int32_t RingBuf_Peek(ring_buffer_t* pRB, uint8_t **ppData);
/**
* @brief Free (remove) data from ring buffer
* @param pRB : pointer to the ring buffer instance
* @param bytesToFree : Bytes to free (remove)
 * @remark Use this function to free data after data obtained from RingBuf_Peek() is used
*/
int32_t RingBuf_Free(ring_buffer_t* pRB, uint32_t bytesToFree);
/**
* @brief Get free bytes of ring buffer
* @param pRB : pointer to the ring buffer instance
* @return >=0:Free bytes ; <0:Failed
*/
int32_t RingBuf_GetFreeBytes(ring_buffer_t* pRB);
/**
* @brief Get free bytes of ring buffer
* @param pRB : pointer to the ring buffer instance
* @return >=0:Used bytes ; <0:Failed
*/
int32_t RingBuf_GetUsedBytes(ring_buffer_t* pRB);
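/* Hedged usage sketch (assumption, not part of the original header): the zero-copy
 * peek/free pattern described above, using only the API declared here:
 *
 *   uint8_t storage[64];
 *   ring_buffer_t rb;
 *   RingBuf_Init(&rb, storage, sizeof(storage));
 *   RingBuf_Write(&rb, data, dataBytes);       // data/dataBytes: caller-provided
 *   uint8_t *chunk;
 *   int32_t n = RingBuf_Peek(&rb, &chunk);     // contiguous bytes, no copy
 *   if (n > 0) {
 *       process(chunk, n);                     // process(): hypothetical consumer
 *       RingBuf_Free(&rb, n);                  // remove the peeked bytes after use
 *   }
 */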
/**
* @}
*/
#endif /* __RING_BUFFER_H_ */
| 1,863 |
456 | <reponame>medismailben/llvm-project
//===-- UnwindPlan.h --------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#ifndef liblldb_UnwindPlan_h
#define liblldb_UnwindPlan_h
#include <map>
#include <memory>
#include <vector>
#include "lldb/Core/AddressRange.h"
#include "lldb/Utility/ConstString.h"
#include "lldb/Utility/Stream.h"
#include "lldb/lldb-private.h"
namespace lldb_private {
// The UnwindPlan object specifies how to unwind out of a function - where this
// function saves the caller's register values before modifying them (for non-
// volatile aka saved registers) and how to find this frame's Canonical Frame
// Address (CFA) or Aligned Frame Address (AFA).
// CFA is a DWARF's Canonical Frame Address.
// Most commonly, registers are saved on the stack, offset some bytes from the
// Canonical Frame Address, or CFA, which is the starting address of this
// function's stack frame (the CFA is same as the eh_frame's CFA, whatever that
// may be on a given architecture). The CFA address for the stack frame does
// not change during the lifetime of the function.
// AFA is an artificially introduced Aligned Frame Address.
// It is used only for stack frames with realignment (e.g. when some of the
// locals has an alignment requirement higher than the stack alignment right
// after the function call). It is used to access register values saved on the
// stack after the realignment (and so they are inaccessible through the CFA).
// AFA usually equals the stack pointer value right after the realignment.
// Internally, the UnwindPlan is structured as a vector of register locations
// organized by code address in the function, showing which registers have been
// saved at that point and where they are saved. It can be thought of as the
// expanded table form of the DWARF CFI encoded information.
// Other unwind information sources will be converted into UnwindPlans before
// being added to a FuncUnwinders object. The unwind source may be an eh_frame
// FDE, a DWARF debug_frame FDE, or assembly language based prologue analysis.
// The UnwindPlan is the canonical form of this information that the unwinder
// code will use when walking the stack.
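// Hedged illustration (assumption, not part of the original header): one row of the
// expanded table described above could encode "CFA = SP + 16; caller's FP saved at
// CFA - 16" roughly as follows (sp_regnum/fp_regnum are placeholder register numbers):
//   UnwindPlan::RowSP row(new UnwindPlan::Row());
//   row->GetCFAValue().SetIsRegisterPlusOffset(sp_regnum, 16);
//   row->SetRegisterLocationToAtCFAPlusOffset(fp_regnum, -16, /*can_replace=*/true);
//   unwind_plan.AppendRow(row);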
class UnwindPlan {
public:
class Row {
public:
class RegisterLocation {
public:
enum RestoreType {
unspecified, // not specified, we may be able to assume this
// is the same register. gcc doesn't specify all
// initial values so we really don't know...
undefined, // reg is not available, e.g. volatile reg
same, // reg is unchanged
atCFAPlusOffset, // reg = deref(CFA + offset)
isCFAPlusOffset, // reg = CFA + offset
atAFAPlusOffset, // reg = deref(AFA + offset)
isAFAPlusOffset, // reg = AFA + offset
inOtherRegister, // reg = other reg
atDWARFExpression, // reg = deref(eval(dwarf_expr))
isDWARFExpression // reg = eval(dwarf_expr)
};
RegisterLocation() : m_type(unspecified), m_location() {}
bool operator==(const RegisterLocation &rhs) const;
bool operator!=(const RegisterLocation &rhs) const {
return !(*this == rhs);
}
void SetUnspecified() { m_type = unspecified; }
void SetUndefined() { m_type = undefined; }
void SetSame() { m_type = same; }
bool IsSame() const { return m_type == same; }
bool IsUnspecified() const { return m_type == unspecified; }
bool IsUndefined() const { return m_type == undefined; }
bool IsCFAPlusOffset() const { return m_type == isCFAPlusOffset; }
bool IsAtCFAPlusOffset() const { return m_type == atCFAPlusOffset; }
bool IsAFAPlusOffset() const { return m_type == isAFAPlusOffset; }
bool IsAtAFAPlusOffset() const { return m_type == atAFAPlusOffset; }
bool IsInOtherRegister() const { return m_type == inOtherRegister; }
bool IsAtDWARFExpression() const { return m_type == atDWARFExpression; }
bool IsDWARFExpression() const { return m_type == isDWARFExpression; }
void SetAtCFAPlusOffset(int32_t offset) {
m_type = atCFAPlusOffset;
m_location.offset = offset;
}
void SetIsCFAPlusOffset(int32_t offset) {
m_type = isCFAPlusOffset;
m_location.offset = offset;
}
void SetAtAFAPlusOffset(int32_t offset) {
m_type = atAFAPlusOffset;
m_location.offset = offset;
}
void SetIsAFAPlusOffset(int32_t offset) {
m_type = isAFAPlusOffset;
m_location.offset = offset;
}
void SetInRegister(uint32_t reg_num) {
m_type = inOtherRegister;
m_location.reg_num = reg_num;
}
uint32_t GetRegisterNumber() const {
if (m_type == inOtherRegister)
return m_location.reg_num;
return LLDB_INVALID_REGNUM;
}
RestoreType GetLocationType() const { return m_type; }
int32_t GetOffset() const {
switch(m_type)
{
case atCFAPlusOffset:
case isCFAPlusOffset:
case atAFAPlusOffset:
case isAFAPlusOffset:
return m_location.offset;
default:
return 0;
}
}
void GetDWARFExpr(const uint8_t **opcodes, uint16_t &len) const {
if (m_type == atDWARFExpression || m_type == isDWARFExpression) {
*opcodes = m_location.expr.opcodes;
len = m_location.expr.length;
} else {
*opcodes = nullptr;
len = 0;
}
}
void SetAtDWARFExpression(const uint8_t *opcodes, uint32_t len);
void SetIsDWARFExpression(const uint8_t *opcodes, uint32_t len);
const uint8_t *GetDWARFExpressionBytes() {
if (m_type == atDWARFExpression || m_type == isDWARFExpression)
return m_location.expr.opcodes;
return nullptr;
}
int GetDWARFExpressionLength() {
if (m_type == atDWARFExpression || m_type == isDWARFExpression)
return m_location.expr.length;
return 0;
}
void Dump(Stream &s, const UnwindPlan *unwind_plan,
const UnwindPlan::Row *row, Thread *thread, bool verbose) const;
private:
RestoreType m_type; // How do we locate this register?
union {
// For m_type == atCFAPlusOffset or m_type == isCFAPlusOffset
int32_t offset;
// For m_type == inOtherRegister
uint32_t reg_num; // The register number
// For m_type == atDWARFExpression or m_type == isDWARFExpression
struct {
const uint8_t *opcodes;
uint16_t length;
} expr;
} m_location;
};
class FAValue {
public:
enum ValueType {
unspecified, // not specified
isRegisterPlusOffset, // FA = register + offset
isRegisterDereferenced, // FA = [reg]
isDWARFExpression, // FA = eval(dwarf_expr)
isRaSearch, // FA = SP + offset + ???
};
FAValue() : m_type(unspecified), m_value() {}
bool operator==(const FAValue &rhs) const;
bool operator!=(const FAValue &rhs) const { return !(*this == rhs); }
void SetUnspecified() { m_type = unspecified; }
bool IsUnspecified() const { return m_type == unspecified; }
void SetRaSearch(int32_t offset) {
m_type = isRaSearch;
m_value.ra_search_offset = offset;
}
bool IsRegisterPlusOffset() const {
return m_type == isRegisterPlusOffset;
}
void SetIsRegisterPlusOffset(uint32_t reg_num, int32_t offset) {
m_type = isRegisterPlusOffset;
m_value.reg.reg_num = reg_num;
m_value.reg.offset = offset;
}
bool IsRegisterDereferenced() const {
return m_type == isRegisterDereferenced;
}
void SetIsRegisterDereferenced(uint32_t reg_num) {
m_type = isRegisterDereferenced;
m_value.reg.reg_num = reg_num;
}
bool IsDWARFExpression() const { return m_type == isDWARFExpression; }
void SetIsDWARFExpression(const uint8_t *opcodes, uint32_t len) {
m_type = isDWARFExpression;
m_value.expr.opcodes = opcodes;
m_value.expr.length = len;
}
uint32_t GetRegisterNumber() const {
if (m_type == isRegisterDereferenced || m_type == isRegisterPlusOffset)
return m_value.reg.reg_num;
return LLDB_INVALID_REGNUM;
}
ValueType GetValueType() const { return m_type; }
int32_t GetOffset() const {
switch (m_type) {
case isRegisterPlusOffset:
return m_value.reg.offset;
case isRaSearch:
return m_value.ra_search_offset;
default:
return 0;
}
}
void IncOffset(int32_t delta) {
if (m_type == isRegisterPlusOffset)
m_value.reg.offset += delta;
}
void SetOffset(int32_t offset) {
if (m_type == isRegisterPlusOffset)
m_value.reg.offset = offset;
}
void GetDWARFExpr(const uint8_t **opcodes, uint16_t &len) const {
if (m_type == isDWARFExpression) {
*opcodes = m_value.expr.opcodes;
len = m_value.expr.length;
} else {
*opcodes = nullptr;
len = 0;
}
}
const uint8_t *GetDWARFExpressionBytes() {
if (m_type == isDWARFExpression)
return m_value.expr.opcodes;
return nullptr;
}
int GetDWARFExpressionLength() {
if (m_type == isDWARFExpression)
return m_value.expr.length;
return 0;
}
void Dump(Stream &s, const UnwindPlan *unwind_plan, Thread *thread) const;
private:
ValueType m_type; // How do we compute CFA value?
union {
struct {
// For m_type == isRegisterPlusOffset or m_type ==
// isRegisterDereferenced
uint32_t reg_num; // The register number
// For m_type == isRegisterPlusOffset
int32_t offset;
} reg;
// For m_type == isDWARFExpression
struct {
const uint8_t *opcodes;
uint16_t length;
} expr;
// For m_type == isRaSearch
int32_t ra_search_offset;
} m_value;
}; // class FAValue
public:
Row();
Row(const UnwindPlan::Row &rhs) = default;
bool operator==(const Row &rhs) const;
bool GetRegisterInfo(uint32_t reg_num,
RegisterLocation ®ister_location) const;
void SetRegisterInfo(uint32_t reg_num,
const RegisterLocation register_location);
void RemoveRegisterInfo(uint32_t reg_num);
lldb::addr_t GetOffset() const { return m_offset; }
void SetOffset(lldb::addr_t offset) { m_offset = offset; }
void SlideOffset(lldb::addr_t offset) { m_offset += offset; }
FAValue &GetCFAValue() { return m_cfa_value; }
FAValue &GetAFAValue() { return m_afa_value; }
bool SetRegisterLocationToAtCFAPlusOffset(uint32_t reg_num, int32_t offset,
bool can_replace);
bool SetRegisterLocationToIsCFAPlusOffset(uint32_t reg_num, int32_t offset,
bool can_replace);
bool SetRegisterLocationToUndefined(uint32_t reg_num, bool can_replace,
bool can_replace_only_if_unspecified);
bool SetRegisterLocationToUnspecified(uint32_t reg_num, bool can_replace);
bool SetRegisterLocationToRegister(uint32_t reg_num, uint32_t other_reg_num,
bool can_replace);
bool SetRegisterLocationToSame(uint32_t reg_num, bool must_replace);
void Clear();
void Dump(Stream &s, const UnwindPlan *unwind_plan, Thread *thread,
lldb::addr_t base_addr) const;
protected:
typedef std::map<uint32_t, RegisterLocation> collection;
lldb::addr_t m_offset; // Offset into the function for this row
FAValue m_cfa_value;
FAValue m_afa_value;
collection m_register_locations;
}; // class Row
public:
typedef std::shared_ptr<Row> RowSP;
UnwindPlan(lldb::RegisterKind reg_kind)
: m_row_list(), m_plan_valid_address_range(), m_register_kind(reg_kind),
m_return_addr_register(LLDB_INVALID_REGNUM), m_source_name(),
m_plan_is_sourced_from_compiler(eLazyBoolCalculate),
m_plan_is_valid_at_all_instruction_locations(eLazyBoolCalculate),
m_plan_is_for_signal_trap(eLazyBoolCalculate),
m_lsda_address(), m_personality_func_addr() {}
// Performs a deep copy of the plan, including all the rows (expensive).
UnwindPlan(const UnwindPlan &rhs)
: m_plan_valid_address_range(rhs.m_plan_valid_address_range),
m_register_kind(rhs.m_register_kind),
m_return_addr_register(rhs.m_return_addr_register),
m_source_name(rhs.m_source_name),
m_plan_is_sourced_from_compiler(rhs.m_plan_is_sourced_from_compiler),
m_plan_is_valid_at_all_instruction_locations(
rhs.m_plan_is_valid_at_all_instruction_locations),
m_lsda_address(rhs.m_lsda_address),
m_personality_func_addr(rhs.m_personality_func_addr) {
m_row_list.reserve(rhs.m_row_list.size());
for (const RowSP &row_sp : rhs.m_row_list)
m_row_list.emplace_back(new Row(*row_sp));
}
~UnwindPlan() = default;
void Dump(Stream &s, Thread *thread, lldb::addr_t base_addr) const;
void AppendRow(const RowSP &row_sp);
void InsertRow(const RowSP &row_sp, bool replace_existing = false);
// Returns a pointer to the best row for the given offset into the function's
// instructions. If offset is -1 it indicates that the function start is
// unknown - the final row in the UnwindPlan is returned. In practice, the
// UnwindPlan for a function with no known start address will be the
// architectural default UnwindPlan which will only have one row.
UnwindPlan::RowSP GetRowForFunctionOffset(int offset) const;
lldb::RegisterKind GetRegisterKind() const { return m_register_kind; }
void SetRegisterKind(lldb::RegisterKind kind) { m_register_kind = kind; }
void SetReturnAddressRegister(uint32_t regnum) {
m_return_addr_register = regnum;
}
uint32_t GetReturnAddressRegister(void) { return m_return_addr_register; }
uint32_t GetInitialCFARegister() const {
if (m_row_list.empty())
return LLDB_INVALID_REGNUM;
return m_row_list.front()->GetCFAValue().GetRegisterNumber();
}
// This UnwindPlan may not be valid at every address of the function span.
// For instance, a FastUnwindPlan will not be valid at the prologue setup
// instructions - only in the body of the function.
void SetPlanValidAddressRange(const AddressRange &range);
const AddressRange &GetAddressRange() const {
return m_plan_valid_address_range;
}
bool PlanValidAtAddress(Address addr);
bool IsValidRowIndex(uint32_t idx) const;
const UnwindPlan::RowSP GetRowAtIndex(uint32_t idx) const;
const UnwindPlan::RowSP GetLastRow() const;
lldb_private::ConstString GetSourceName() const;
void SetSourceName(const char *);
// Was this UnwindPlan emitted by a compiler?
lldb_private::LazyBool GetSourcedFromCompiler() const {
return m_plan_is_sourced_from_compiler;
}
// Was this UnwindPlan emitted by a compiler?
void SetSourcedFromCompiler(lldb_private::LazyBool from_compiler) {
m_plan_is_sourced_from_compiler = from_compiler;
}
// Is this UnwindPlan valid at all instructions? If not, then it is assumed
// valid at call sites, e.g. for exception handling.
lldb_private::LazyBool GetUnwindPlanValidAtAllInstructions() const {
return m_plan_is_valid_at_all_instruction_locations;
}
// Is this UnwindPlan valid at all instructions? If not, then it is assumed
// valid at call sites, e.g. for exception handling.
void SetUnwindPlanValidAtAllInstructions(
lldb_private::LazyBool valid_at_all_insn) {
m_plan_is_valid_at_all_instruction_locations = valid_at_all_insn;
}
// Is this UnwindPlan for a signal trap frame? If so, then its saved pc
// may have been set manually by the signal dispatch code and therefore
// not follow a call to the child frame.
lldb_private::LazyBool GetUnwindPlanForSignalTrap() const {
return m_plan_is_for_signal_trap;
}
void SetUnwindPlanForSignalTrap(lldb_private::LazyBool is_for_signal_trap) {
m_plan_is_for_signal_trap = is_for_signal_trap;
}
int GetRowCount() const;
void Clear() {
m_row_list.clear();
m_plan_valid_address_range.Clear();
m_register_kind = lldb::eRegisterKindDWARF;
m_source_name.Clear();
m_plan_is_sourced_from_compiler = eLazyBoolCalculate;
m_plan_is_valid_at_all_instruction_locations = eLazyBoolCalculate;
m_plan_is_for_signal_trap = eLazyBoolCalculate;
m_lsda_address.Clear();
m_personality_func_addr.Clear();
}
const RegisterInfo *GetRegisterInfo(Thread *thread, uint32_t reg_num) const;
Address GetLSDAAddress() const { return m_lsda_address; }
void SetLSDAAddress(Address lsda_addr) { m_lsda_address = lsda_addr; }
Address GetPersonalityFunctionPtr() const { return m_personality_func_addr; }
void SetPersonalityFunctionPtr(Address personality_func_ptr) {
  m_personality_func_addr = personality_func_ptr;
}
private:
typedef std::vector<RowSP> collection;
collection m_row_list;
AddressRange m_plan_valid_address_range;
lldb::RegisterKind m_register_kind; // The RegisterKind these register numbers
// are in terms of - will need to be
// translated to lldb native reg nums at unwind time
uint32_t m_return_addr_register; // The register that has the return address
// for the caller frame
// e.g. the lr on arm
lldb_private::ConstString
m_source_name; // for logging, where this UnwindPlan originated from
lldb_private::LazyBool m_plan_is_sourced_from_compiler;
lldb_private::LazyBool m_plan_is_valid_at_all_instruction_locations;
lldb_private::LazyBool m_plan_is_for_signal_trap;
Address m_lsda_address; // Where the language specific data area exists in the
// module - used
// in exception handling.
Address m_personality_func_addr; // The address of a pointer to the
// personality function - used in
// exception handling.
}; // class UnwindPlan
} // namespace lldb_private
#endif // liblldb_UnwindPlan_h
| 7,684 |
2,208 | <gh_stars>1000+
// declaring the methods that can be used from the pyrexwrapper
// CHECKME: are those 'extern's necessary?
extern void initCartPole(int markov_, int numPoles_, int maxsteps_);
extern void reset();
extern unsigned int getObservationDimension();
extern void echoParams();
extern void getObservation(double * input);
extern void doAction(double * output);
extern int trialFinished();
extern double getReward();
| 126 |
809 | <reponame>nikitavlaev/embox<gh_stars>100-1000
/**
* @file mmc.c
* @brief MMC-specific stuff (i.e. non-SD, non-SDIO cards)
* @author <NAME> <<EMAIL>>
* @version
* @date 05.11.2019
*/
#include <assert.h>
#include <drivers/block_dev.h>
#include <drivers/mmc/mmc.h>
#include <drivers/mmc/mmc_core.h>
#include <drivers/mmc/mmc_host.h>
#include <util/log.h>
int mmc_try_mmc(struct mmc_host *host) {
uint32_t resp[4];
uint64_t size;
mmc_send_cmd(host, 55, 0, MMC_RSP_R1, resp);
#define VOLTAGE_WINDOW_MMC 0x00FF8080 /* Taken from u-boot */
mmc_send_cmd(host, 41, VOLTAGE_WINDOW_MMC & 0xff8000, MMC_RSP_R3, resp);
mmc_send_cmd(host, 1, 0, 0, resp);
	/* Send CID (get manufacturer ID etc.) */
mmc_send_cmd(host, 2, 0, MMC_RSP_R2, resp);
mmc_dump_cid(resp);
/* CMD3 sets stand-by mode */
mmc_send_cmd(host, 3, 0, MMC_RSP_R6, resp);
host->rca = resp[0] >> 16;
log_debug("MMC RCA: %04x", host->rca);
/* Send CSD (get device size and so on) */
/* TODO setup RCA for MMC cards? */
mmc_send_cmd(host, 9, host->rca << 16, MMC_RSP_R2, resp);
log_debug("MMC CSD: %08x %08x %08x %08x",
resp[0], resp[1], resp[2], resp[3]);
if ((resp[0] | resp[1] | resp[2] | resp[3]) == 0) {
return -1;
}
if (!(resp[0] & 0x40000000)) {
host->high_capacity = 1;
size = ((resp[1] >> 8) & 0x3) << 10;
size |= (resp[1] & 0xFF) << 2;
size = 256 * 1024 * (size + 1);
log_debug("Size = %lld bytes (High-Capacity SD)", size);
} else {
host->high_capacity = 0;
size = (resp[1] & 0xFF) << 16;
size |= ((resp[2] >> 24) & 0xFF) << 8;
size |= (resp[2] >> 16) & 0xFF;
size = 512 * 1024 * (size + 1);
log_debug("Size = %lld bytes (Standart Capacity SD)", size);
}
assert(host->bdev);
host->bdev->size = size;
host->bdev->block_size = 512;
mmc_send_cmd(host, 7, host->rca << 16, 0, resp);
return 0;
}
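/*
 * Illustrative note (hypothetical values, not taken from a real card): the
 * size arithmetic above plugs the decoded CSD fields into a (field + 1)
 * formula. If the combined field in the high-capacity branch decoded to
 * 4095, the computed size would be 256 * 1024 * (4095 + 1) = 1073741824
 * bytes (1 GiB); the standard-capacity branch scales its decoded field by
 * 512 KiB in the same fashion.
 */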
| 854 |
831 | /*
* Copyright (C) 2018 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.android.tools.idea.gradle.project.sync.idea.data;
import static com.android.tools.idea.Projects.getBaseDirPath;
import static com.android.tools.idea.testing.AndroidGradleTestUtilsKt.openPreparedProject;
import static com.android.tools.idea.testing.AndroidGradleTestUtilsKt.prepareGradleProject;
import static com.intellij.openapi.application.ActionsKt.runWriteAction;
import static com.intellij.openapi.project.Project.DIRECTORY_STORE_FOLDER;
import com.android.tools.idea.projectsystem.ProjectSystemService;
import com.android.tools.idea.projectsystem.ProjectSystemSyncManager;
import com.android.tools.idea.testing.AndroidGradleTestCase;
import com.android.tools.idea.testing.TestProjectPaths;
import com.intellij.openapi.roots.OrderRootType;
import com.intellij.openapi.roots.libraries.LibraryTablesRegistrar;
import com.intellij.openapi.vfs.VfsUtilCore;
import com.intellij.openapi.vfs.VirtualFile;
import com.intellij.util.containers.ContainerUtil;
import java.io.File;
import java.io.IOException;
import java.util.Arrays;
import java.util.List;
public class IdeaSyncCachesTest extends AndroidGradleTestCase {
private IdeaSyncCachesInvalidator myInvalidator;
@Override
public void setUp() throws Exception {
super.setUp();
myInvalidator = new IdeaSyncCachesInvalidator();
}
public void testCacheIsInvalidated() {
prepareGradleProject(this, TestProjectPaths.SIMPLE_APPLICATION, "project");
openPreparedProject(this, "project", project -> {
assertEquals(ProjectSystemSyncManager.SyncResult.SUCCESS,
ProjectSystemService.getInstance(project).getProjectSystem().getSyncManager().getLastSyncResult());
return null;
});
openPreparedProject(this, "project", project -> {
assertEquals(ProjectSystemSyncManager.SyncResult.SKIPPED,
ProjectSystemService.getInstance(project).getProjectSystem().getSyncManager().getLastSyncResult());
myInvalidator.invalidateCaches();
return null;
});
openPreparedProject(this, "project", project -> {
assertEquals(ProjectSystemSyncManager.SyncResult.SUCCESS,
ProjectSystemService.getInstance(project).getProjectSystem().getSyncManager().getLastSyncResult());
return null;
});
}
public void testMissingJarTriggersSync() {
prepareGradleProject(this, TestProjectPaths.SIMPLE_APPLICATION, "project");
openPreparedProject(this, "project", project -> {
assertEquals(ProjectSystemSyncManager.SyncResult.SUCCESS,
ProjectSystemService.getInstance(project).getProjectSystem().getSyncManager().getLastSyncResult());
return null;
});
List<VirtualFile> lifecycleLiveDataLibraryPaths = openPreparedProject(this, "project", project -> {
assertEquals(ProjectSystemSyncManager.SyncResult.SKIPPED,
ProjectSystemService.getInstance(project).getProjectSystem().getSyncManager().getLastSyncResult());
return
ContainerUtil
.map(
Arrays.stream(LibraryTablesRegistrar.getInstance().getLibraryTable(project).getLibraries())
.filter(it -> it.getName().startsWith("Gradle: android.arch.lifecycle:livedata:"))
.findAny()
.get()
.getFiles(OrderRootType.CLASSES),
it -> {
VirtualFile file = VfsUtilCore.getVirtualFileForJar(it);
if (file == null) file = it;
return file;
});
});
deleteLibraryFilesFromGradleCache(lifecycleLiveDataLibraryPaths);
openPreparedProject(this, "project", project -> {
assertEquals(ProjectSystemSyncManager.SyncResult.SUCCESS,
ProjectSystemService.getInstance(project).getProjectSystem().getSyncManager().getLastSyncResult());
return null;
});
}
private void deleteLibraryFilesFromGradleCache(List<VirtualFile> lifecycleLiveDataLibraryPaths) {
assertFalse(lifecycleLiveDataLibraryPaths.isEmpty());
// Delete all CLASSES files from the Gradle cache. When a library expires in the Gradle cache all files are deleted.
runWriteAction(() ->{
lifecycleLiveDataLibraryPaths.forEach(file -> {
try {
file.delete(this);
}
catch (IOException e) {
fail(e.getMessage());
}
});
return null;
});
}
public void testLibrariesFolderIsDeleted() throws Exception {
loadSimpleApplication();
// Create .idea/libraries folder under project folder.
File ideaFolderPath = new File(getBaseDirPath(getProject()), DIRECTORY_STORE_FOLDER);
File librariesFolderPath = new File(ideaFolderPath, "libraries");
assertTrue(librariesFolderPath.mkdirs());
// Verify that libraries folder exists.
assertExists(librariesFolderPath);
// Verify that after invalidating cache, libraries folder is deleted.
myInvalidator.invalidateCaches();
assertDoesntExist(librariesFolderPath);
}
}
| 1,929 |
1,059 | #include <tilck/kernel/sync.h>
#include <tilck/kernel/sched.h>
void __wrap_kmutex_lock(struct kmutex *m) {
ASSERT(m->owner_task == NULL);
m->owner_task = get_curr_task();
}
void __wrap_kmutex_unlock(struct kmutex *m) {
ASSERT(m->owner_task == get_curr_task());
m->owner_task = NULL;
}
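/*
 * Illustrative note (assumption about the build setup, not stated in this
 * file): the __wrap_ prefix follows the GNU ld --wrap convention, so a unit
 * test binary would typically be linked with something like
 *
 *   -Wl,--wrap=kmutex_lock -Wl,--wrap=kmutex_unlock
 *
 * which redirects calls to kmutex_lock()/kmutex_unlock() in the code under
 * test to these single-threaded stand-ins.
 */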
| 132 |
1,350 | <reponame>Manny27nyc/azure-sdk-for-java<filename>sdk/synapse/azure-analytics-synapse-artifacts/src/main/java/com/azure/analytics/synapse/artifacts/models/SparkScheduler.java<gh_stars>1000+
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
// Code generated by Microsoft (R) AutoRest Code Generator.
package com.azure.analytics.synapse.artifacts.models;
import com.azure.core.annotation.Fluent;
import com.fasterxml.jackson.annotation.JsonProperty;
import java.time.OffsetDateTime;
/** The SparkScheduler model. */
@Fluent
public final class SparkScheduler {
/*
* The submittedAt property.
*/
@JsonProperty(value = "submittedAt")
private OffsetDateTime submittedAt;
/*
* The scheduledAt property.
*/
@JsonProperty(value = "scheduledAt")
private OffsetDateTime scheduledAt;
/*
* The endedAt property.
*/
@JsonProperty(value = "endedAt")
private OffsetDateTime endedAt;
/*
* The cancellationRequestedAt property.
*/
@JsonProperty(value = "cancellationRequestedAt")
private OffsetDateTime cancellationRequestedAt;
/*
* The currentState property.
*/
@JsonProperty(value = "currentState")
private SchedulerCurrentState currentState;
/**
* Get the submittedAt property: The submittedAt property.
*
* @return the submittedAt value.
*/
public OffsetDateTime getSubmittedAt() {
return this.submittedAt;
}
/**
* Set the submittedAt property: The submittedAt property.
*
* @param submittedAt the submittedAt value to set.
* @return the SparkScheduler object itself.
*/
public SparkScheduler setSubmittedAt(OffsetDateTime submittedAt) {
this.submittedAt = submittedAt;
return this;
}
/**
* Get the scheduledAt property: The scheduledAt property.
*
* @return the scheduledAt value.
*/
public OffsetDateTime getScheduledAt() {
return this.scheduledAt;
}
/**
* Set the scheduledAt property: The scheduledAt property.
*
* @param scheduledAt the scheduledAt value to set.
* @return the SparkScheduler object itself.
*/
public SparkScheduler setScheduledAt(OffsetDateTime scheduledAt) {
this.scheduledAt = scheduledAt;
return this;
}
/**
* Get the endedAt property: The endedAt property.
*
* @return the endedAt value.
*/
public OffsetDateTime getEndedAt() {
return this.endedAt;
}
/**
* Set the endedAt property: The endedAt property.
*
* @param endedAt the endedAt value to set.
* @return the SparkScheduler object itself.
*/
public SparkScheduler setEndedAt(OffsetDateTime endedAt) {
this.endedAt = endedAt;
return this;
}
/**
* Get the cancellationRequestedAt property: The cancellationRequestedAt property.
*
* @return the cancellationRequestedAt value.
*/
public OffsetDateTime getCancellationRequestedAt() {
return this.cancellationRequestedAt;
}
/**
* Set the cancellationRequestedAt property: The cancellationRequestedAt property.
*
* @param cancellationRequestedAt the cancellationRequestedAt value to set.
* @return the SparkScheduler object itself.
*/
public SparkScheduler setCancellationRequestedAt(OffsetDateTime cancellationRequestedAt) {
this.cancellationRequestedAt = cancellationRequestedAt;
return this;
}
/**
* Get the currentState property: The currentState property.
*
* @return the currentState value.
*/
public SchedulerCurrentState getCurrentState() {
return this.currentState;
}
/**
* Set the currentState property: The currentState property.
*
* @param currentState the currentState value to set.
* @return the SparkScheduler object itself.
*/
public SparkScheduler setCurrentState(SchedulerCurrentState currentState) {
this.currentState = currentState;
return this;
}
}
| 1,505 |
1,585 | /* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil -*- */
/*
* Copyright (c) 2004-2005 The Trustees of Indiana University and Indiana
* University Research and Technology
* Corporation. All rights reserved.
* Copyright (c) 2004-2005 The University of Tennessee and The University
* of Tennessee Research Foundation. All rights
* reserved.
* Copyright (c) 2004-2005 High Performance Computing Center Stuttgart,
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
* Copyright (c) 2011-2012 Cisco Systems, Inc. All rights reserved.
* Copyright (c) 2013 Los Alamos National Security, LLC. All rights
* reserved.
* Copyright (c) 2015-2021 Research Organization for Information Science
* and Technology (RIST). All rights reserved.
* Copyright (c) 2018 FUJITSU LIMITED. All rights reserved.
* $COPYRIGHT$
*
* Additional copyrights may follow
*
* $HEADER$
*/
#include "ompi_config.h"
#include "ompi/mpi/fortran/mpif-h/bindings.h"
#include "ompi/mpi/fortran/base/constants.h"
#if OMPI_BUILD_MPI_PROFILING
#if OPAL_HAVE_WEAK_SYMBOLS
#pragma weak PMPI_NEIGHBOR_ALLTOALL_INIT = ompi_neighbor_alltoall_init_f
#pragma weak pmpi_neighbor_alltoall_init = ompi_neighbor_alltoall_init_f
#pragma weak pmpi_neighbor_alltoall_init_ = ompi_neighbor_alltoall_init_f
#pragma weak pmpi_neighbor_alltoall_init__ = ompi_neighbor_alltoall_init_f
#pragma weak PMPI_Neighbor_alltoall_init_f = ompi_neighbor_alltoall_init_f
#pragma weak PMPI_Neighbor_alltoall_init_f08 = ompi_neighbor_alltoall_init_f
#else
OMPI_GENERATE_F77_BINDINGS (PMPI_NEIGHBOR_ALLTOALL_INIT,
pmpi_neighbor_alltoall_init,
pmpi_neighbor_alltoall_init_,
pmpi_neighbor_alltoall_init__,
pompi_neighbor_alltoall_init_f,
(char *sendbuf, MPI_Fint *sendcount, MPI_Fint *sendtype, char *recvbuf, MPI_Fint *recvcount, MPI_Fint *recvtype, MPI_Fint *comm, MPI_Fint *info, MPI_Fint *request, MPI_Fint *ierr),
(sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype, comm, info, request, ierr) )
#endif
#endif
#if OPAL_HAVE_WEAK_SYMBOLS
#pragma weak MPI_NEIGHBOR_ALLTOALL_INIT = ompi_neighbor_alltoall_init_f
#pragma weak mpi_neighbor_alltoall_init = ompi_neighbor_alltoall_init_f
#pragma weak mpi_neighbor_alltoall_init_ = ompi_neighbor_alltoall_init_f
#pragma weak mpi_neighbor_alltoall_init__ = ompi_neighbor_alltoall_init_f
#pragma weak MPI_Neighbor_alltoall_init_f = ompi_neighbor_alltoall_init_f
#pragma weak MPI_Neighbor_alltoall_init_f08 = ompi_neighbor_alltoall_init_f
#else
#if ! OMPI_BUILD_MPI_PROFILING
OMPI_GENERATE_F77_BINDINGS (MPI_NEIGHBOR_ALLTOALL_INIT,
mpi_neighbor_alltoall_init,
mpi_neighbor_alltoall_init_,
mpi_neighbor_alltoall_init__,
ompi_neighbor_alltoall_init_f,
(char *sendbuf, MPI_Fint *sendcount, MPI_Fint *sendtype, char *recvbuf, MPI_Fint *recvcount, MPI_Fint *recvtype, MPI_Fint *comm, MPI_Fint *info, MPI_Fint *request, MPI_Fint *ierr),
(sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype, comm, info, request, ierr) )
#else
#define ompi_neighbor_alltoall_init_f pompi_neighbor_alltoall_init_f
#endif
#endif
void ompi_neighbor_alltoall_init_f(char *sendbuf, MPI_Fint *sendcount, MPI_Fint *sendtype,
char *recvbuf, MPI_Fint *recvcount, MPI_Fint *recvtype,
MPI_Fint *comm, MPI_Fint *info, MPI_Fint *request, MPI_Fint *ierr)
{
int c_ierr;
MPI_Comm c_comm;
MPI_Info c_info;
MPI_Request c_req;
MPI_Datatype c_sendtype, c_recvtype;
c_comm = PMPI_Comm_f2c(*comm);
c_sendtype = PMPI_Type_f2c(*sendtype);
c_recvtype = PMPI_Type_f2c(*recvtype);
c_info = PMPI_Info_f2c(*info);
sendbuf = (char *) OMPI_F2C_IN_PLACE(sendbuf);
sendbuf = (char *) OMPI_F2C_BOTTOM(sendbuf);
recvbuf = (char *) OMPI_F2C_BOTTOM(recvbuf);
c_ierr = PMPI_Neighbor_alltoall_init(sendbuf,
OMPI_FINT_2_INT(*sendcount),
c_sendtype,
recvbuf,
OMPI_FINT_2_INT(*recvcount),
c_recvtype, c_comm, c_info, &c_req);
if (NULL != ierr) *ierr = OMPI_INT_2_FINT(c_ierr);
if (MPI_SUCCESS == c_ierr) *request = PMPI_Request_c2f(c_req);
}
| 2,566 |
388 | <reponame>ci-fuzz/cppcms
///////////////////////////////////////////////////////////////////////////////
//
// Copyright (C) 2008-2012 <NAME> (Tonkikh) <<EMAIL>>
//
// See accompanying file COPYING.TXT file for licensing details.
//
///////////////////////////////////////////////////////////////////////////////
#include <cppcms/service.h>
#include <cppcms/http_response.h>
#include <cppcms/json.h>
#include <cppcms/cache_interface.h>
#include <cppcms/url_mapper.h>
#include <cppcms/config.h>
#include <cppcms/view.h>
#include <string>
#include <vector>
#include <iomanip>
#include "dummy_api.h"
#include "test.h"
#ifndef CPPCMS_NO_GZIP
#include <zlib.h>
#endif
#include <iomanip>
#include <sstream>
void compare_strings(std::string const &l,std::string const &r,int file_line)
{
if(l==r) {
//std::cerr << "[" << l << "] == [" << r << "]" << " at " << file_line << " OK !" << std::endl;
return;
}
/*
size_t m = l.size();
if(r.size() > m) m = r.size();
int line = 1;
for(size_t i=0;i<m;i++) {
std::string lstr = conv(l,i);
std::string rstr = conv(r,i);
if(lstr=="\\n")
line++;
std::cerr << std::setw(4) << line << " [" << lstr << '|' << rstr << "] ";
if(lstr!=rstr)
std::cerr << "<----------" << std::endl;
else
std::cerr << std::endl;
}*/
std::cerr << "[" << l << "]!=[" << r << "]" << " at " << file_line << std::endl;
throw std::runtime_error("Failed test");
}
std::string remove_brakets(std::string const &in)
{
std::string r;
for(size_t i=0;i<in.size();i++)
if(in[i]!='[' && in[i]!=']')
r+= in[i];
return r;
}
#define TEQ(l,r) compare_strings(l,r,__LINE__)
#define TEQC(l,r) compare_strings(remove_brakets(l),remove_brakets(r),__LINE__)
class test_app : public cppcms::application {
public:
test_app(cppcms::service &srv) :
cppcms::application(srv),
srv_(srv)
{
mapper().assign("foo","/foo");
mapper().assign("/");
mapper().assign("/{1}");
mapper().assign("/{1}/{2}");
}
~test_app()
{
release_context();
}
void set_context(bool mark_chunks=false,bool mark_eof=false)
{
std::map<std::string,std::string> env;
env["HTTP_HOST"]="www.example.com";
env["SCRIPT_NAME"]="/foo";
env["PATH_INFO"]="/bar";
env["REQUEST_METHOD"]="GET";
env["HTTP_ACCEPT_ENCODING"]="gzip";
booster::shared_ptr<dummy_api> api(new dummy_api(srv_,env,output_,mark_chunks,mark_eof));
booster::shared_ptr<cppcms::http::context> cnt(new cppcms::http::context(api));
assign_context(cnt);
response().io_mode(cppcms::http::response::normal);
output_.clear();
}
std::string str()
{
std::string result = output_;
output_.clear();
return result;
}
void test_buffer_size(bool async)
{
std::cout << "- Test setbuf/flush " << (async ? "async" : "sync")<< std::endl;
set_context(true,true);
if(async) {
response().io_mode(cppcms::http::response::asynchronous);
}
else {
response().io_mode(cppcms::http::response::nogzip);
}
response().full_asynchronous_buffering(false);
response().out();
response().setbuf(0);
str();
response().out() << "x";
TEQ(str(),"[x]");
response().out() << 123;
TEQC(str(),"[123]");
response().setbuf(4);
response().out() << "abcdefg";
TEQ(str(),"[abcdefg]");
response().out() << 124;
TEQ(str(),"");
response().out() << std::flush;
TEQ(str(),"[124]");
response().out() << '0';
TEQ(str(),"");
response().out() << '1';
TEQ(str(),"");
response().out() << '2';
TEQ(str(),"");
response().out() << '3';
TEQ(str(),"");
response().out() << '4';
TEQ(str(),"[01234]");
response().out() << "xxx";
response().setbuf(0);
TEQ(str(),"[xxx]");
if(async) {
response().setbuf(4);
std::cout<< "-- fully/partially buffered mode" << std::endl;
response().full_asynchronous_buffering(true);
response().out() << "12345678";
TEQ(str(),"");
response().out() << std::flush;
TEQ(str(),"");
response().full_asynchronous_buffering(false);
TEQ(str(),"[12345678]");
response().full_asynchronous_buffering(true);
response().out() << "123";
TEQ(str(),"");
response().full_asynchronous_buffering(false);
response().out() << std::flush;
TEQ(str(),"[123]");
}
response().finalize();
TEQ(str(),"[EOF]");
}
void reset_context(bool async,bool gzip,bool set_cache)
{
set_context(false,false);
if(async) {
response().io_mode(cppcms::http::response::asynchronous);
response().full_asynchronous_buffering(false);
}
else {
if(!gzip)
response().io_mode(cppcms::http::response::nogzip);
}
if(set_cache)
cache().fetch_page("none");
response().out();
str();
}
void test_io_error(bool async,bool gzip,bool cached)
{
std::cout << "- Test I/O errors " << (async ? "async" : "sync")
<< (gzip ? " gzip" : "") << (cached ? " cached" : "") << std::endl;
reset_context(async,gzip,cached);
response().setbuf(0);
response().out() << "XXXXXXXXXXXXXXXX";
TEST(response().out());
response().out() << std::flush;
TEST(response().out());
#ifndef CPPCMS_NO_GZIP
if(gzip) {
zsinit();
TEST(zstr() == "XXXXXXXXXXXXXXXX");
zs_done = true;
zsdone();
}
#endif
output_="$$$ERROR$$$";
if(gzip || cached) {
response().out() << "x" << std::flush;
}
else {
response().out() << "x";
}
TEST(!response().out());
reset_context(async,gzip,cached);
response().setbuf(1024);
response().out() << "XXXXXXXXXXXXXXXX";
TEST(response().out());
response().out() << std::flush;
TEST(response().out());
output_="$$$ERROR$$$";
int i;
std::cout << "--- error on stream... " << std::flush;
for(i=0;i<=100000;i++) {
response().out() << std::setw(9) << i << '\n';
if(!response().out())
break;
}
TEST(i<100000);
std::cout << "Detected after " << (i*10) << " bytes " << std::endl;
if(async) {
reset_context(async,gzip,cached);
response().setbuf(1024);
response().out() << "xx" << std::flush;
std::cout << "--- block on stream... " << std::flush;
output_="$$$BLOCK$$$";
int i;
for(i=0;i<=100000;i++) {
response().out() << std::setw(9) << i << '\n';
TEST(response().out());
if(response().pending_blocked_output())
break;
}
TEST(i<100000);
std::cout << "Detected after " << (i*10) << " bytes " << std::endl;
}
}
#ifndef CPPCMS_NO_GZIP
z_stream zs;
bool zs_done;
std::string total;
std::string totalz;
void zsinit()
{
memset(&zs,0,sizeof(zs));
inflateInit2(&zs,15+16);
zs_done = false;
total.clear();
totalz.clear();
}
void zsdone()
{
TEST(zs_done == true);
inflateEnd(&zs);
}
std::string zstr()
{
std::vector<char> out(4096);
std::string s = str();
totalz += s;
/*for(size_t i=0;i<totalz.size();i++) {
std::cout << std::setw(2) <<std::hex<< unsigned((unsigned char)totalz[i]);
}
std::cout << "->[" << total <<"]" << std::endl;
*/
if(zs_done) {
TEST(s.empty());
return std::string();
}
if(s.empty()) {
return std::string();
}
zs.avail_in = s.size();
zs.next_in = (Bytef*)s.c_str();
zs.avail_out = out.size();
zs.next_out = (Bytef*)&out[0];
int r = inflate(&zs,0);
TEST(r==0 || r==Z_STREAM_END);
std::string output;
output.assign(&out[0],out.size() - zs.avail_out);
if(r == Z_STREAM_END)
zs_done = true;
total += output;
return output;
}
void test_gzipped(bool cached)
{
std::cout << "- Test gzip setbuf/flush " << (cached ? "cached" : "non cached")<< std::endl;
set_context(false,false);
if(cached) {
cache().fetch_page("none");
}
response().out();
std::string temp = str();
TEST(temp.find("\r\n\r\n")==temp.size()-4);
zsinit();
response().out() << "message";
TEQ(zstr(),"");
response().out() << std::flush;
TEQ(zstr(),"message");
response().setbuf(0);
response().out() << "ABCD" << std::flush;
TEQ(zstr(),"ABCD");
response().setbuf(2);
response().out() << "XYZ" << std::flush;
TEQ(zstr(),"XYZ");
response().setbuf(1024);
response().out() << "11111111111111111111111111111111111" << std::flush;
TEQ(zstr(),"11111111111111111111111111111111111");
response().out() << "x";
response().finalize();
TEQ(zstr(),"x");
zsdone();
std::string ztmp = totalz;
std::string tmp = total;
zsinit();
output_ = ztmp;
TEST(tmp == zstr());
zsdone();
if(cached) {
cache().store_page("p");
std::string ztmp2;
TEST(cache().fetch_frame("_Z:p",ztmp2,true));
TEST(ztmp2 == ztmp);
cache().rise("p");
}
}
#endif
private:
std::string output_;
cppcms::service &srv_;
};
int main()
{
try {
cppcms::json::value cfg;
cfg["cache"]["backend"]="thread_shared";
cfg["cache"]["limit"]=100;
cppcms::service srv(cfg);
test_app app(srv);
app.test_buffer_size(false);
app.test_buffer_size(true);
//void test_io_error(bool async,bool gzip,bool cached)
app.test_io_error(false,false,false);
app.test_io_error(false,true,false);
app.test_io_error(false,false,true);
app.test_io_error(false,true,true);
app.test_io_error(true,false,false);
app.test_io_error(true,false,true);
#ifndef CPPCMS_NO_GZIP
app.test_gzipped(false);
app.test_gzipped(true);
#endif
}
catch(std::exception const &e)
{
std::cerr << "Fail " << e.what() << std::endl;
return 1;
}
std::cout << "Ok" << std::endl;
return 0;
}
| 4,282 |
1,405 | <reponame>jarekankowski/pegasus_spyware<filename>sample4/recompiled_java/sources/tms/al.java
package tms;
import android.app.PendingIntent;
import android.content.Context;
import android.content.Intent;
import android.net.Uri;
import android.provider.Settings;
import com.tencent.lbsapi.QLBSService;
import com.tencent.tmsecure.common.BaseManager;
import com.tencent.tmsecure.module.antitheft.IAntitheftTips;
import com.tencent.tmsecure.utils.Log;
public final class al extends BaseManager {
private Context a;
private IAntitheftTips b;
private ao c;
private QLBSService d;
private boolean b() {
String string = Settings.Secure.getString(this.a.getContentResolver(), "location_providers_allowed");
if (string != null && string.contains("gps")) {
return true;
}
Intent intent = new Intent();
intent.setClassName("com.android.settings", "com.android.settings.widget.SettingsAppWidgetProvider");
intent.addCategory("android.intent.category.ALTERNATIVE");
intent.setData(Uri.parse("custom:3"));
try {
PendingIntent.getBroadcast(this.a, 0, intent, 0).send();
return true;
} catch (PendingIntent.CanceledException e) {
Log.i("AntitheftManagerImpl", e.getMessage());
return false;
}
}
static /* synthetic */ void c(al alVar) {
alVar.d.release();
alVar.d = null;
}
public final ao a() {
return this.c;
}
/* JADX WARNING: Removed duplicated region for block: B:14:0x0044 */
/* JADX WARNING: Removed duplicated region for block: B:16:0x0047 */
/* JADX WARNING: Removed duplicated region for block: B:27:0x0085 */
/* JADX WARNING: Removed duplicated region for block: B:54:? A[RETURN, SYNTHETIC] */
/* Code decompiled incorrectly, please refer to instructions dump. */
public final boolean a(com.tencent.tmsecure.module.antitheft.IAntitheftTips r8, java.lang.String r9, java.lang.String r10) {
/*
// Method dump skipped, instructions count: 374
*/
throw new UnsupportedOperationException("Method not decompiled: tms.al.a(com.tencent.tmsecure.module.antitheft.IAntitheftTips, java.lang.String, java.lang.String):boolean");
}
@Override // com.tencent.tmsecure.common.BaseManager
public final int getSingletonType() {
return 1;
}
@Override // com.tencent.tmsecure.common.BaseManager
public final void onCreate(Context context) {
this.a = context;
this.c = new ao();
}
}
| 1,008 |
3,075 | /*
*
* Copyright 2017 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package org.powermock.core.classloader.javassist;
import javassist.ClassPool;
import javassist.CtClass;
import javassist.NotFoundException;
import org.powermock.core.classloader.MockClassLoader;
import org.powermock.core.classloader.MockClassLoaderConfiguration;
import org.powermock.core.classloader.annotations.UseClassPathAdjuster;
import org.powermock.core.transformers.ClassWrapper;
import org.powermock.core.transformers.javassist.support.JavaAssistClassWrapperFactory;
import java.security.ProtectionDomain;
public class JavassistMockClassLoader extends MockClassLoader {
public static final String CGLIB_ENHANCER = "net.sf.cglib.proxy.Enhancer$EnhancerKey$$KeyFactoryByCGLIB$$";
public static final String CGLIB_METHOD_WRAPPER = "net.sf.cglib.core.MethodWrapper$MethodWrapperKey$$KeyFactoryByCGLIB";
private final ClassPool classPool;
public JavassistMockClassLoader(String[] classesToMock) {
this(classesToMock, new String[0], null);
}
public JavassistMockClassLoader(String[] classesToMock, String[] packagesToDefer,
UseClassPathAdjuster useClassPathAdjuster) {
this(new MockClassLoaderConfiguration(classesToMock, packagesToDefer), useClassPathAdjuster);
}
public JavassistMockClassLoader(MockClassLoaderConfiguration configuration) {
this(configuration, null);
}
public JavassistMockClassLoader(MockClassLoaderConfiguration configuration,
UseClassPathAdjuster useClassPathAdjuster) {
super(configuration, new JavaAssistClassWrapperFactory());
classPool = new ClassPoolFactory(useClassPathAdjuster).create();
classMarker = JavaAssistClassMarkerFactory.createClassMarker(classPool);
}
@Override
protected Class<?> loadUnmockedClass(String name, ProtectionDomain protectionDomain)
throws ClassFormatError, ClassNotFoundException {
byte bytes[] = null;
try {
/*
* TODO This if-statement is a VERY ugly hack to avoid the
* java.lang.ExceptionInInitializerError caused by
* "javassist.NotFoundException:
* net.sf.cglib.proxy.Enhancer$EnhancerKey$$KeyFactoryByCGLIB$$7fb24d72
* ". This happens after the
* se.jayway.examples.tests.privatefield.
* SimplePrivateFieldServiceClassTest#testUseService(..) tests has
* been run and all other tests will fail if this class is tried to
* be loaded. Atm I have found no solution other than this ugly hack
* to make it work. We really need to investigate the real cause of
* this behavior.
*/
if (!name.startsWith(CGLIB_ENHANCER) && !name.startsWith(CGLIB_METHOD_WRAPPER)) {
final CtClass ctClass = classPool.get(name);
if (ctClass.isFrozen()) {
ctClass.defrost();
}
bytes = ctClass.toBytecode();
}
} catch (NotFoundException e) {
return ClassLoader.getSystemClassLoader().loadClass(name);
} catch (Exception e) {
throw new RuntimeException("Failed to loaded class " + name, e);
}
return bytes == null ? null : defineClass(name, bytes, 0, bytes.length, protectionDomain);
}
protected byte[] defineAndTransformClass(String name, ProtectionDomain protectionDomain) {
final byte[] clazz;
ClassPool.doPruning = false;
try {
CtClass type = classPool.get(name);
ClassWrapper<CtClass> wrappedType = classWrapperFactory.wrap(type);
wrappedType = transformClass(wrappedType);
type = wrappedType.unwrap();
/*
* ClassPool may cause huge memory consumption if the number of CtClass
* objects becomes amazingly large (this rarely happens since Javassist
* tries to reduce memory consumption in various ways). To avoid this
* problem, you can explicitly remove an unnecessary CtClass object from
* the ClassPool. If you call detach() on a CtClass object, then that
* CtClass object is removed from the ClassPool.
*/
type.detach();
clazz = type.toBytecode();
} catch (Exception e) {
throw new IllegalStateException("Failed to transform class with name " + name + ". Reason: " + e.getMessage(), e);
}
return clazz;
}
}
| 2,113 |
4,320 | /*
* Copyright (c) 2018, Unmoon <https://github.com/Unmoon>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package net.runelite.client.plugins.blastmine;
import java.time.Duration;
import java.time.Instant;
import lombok.Getter;
import net.runelite.api.GameObject;
class BlastMineRock
{
private static final Duration PLANT_TIME = Duration.ofSeconds(30);
private static final Duration FUSE_TIME = Duration.ofMillis(4200);
@Getter
private final GameObject gameObject;
@Getter
private final BlastMineRockType type;
private final Instant creationTime = Instant.now();
BlastMineRock(final GameObject gameObject, BlastMineRockType blastMineRockType)
{
this.gameObject = gameObject;
this.type = blastMineRockType;
}
double getRemainingFuseTimeRelative()
{
Duration duration = Duration.between(creationTime, Instant.now());
return duration.compareTo(FUSE_TIME) < 0 ? (double) duration.toMillis() / FUSE_TIME.toMillis() : 1;
}
double getRemainingTimeRelative()
{
Duration duration = Duration.between(creationTime, Instant.now());
return duration.compareTo(PLANT_TIME) < 0 ? (double) duration.toMillis() / PLANT_TIME.toMillis() : 1;
}
}
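// Illustrative arithmetic for the relative timers above (hypothetical elapsed
// times): 2.1 s after the rock is created, getRemainingFuseTimeRelative()
// returns 2100 / 4200 = 0.5; once the 4.2 s fuse time has passed it clamps to
// 1, and getRemainingTimeRelative() behaves the same way against the 30 s
// plant time.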
| 739 |
9,491 | /*
+----------------------------------------------------------------------+
| HipHop for PHP |
+----------------------------------------------------------------------+
| Copyright (c) 2010-present Facebook, Inc. (http://www.facebook.com) |
+----------------------------------------------------------------------+
| This source file is subject to version 3.01 of the PHP license, |
| that is bundled with this package in the file LICENSE, and is |
| available through the world-wide-web at the following url: |
| http://www.php.net/license/3_01.txt |
| If you did not receive a copy of the PHP license and are unable to |
| obtain it through the world-wide-web, please send a note to |
| <EMAIL> so we can mail you a copy immediately. |
+----------------------------------------------------------------------+
*/
#include "hphp/util/process.h"
#include <sys/types.h>
#include <stdlib.h>
#ifdef _MSC_VER
#include <lmcons.h>
#include <Windows.h>
#include <ShlObj.h>
#else
#include <sys/fcntl.h>
#include <sys/utsname.h>
#include <sys/wait.h>
#include <pwd.h>
#include <folly/portability/Sockets.h>
#include <folly/portability/SysMman.h>
#include <folly/portability/Unistd.h>
#endif
#ifdef __APPLE__
#include <crt_externs.h>
#endif
#include <folly/Conv.h>
#include <folly/Format.h>
#include <folly/ScopeGuard.h>
#include <folly/String.h>
#include <boost/filesystem.hpp>
#include <set>
#include "hphp/util/hugetlb.h"
#include "hphp/util/managed-arena.h"
#include "hphp/util/text-color.h"
#include "hphp/util/user-info.h"
namespace HPHP {
///////////////////////////////////////////////////////////////////////////////
using std::string;
static void readString(FILE *f, string &out) {
size_t nread = 0;
constexpr unsigned int BUFFER_SIZE = 1024;
char buf[BUFFER_SIZE];
while ((nread = fread(buf, 1, BUFFER_SIZE, f)) != 0) {
out.append(buf, nread);
}
}
///////////////////////////////////////////////////////////////////////////////
// Cached process statics
std::string Process::HostName;
std::string Process::CurrentWorkingDirectory;
char** Process::Argv;
std::atomic_int64_t ProcStatus::VmSizeKb;
std::atomic_int64_t ProcStatus::VmRSSKb;
std::atomic_int64_t ProcStatus::VmHWMKb;
std::atomic_int64_t ProcStatus::VmSwapKb;
std::atomic_int64_t ProcStatus::HugetlbPagesKb;
std::atomic_int64_t ProcStatus::UnusedKb;
std::atomic_int ProcStatus::threads;
std::atomic_uint ProcStatus::lastUpdate;
void Process::InitProcessStatics() {
HostName = GetHostName();
CurrentWorkingDirectory = GetCurrentDirectory();
}
///////////////////////////////////////////////////////////////////////////////
// /proc/* parsing functions
std::string Process::GetCommandLine(pid_t pid) {
auto const name = folly::sformat("/proc/{}/cmdline", pid);
std::string cmdline;
auto const f = fopen(name.c_str(), "r");
if (f) {
readString(f, cmdline);
fclose(f);
}
std::string converted;
for (auto ch : cmdline) {
converted += ch ? ch : ' ';
}
return converted;
}
bool Process::IsUnderGDB() {
auto const cmdStr = GetCommandLine(getppid());
auto const cmdPiece = folly::StringPiece{cmdStr};
if (cmdPiece.empty()) return false;
auto const spaceIdx = std::min(cmdPiece.find(' '), cmdPiece.size() - 1);
auto const binaryPiece = cmdPiece.subpiece(0, spaceIdx + 1);
boost::filesystem::path binaryPath(binaryPiece.begin(), binaryPiece.end());
return binaryPath.filename() == "gdb ";
}
int64_t Process::GetMemUsageMb() {
ProcStatus::update();
return ProcStatus::valid() ? ProcStatus::adjustedRssKb() / 1024 : 0;
}
int64_t Process::GetSystemCPUDelayMS() {
static FILE* fp = nullptr;
if (!fp) {
if (!(fp = fopen("/proc/schedstat", "r"))) {
return -1;
}
}
// Refresh the proc info.
rewind(fp);
fflush(fp);
int64_t totalCpuDelay = 0;
  // Supposedly this should be enough to hold the important lines of the
// schedstat file.
char buf[320];
while (fgets(buf, sizeof(buf), fp) != nullptr) {
if (strncmp(buf, "cpu", 3) == 0) {
uint64_t cpuDelay;
if (sscanf(buf,
"%*s %*u %*u %*u %*u %*u %*u %*u %lu %*u",
&cpuDelay) != 1) {
return -1;
}
totalCpuDelay += cpuDelay;
}
}
// The kernel reports the information in nanoseconds. Convert it
// to milliseconds.
return totalCpuDelay / 1000000;
}
int Process::GetNumThreads() {
ProcStatus::update();
return ProcStatus::valid() ? ProcStatus::nThreads() : 1;
}
// Files such as /proc/meminfo and /proc/self/status contain many lines
// formatted as one of the following:
// <fieldName>: <number>
// <fieldName>: <number> kB
// This function parses the line and return the number in it. -1 is returned
// when the line isn't formatted as expected (until one day we need to read a
// line where -1 is a legit value).
static int64_t readSize(const char* line, bool expectKB = false) {
int64_t result = -1;
char tail[8];
auto n = sscanf(line, "%*s %" SCNd64 " %7s", &result, tail);
if (expectKB) {
if (n < 2) return -1;
if (tail[0] != 'k' || tail[1] != 'B') return -1;
}
return result;
}
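// Illustrative behaviour of readSize() on typical /proc lines (example inputs
// assumed here, not taken from a live system):
//   readSize("VmRSS:      2048 kB", true)  -> 2048
//   readSize("Threads:    12")             -> 12
//   readSize("VmRSS: 2048", true)          -> -1   // "kB" suffix missing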
bool Process::GetMemoryInfo(MemInfo& info) {
#ifdef _WIN32
#error "Process::GetMemoryInfo() doesn't support Windows (yet)."
return false;
#endif
info = MemInfo{};
FILE* f = fopen("/proc/meminfo", "r");
if (f) {
SCOPE_EXIT{ fclose(f); };
char line[128];
while (fgets(line, sizeof(line), f)) {
auto const kb = readSize(line, true);
if (!strncmp(line, "MemTotal:", 9)) {
if (kb >= 0) info.totalMb = kb / 1024;
} else if (!strncmp(line, "MemFree:", 8)) {
if (kb >= 0) info.freeMb = kb / 1024;
} else if (!strncmp(line, "Buffers:", 8)) {
if (kb >= 0) info.buffersMb = kb / 1024;
} else if (!strncmp(line, "Cached:", 7)) {
if (kb >= 0) info.cachedMb = kb / 1024;
} else if (!strncmp(line, "MemAvailable:", 13)) {
if (kb >= 0) info.availableMb = kb / 1024;
}
if (info.valid()) return true;
}
// If MemAvailable isn't available, which shouldn't be the case for kernel
    // versions later than 3.14, we get a rough estimation.
if (info.availableMb < 0 && info.freeMb >= 0 &&
info.cachedMb >= 0 && info.buffersMb >= 0) {
info.availableMb = info.freeMb + info.cachedMb;
return true;
}
}
return false;
}
int Process::GetCPUCount() {
return sysconf(_SC_NPROCESSORS_ONLN);
}
#ifdef __x86_64__
static __inline void do_cpuid(u_int ax, u_int *p) {
asm volatile ("cpuid"
: "=a" (p[0]), "=b" (p[1]), "=c" (p[2]), "=d" (p[3])
: "0" (ax));
}
#elif defined(_M_X64)
#include <intrin.h>
static ALWAYS_INLINE void do_cpuid(int func, uint32_t* p) {
__cpuid((int*)p, func);
}
#endif
std::string Process::GetCPUModel() {
#if defined(__x86_64__) || defined(_M_X64)
uint32_t regs[4];
do_cpuid(0, regs);
const int vendor_size = sizeof(regs[1])*3;
std::swap(regs[2], regs[3]);
uint32_t cpu_exthigh = 0;
if (memcmp(regs + 1, "GenuineIntel", vendor_size) == 0 ||
memcmp(regs + 1, "AuthenticAMD", vendor_size) == 0) {
do_cpuid(0x80000000, regs);
cpu_exthigh = regs[0];
}
char cpu_brand[3 * sizeof(regs) + 1];
char *brand = cpu_brand;
if (cpu_exthigh >= 0x80000004) {
for (u_int i = 0x80000002; i < 0x80000005; i++) {
do_cpuid(i, regs);
memcpy(brand, regs, sizeof(regs));
brand += sizeof(regs);
}
}
*brand = '\0';
assert(brand - cpu_brand < sizeof(cpu_brand));
return cpu_brand;
#else
// On non-x64, fall back to calling uname
std::string model = "Unknown ";
struct utsname uname_buf;
uname(&uname_buf);
model.append(uname_buf.machine);
return model;
#endif // __x86_64__
}
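// Illustrative note on the brand-string loop above: CPUID leaves 0x80000002
// through 0x80000004 each return 16 bytes, so cpu_brand ends up holding a
// 48-character brand string (e.g. a hypothetical
// "Intel(R) Xeon(R) CPU E5-2680 v4 @ 2.40GHz").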
///////////////////////////////////////////////////////////////////////////////
std::string Process::GetAppName() {
const char* progname = getenv("_");
if (!progname || !*progname) {
progname = "unknown program";
}
return progname;
}
std::string Process::GetHostName() {
char hostbuf[128];
hostbuf[0] = '\0'; // for cleaner valgrind output when gethostname() fails
gethostname(hostbuf, sizeof(hostbuf));
hostbuf[sizeof(hostbuf) - 1] = '\0';
return hostbuf;
}
std::string Process::GetCurrentUser() {
const char *name = getenv("LOGNAME");
if (name && *name) {
return name;
}
#ifdef _MSC_VER
char username[UNLEN + 1];
DWORD username_len = UNLEN + 1;
if (GetUserName(username, &username_len))
return std::string(username, username_len);
#else
auto buf = PasswdBuffer{};
passwd *pwd;
if (!getpwuid_r(geteuid(), &buf.ent, buf.data.get(), buf.size, &pwd) &&
pwd && pwd->pw_name) {
return pwd->pw_name;
}
#endif
return "";
}
std::string Process::GetCurrentDirectory() {
char buf[PATH_MAX + 64]; // additional space for suffixes like " (deleted)";
memset(buf, 0, sizeof(buf));
if (char* cwd = getcwd(buf, PATH_MAX)) return cwd;
#if defined(__linux__)
if (errno != ENOENT) {
return "";
}
// Read cwd symlink directly if it leads to the deleted path.
int r = readlink("/proc/self/cwd", buf, sizeof(buf));
if (r == -1) {
return "";
}
auto const kDeleted = " (deleted)";
auto const kDeletedLen = strlen(kDeleted);
if (r >= kDeletedLen && !strcmp(buf + r - kDeletedLen, kDeleted)) {
buf[r - kDeletedLen] = 0;
}
return &(buf[0]);
#else
// /proc/self/cwd is not available.
return "";
#endif
}
std::string Process::GetHomeDirectory() {
string ret;
const char *home = getenv("HOME");
if (home && *home) {
ret = home;
} else {
#ifdef _MSC_VER
PWSTR path;
if (SHGetKnownFolderPath(FOLDERID_UsersFiles, 0, nullptr, &path) == S_OK) {
char hPath[PATH_MAX];
size_t len = wcstombs(hPath, path, MAX_PATH);
CoTaskMemFree(path);
ret = std::string(hPath, len);
}
#else
passwd *pwd = getpwent();
if (pwd && pwd->pw_dir) {
ret = pwd->pw_dir;
}
#endif
}
if (ret.empty() || ret[ret.size() - 1] != '/') {
ret += '/';
}
return ret;
}
void Process::SetCoreDumpHugePages() {
#if defined(__linux__)
/*
   * From documentation at http://man7.org/linux/man-pages/man5/core.5.html
*
* The bits in coredump_filter have the following meanings:
*
* bit 0 Dump anonymous private mappings.
* bit 1 Dump anonymous shared mappings.
* bit 2 Dump file-backed private mappings.
* bit 3 Dump file-backed shared mappings.
* bit 4 (since Linux 2.6.24) Dump ELF headers.
* bit 5 (since Linux 2.6.28) Dump private huge pages.
* bit 6 (since Linux 2.6.28) Dump shared huge pages.
* bit 7 (since Linux 4.4) Dump private DAX pages.
* bit 8 (since Linux 4.4) Dump shared DAX pages.
*/
if (FILE* f = fopen("/proc/self/coredump_filter", "r+")) {
unsigned mask = 0;
if (fscanf(f, "%x", &mask)) {
constexpr unsigned hugetlbMask = 0x60;
if ((mask & hugetlbMask) != hugetlbMask) {
mask |= hugetlbMask;
rewind(f);
fprintf(f, "0x%x", mask);
}
}
fclose(f);
}
#endif
}
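/*
 * Illustrative arithmetic (hypothetical starting value): hugetlbMask above is
 * 0x60, i.e. bits 5 and 6 from the table in the comment. For a process whose
 * coredump_filter currently reads 0x33, the function would rewrite it as
 * 0x33 | 0x60 = 0x73, so both private and shared huge pages end up in core
 * dumps.
 */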
void ProcStatus::update() {
if (FILE* f = fopen("/proc/self/status", "r")) {
char line[128];
int64_t vmsize = 0, vmrss = 0, vmhwm = 0, vmswap = 0, hugetlb = 0;
while (fgets(line, sizeof(line), f)) {
if (!strncmp(line, "VmSize:", 7)) {
vmsize = readSize(line, true);
} else if (!strncmp(line, "VmRSS:", 6)) {
vmrss = readSize(line, true);
} else if (!strncmp(line, "VmHWM:", 6)) {
vmhwm = readSize(line, true);
} else if (!strncmp(line, "VmSwap:", 7)) {
vmswap = readSize(line, true);
} else if (!strncmp(line, "HugetlbPages:", 13)) {
hugetlb = readSize(line, true);
} else if (!strncmp(line, "Threads:", 8)) {
threads.store(readSize(line, false), std::memory_order_relaxed);
}
}
fclose(f);
if (vmrss <= 0) {
// Invalid
lastUpdate.store(0, std::memory_order_release);
} else {
VmSizeKb.store(vmsize, std::memory_order_relaxed);
VmRSSKb.store(vmrss, std::memory_order_relaxed);
VmSwapKb.store(vmswap, std::memory_order_relaxed);
VmHWMKb.store(vmhwm + hugetlb, std::memory_order_relaxed);
HugetlbPagesKb.store(hugetlb, std::memory_order_relaxed);
lastUpdate.store(time(), std::memory_order_release);
}
#ifdef USE_JEMALLOC
mallctl_epoch();
#if USE_JEMALLOC_EXTENT_HOOKS
size_t unused = 0;
// Various arenas where range of hugetlb pages can be reserved but only
// partially used.
unused += alloc::getRange(alloc::AddrRangeClass::VeryLow).retained();
unused += alloc::getRange(alloc::AddrRangeClass::Low).retained();
unused += alloc::getRange(alloc::AddrRangeClass::Uncounted).retained();
if (alloc::g_arena0) {
unused += alloc::g_arena0->retained();
}
for (auto const arena : alloc::g_local_arenas) {
if (arena) unused += arena->retained();
}
updateUnused(unused >> 10); // convert to kB
#endif
#endif
}
}
bool Process::OOMScoreAdj(int adj) {
#ifdef __linux__
if (adj >= -1000 && adj < 1000) {
if (auto f = fopen("/proc/self/oom_score_adj", "r+")) {
fprintf(f, "%d", adj);
fclose(f);
return true;
}
}
#endif
return false;
}
int Process::Relaunch() {
if (!Argv) {
errno = EINVAL;
return -1;
}
return execvp(Argv[0], Argv);
}
std::map<int, int> Process::RemapFDsPreExec(const std::map<int, int>& fds) {
std::map<int, int> unspecified;
// 1. copy all to FDs outside of STDIO range
std::map<int, int> dups;
std::set<int> preserve_set;
for (auto& [_target, current] : fds) {
if (dups.find(current) != dups.end()) {
continue;
}
int next_fd;
bool conflict;
do {
conflict = false;
// don't conflict with STDIO
next_fd = dup(current);
if (next_fd <= STDERR_FILENO) {
conflict = true;
continue;
}
// don't conflict with targets
conflict = false;
for (auto [target, _current] : fds) {
if (next_fd == target) {
conflict = true;
break;
}
}
} while (conflict);
dups[current] = next_fd;
preserve_set.emplace(next_fd);
}
// 2. clean up libc STDIO
//
// Don't want to swap the FD underlying these FILE*...
//
// If they are closed already, these are silent no-ops.
fclose(stdin);
fclose(stdout);
fclose(stderr);
// 3. close all FDs except our dups
//
// This includes the STDIO FDs as it's possible that:
// - the FILE* were previously closed (so the fclose above were no-ops)
// - the FDs were then re-used
#ifdef __APPLE__
const char* fd_dir = "/dev/fd";
#endif
#ifdef __linux__
const char* fd_dir = "/proc/self/fd";
#endif
// If you close FDs while in this loop, they get removed from /proc/self/fd
// and the iterator gets sad ("Bad file descriptor: /proc/self/fd")
std::set<int> fds_to_close;
for (const auto& entry : boost::filesystem::directory_iterator(fd_dir)) {
char* endptr = nullptr;
auto filename = entry.path().filename();
const char* filename_c = filename.c_str();
const int fd = strtol(filename_c, &endptr, 10);
assert(endptr != filename_c); // no matching characters
assert(*endptr == '\0'); // entire string
if (preserve_set.find(fd) != preserve_set.end()) {
continue;
}
fds_to_close.emplace(fd);
}
for (const auto& fd: fds_to_close) {
close(fd);
}
// 4. Move the dups into place.
for (const auto& [target, orig] : fds) {
int tmp = dups[orig];
if (target < 0 /* don't care what the FD is */) {
unspecified[target] = tmp;
} else {
dup2(tmp, target);
}
}
// 5. Close the dups; do this separately to above in case
// the same orig was used for multiple targets
for (const auto& [target, orig] : fds) {
if (target < 0) {
continue;
}
close(dups.at(orig));
}
return unspecified;
}
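// Illustrative usage sketch (hypothetical descriptors, not part of the
// original file): in a freshly fork()ed child, before exec, a caller can
// route a pipe's write end to stdout/stderr and keep another descriptor open
// at whatever number happens to be free by giving it a negative key:
//
//   std::map<int, int> fds = {
//     {STDOUT_FILENO, pipe_w},  // pipe_w: assumed pipe write end
//     {STDERR_FILENO, pipe_w},
//     {-1, status_w},           // status_w: assumed status descriptor
//   };
//   auto remapped = Process::RemapFDsPreExec(fds);
//   int new_status_w = remapped.at(-1);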
namespace {
char **build_cstrarr(const std::vector<std::string> &vec) {
char **cstrarr = nullptr;
int size = vec.size();
if (size) {
cstrarr = (char **)malloc((size + 1) * sizeof(char *));
int j = 0;
for (unsigned int i = 0; i < vec.size(); i++, j++) {
*(cstrarr + j) = (char *)vec[i].c_str();
}
*(cstrarr + j) = nullptr;
}
return cstrarr;
}
} // namespace {
pid_t Process::ForkAndExecve(
const std::string& path,
const std::vector<std::string>& argv,
const std::vector<std::string>& envp,
const std::string& cwd,
const std::map<int, int>& orig_fds,
int flags,
pid_t pgid
) {
// Distinguish execve failure: if the write side of the pipe
// is closed with no data, it succeeded.
int fork_fds[2];
pipe(fork_fds);
int fork_r = fork_fds[0];
int fork_w = fork_fds[1];
fcntl(fork_w, F_SETFD, fcntl(fork_w, F_GETFD) | O_CLOEXEC);
pid_t child = fork();
if (child == -1) {
return -1;
}
if (child == 0) {
mprotect_1g_pages(PROT_READ);
Process::OOMScoreAdj(1000);
close(fork_r);
// Need mutable copy
std::map<int, int> fds(orig_fds);
fds[-1] = fork_w;
const auto remapped = Process::RemapFDsPreExec(fds);
fork_w = remapped.at(-1);
if (!cwd.empty()) {
if (cwd != Process::GetCurrentDirectory()) {
if (chdir(cwd.c_str()) == -1) {
dprintf(fork_w, "%s %d", "chdir", errno);
_Exit(1);
}
}
}
if (flags & Process::FORK_AND_EXECVE_FLAG_SETSID) {
if (setsid() == -1) {
dprintf(fork_w, "%s\n%d\n", "setsid", errno);
_Exit(1);
}
} else if (flags & Process::FORK_AND_EXECVE_FLAG_SETPGID) {
if (setpgid(0, pgid) == -1) {
dprintf(fork_w, "%s %d", "setpgid", errno);
_Exit(1);
}
}
char** argv_arr = build_cstrarr(argv);
char** envp_arr = build_cstrarr(envp);
SCOPE_EXIT { free(argv_arr); free(envp_arr); };
if (flags & Process::FORK_AND_EXECVE_FLAG_EXECVPE) {
#if defined(__APPLE__)
// execvpe() is a glibcism
//
// We could either:
// - use `execve()` and implement our own $PATH behavior
// - use `execvp()` and implement our own envp behavior
// The latter seems less likely to lead to accidental problems, so let's
// do that.
char**& environ = *_NSGetEnviron();
// We could also use this implementation on Linux (using the standard
// `extern char** environ` instead of the Apple-specific call above)...
environ = envp_arr;
execvp(path.c_str(), argv_arr);
#else
// ... but it feels nasty enough that I'll use the glibcism
execvpe(path.c_str(), argv_arr, envp_arr);
#endif
} else {
execve(path.c_str(), argv_arr, envp_arr);
}
dprintf(fork_w, "%s %d", "execve", errno);
_Exit(1);
}
close(fork_w);
pollfd pfd[1];
pfd[0].fd = fork_w;
pfd[0].events = POLLIN;
int ret;
do {
ret = poll(pfd, /* number of fds = */ 1, /* timeout = no timeout*/ -1);
} while (ret == -1 && errno == EINTR);
char buf[16];
auto len = read(fork_r, &buf, sizeof(buf));
close(fork_r);
// Closed without write, which means close-on-exec
if (len < 1) {
return child;
}
char failed_call_buf[17]; // 16 + trailing null
int saved_errno;
if (sscanf(buf, "%16s %d", failed_call_buf, &saved_errno) != 2) {
return -999;
}
// Doing the call => return value here instead of sending return values over
// the pipe so that it's all in one place, and we're less likely to introduce
// bugs when/if we add additional features.
const std::string failed_call(failed_call_buf);
SCOPE_EXIT { errno = saved_errno; };
if (failed_call == "chdir") {
return -2;
}
if (failed_call == "setsid") {
return -3;
}
if (failed_call == "setpgid") {
return -4;
}
if (failed_call == "execve") {
return -5;
}
if (failed_call == "putenv") {
return -6;
}
return -9999;
}
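// Illustrative call sketch (assumed arguments, not from the original source):
// the negative return codes identify which pre-exec step failed, with errno
// restored from the child via the pipe protocol above:
//
//   auto pid = Process::ForkAndExecve("/bin/ls", {"ls", "-l"}, {"PATH=/bin"},
//                                     "", {}, 0, 0);
//   if (pid > 0)        { /* child forked; execve did not report failure */ }
//   else if (pid == -5) { /* execve failed in the child; errno is set */ }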
///////////////////////////////////////////////////////////////////////////////
}
| 8,360 |
1,011 | <reponame>jimbofreedman/django-rest-framework-json-api<gh_stars>1000+
from datetime import datetime
from unittest import mock
import pytest
from django.test import TestCase
from django.urls import reverse
from django.utils import timezone
from rest_framework.request import Request
from rest_framework.test import APIRequestFactory
from rest_framework_json_api.serializers import (
DateField,
ModelSerializer,
ResourceIdentifierObjectSerializer,
empty,
)
from rest_framework_json_api.utils import format_resource_type
from example.factories import ArtProjectFactory
from example.models import Author, Blog, Entry
from example.serializers import ArtProjectSerializer, BlogSerializer, ProjectSerializer
request_factory = APIRequestFactory()
pytestmark = pytest.mark.django_db
class TestResourceIdentifierObjectSerializer(TestCase):
def setUp(self):
self.blog = Blog.objects.create(name="Some Blog", tagline="It's a blog")
now = timezone.now()
self.entry = Entry.objects.create(
blog=self.blog,
headline="headline",
body_text="body_text",
pub_date=now.date(),
mod_date=now.date(),
n_comments=0,
n_pingbacks=0,
rating=3,
)
for i in range(1, 6):
name = f"some_author{i}"
self.entry.authors.add(
Author.objects.create(name=name, email=f"{<EMAIL>")
)
def test_forward_relationship_not_loaded_when_not_included(self):
to_representation_method = (
"example.serializers.TaggedItemSerializer.to_representation"
)
with mock.patch(to_representation_method) as mocked_serializer:
class EntrySerializer(ModelSerializer):
blog = BlogSerializer()
class Meta:
model = Entry
fields = "__all__"
request_without_includes = Request(request_factory.get("/"))
serializer = EntrySerializer(context={"request": request_without_includes})
serializer.to_representation(self.entry)
mocked_serializer.assert_not_called()
def test_forward_relationship_optimization_correct_representation(self):
class EntrySerializer(ModelSerializer):
blog = BlogSerializer()
class Meta:
model = Entry
fields = "__all__"
request_without_includes = Request(request_factory.get("/"))
serializer = EntrySerializer(context={"request": request_without_includes})
result = serializer.to_representation(self.entry)
# Remove non deterministic fields
result.pop("created_at")
result.pop("modified_at")
expected = dict(
[
("id", 1),
(
"blog",
dict(
[
("name", "<NAME>"),
("tags", []),
("copyright", datetime.now().year),
("url", "http://testserver/blogs/1"),
]
),
),
("headline", "headline"),
("body_text", "body_text"),
("pub_date", DateField().to_representation(self.entry.pub_date)),
("mod_date", DateField().to_representation(self.entry.mod_date)),
("n_comments", 0),
("n_pingbacks", 0),
("rating", 3),
(
"authors",
[
dict([("type", "authors"), ("id", "1")]),
dict([("type", "authors"), ("id", "2")]),
dict([("type", "authors"), ("id", "3")]),
dict([("type", "authors"), ("id", "4")]),
dict([("type", "authors"), ("id", "5")]),
],
),
]
)
self.assertDictEqual(expected, result)
def test_data_in_correct_format_when_instantiated_with_blog_object(self):
serializer = ResourceIdentifierObjectSerializer(instance=self.blog)
expected_data = {"type": format_resource_type("Blog"), "id": str(self.blog.id)}
assert serializer.data == expected_data
def test_data_in_correct_format_when_instantiated_with_entry_object(self):
serializer = ResourceIdentifierObjectSerializer(instance=self.entry)
expected_data = {
"type": format_resource_type("Entry"),
"id": str(self.entry.id),
}
assert serializer.data == expected_data
def test_deserialize_primitive_data_blog(self):
initial_data = {"type": format_resource_type("Blog"), "id": str(self.blog.id)}
serializer = ResourceIdentifierObjectSerializer(
data=initial_data, model_class=Blog
)
self.assertTrue(serializer.is_valid(), msg=serializer.errors)
assert serializer.validated_data == self.blog
def test_deserialize_primitive_data_blog_with_unexisting_pk(self):
unexisting_pk = self.blog.id
self.blog.delete()
assert not Blog.objects.filter(id=unexisting_pk).exists()
initial_data = {"type": format_resource_type("Blog"), "id": str(unexisting_pk)}
serializer = ResourceIdentifierObjectSerializer(
data=initial_data, model_class=Blog
)
self.assertFalse(serializer.is_valid())
self.assertEqual(serializer.errors[0].code, "does_not_exist")
def test_data_in_correct_format_when_instantiated_with_queryset(self):
qs = Author.objects.all()
serializer = ResourceIdentifierObjectSerializer(instance=qs, many=True)
type_string = format_resource_type("Author")
author_pks = Author.objects.values_list("pk", flat=True)
expected_data = [{"type": type_string, "id": str(pk)} for pk in author_pks]
assert serializer.data == expected_data
def test_deserialize_many(self):
type_string = format_resource_type("Author")
author_pks = Author.objects.values_list("pk", flat=True)
initial_data = [{"type": type_string, "id": str(pk)} for pk in author_pks]
serializer = ResourceIdentifierObjectSerializer(
data=initial_data, model_class=Author, many=True
)
self.assertTrue(serializer.is_valid(), msg=serializer.errors)
print(serializer.data)
class TestModelSerializer:
def test_model_serializer_with_implicit_fields(self, comment, client):
expected = {
"data": {
"type": "comments",
"id": str(comment.pk),
"attributes": {"body": comment.body},
"relationships": {
"entry": {"data": {"type": "entries", "id": str(comment.entry.pk)}},
"author": {
"data": {"type": "authors", "id": str(comment.author.pk)}
},
"writer": {
"data": {"type": "writers", "id": str(comment.author.pk)}
},
},
"meta": {
"modifiedDaysAgo": (datetime.now() - comment.modified_at).days
},
},
"included": [
{
"attributes": {
"email": comment.author.email,
"name": comment.author.name,
},
"id": str(comment.author.pk),
"relationships": {
"bio": {
"data": {
"id": str(comment.author.bio.pk),
"type": "authorBios",
}
}
},
"type": "writers",
}
],
}
response = client.get(reverse("comment-detail", kwargs={"pk": comment.pk}))
assert response.status_code == 200
assert expected == response.json()
class TestPolymorphicModelSerializer(TestCase):
def setUp(self):
self.project = ArtProjectFactory.create()
self.child_init_args = {}
# Override `__init__` with our own method
def overridden_init(child_self, instance=None, data=empty, **kwargs):
"""
Override `ArtProjectSerializer.__init__` with the same signature that
`BaseSerializer.__init__` has to assert that it receives the parameters
that `BaseSerializer` expects
"""
self.child_init_args = dict(instance=instance, data=data, **kwargs)
return super(ArtProjectSerializer, child_self).__init__(
instance, data, **kwargs
)
self.child_serializer_init = ArtProjectSerializer.__init__
ArtProjectSerializer.__init__ = overridden_init
def tearDown(self):
# Restore original init to avoid affecting other tests
ArtProjectSerializer.__init__ = self.child_serializer_init
def test_polymorphic_model_serializer_passes_instance_to_child(self):
"""
Ensure that `PolymorphicModelSerializer` is passing the instance to the
child serializer when initializing them
"""
# Initialize a serializer that would partially update a model instance
initial_data = {"artist": "<NAME>", "type": "artProjects"}
parent_serializer = ProjectSerializer(
instance=self.project, data=initial_data, partial=True
)
parent_serializer.is_valid(raise_exception=True)
# Run save to force `ProjectSerializer` to init `ArtProjectSerializer`
parent_serializer.save()
# Assert that child init received the expected arguments
assert self.child_init_args["instance"] == self.project
assert self.child_init_args["data"] == initial_data
assert self.child_init_args["partial"] == parent_serializer.partial
assert self.child_init_args["context"] == parent_serializer.context
| 4,812 |
921 | // Copyright (c) 2017-2020 <NAME> <<EMAIL>> Use of this source code is governed by the Apache 2.0 license that can be found in the LICENSE file.
package com.vladsch.md.nav.flex.settings;
import com.vladsch.md.nav.flex.PluginBundle;
import com.vladsch.md.nav.settings.MdHtmlSettingsForm;
import com.vladsch.md.nav.settings.MdRenderingProfileHolder;
import com.vladsch.md.nav.settings.RenderingProfileSynchronizer;
import com.vladsch.md.nav.settings.api.MdSettingsComponent;
import com.vladsch.md.nav.settings.api.MdSettingsFormExtensionProvider;
import org.jetbrains.annotations.NotNull;
public class FlexmarkHtmlSettingsFormProvider implements MdSettingsFormExtensionProvider<RenderingProfileSynchronizer, MdRenderingProfileHolder> {
@NotNull
@Override
public MdSettingsComponent<MdRenderingProfileHolder> createComponent(final RenderingProfileSynchronizer settings, final RenderingProfileSynchronizer profileSynchronizer, final Object parent) {
return new FlexmarkHtmlSettingsForm(profileSynchronizer);
}
@NotNull
@Override
public String getExtensionName() {
return PluginBundle.message("product.title");
}
@Override
public boolean isAvailable(@NotNull final Object parent) {
return parent instanceof MdHtmlSettingsForm;
}
}
| 421 |
8,315 | <gh_stars>1000+
package com.airbnb.epoxy;
class InsertedModel extends TestModel {
static final InsertedModel INSTANCE = new InsertedModel();
@Override
public int getDefaultLayout() {
return 0;
}
}
| 68 |
145,614 | <reponame>NavpreetDevpuri/Python<filename>web_programming/download_images_from_google_query.py
import json
import os
import re
import sys
import urllib.request
import requests
from bs4 import BeautifulSoup
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
" (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582"
}
def download_images_from_google_query(query: str = "dhaka", max_images: int = 5) -> int:
"""Searches google using the provided query term and downloads the images in a folder.
Args:
query : The image search term to be provided by the user. Defaults to
"dhaka".
        max_images : The maximum number of images to download. Defaults to 5.
Returns:
The number of images successfully downloaded.
# Comment out slow (4.20s call) doctests
# >>> download_images_from_google_query()
5
# >>> download_images_from_google_query("potato")
5
"""
max_images = min(max_images, 50) # Prevent abuse!
params = {
"q": query,
"tbm": "isch",
"hl": "en",
"ijn": "0",
}
html = requests.get("https://www.google.com/search", params=params, headers=headers)
soup = BeautifulSoup(html.text, "html.parser")
matched_images_data = "".join(
re.findall(r"AF_initDataCallback\(([^<]+)\);", str(soup.select("script")))
)
matched_images_data_fix = json.dumps(matched_images_data)
matched_images_data_json = json.loads(matched_images_data_fix)
matched_google_image_data = re.findall(
r"\[\"GRID_STATE0\",null,\[\[1,\[0,\".*?\",(.*),\"All\",",
matched_images_data_json,
)
if not matched_google_image_data:
return 0
removed_matched_google_images_thumbnails = re.sub(
r"\[\"(https\:\/\/encrypted-tbn0\.gstatic\.com\/images\?.*?)\",\d+,\d+\]",
"",
str(matched_google_image_data),
)
matched_google_full_resolution_images = re.findall(
r"(?:'|,),\[\"(https:|http.*?)\",\d+,\d+\]",
removed_matched_google_images_thumbnails,
)
for index, fixed_full_res_image in enumerate(matched_google_full_resolution_images):
if index >= max_images:
return index
original_size_img_not_fixed = bytes(fixed_full_res_image, "ascii").decode(
"unicode-escape"
)
original_size_img = bytes(original_size_img_not_fixed, "ascii").decode(
"unicode-escape"
)
opener = urllib.request.build_opener()
opener.addheaders = [
(
"User-Agent",
"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36"
" (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.19582",
)
]
urllib.request.install_opener(opener)
path_name = f"query_{query.replace(' ', '_')}"
if not os.path.exists(path_name):
os.makedirs(path_name)
urllib.request.urlretrieve(
original_size_img, f"{path_name}/original_size_img_{index}.jpg"
)
return index
if __name__ == "__main__":
try:
image_count = download_images_from_google_query(sys.argv[1])
print(f"{image_count} images were downloaded to disk.")
except IndexError:
print("Please provide a search term.")
raise
| 1,498 |
14,668 | <reponame>zealoussnow/chromium
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "chrome/browser/offline_pages/android/offline_page_auto_fetcher_service.h"
#include <string>
#include <utility>
#include "base/bind.h"
#include "base/strings/strcat.h"
#include "base/strings/string_number_conversions.h"
#include "base/time/time.h"
#include "chrome/browser/offline_pages/android/auto_fetch_notifier.h"
#include "chrome/browser/offline_pages/request_coordinator_factory.h"
#include "chrome/common/offline_page_auto_fetcher.mojom.h"
#include "components/offline_pages/core/auto_fetch.h"
#include "components/offline_pages/core/background/request_coordinator.h"
#include "components/offline_pages/core/background/save_page_request.h"
#include "components/offline_pages/core/client_id.h"
#include "components/offline_pages/core/client_namespace_constants.h"
#include "components/offline_pages/core/offline_page_item_utils.h"
#include "components/offline_pages/core/offline_page_model.h"
#include "components/offline_pages/core/offline_store_utils.h"
#include "third_party/abseil-cpp/absl/types/optional.h"
#include "url/gurl.h"
namespace offline_pages {
namespace {
constexpr int kMaximumInFlight = 3;
class AutoFetchNotifierImpl : public AutoFetchNotifier {
public:
~AutoFetchNotifierImpl() override {}
// Ensures that the in-progress notification is showing with the appropriate
// request count.
void NotifyInProgress(int in_flight_count) override {
ShowAutoFetchInProgressNotification(in_flight_count);
}
// Update the request count if the in-progress notification is already
// showing. This won't trigger showing the notification if it's not already
// shown. If |in_flight_count| is 0, the notification will be hidden.
void InProgressCountChanged(int in_flight_count) override {
UpdateAutoFetchInProgressNotificationCountIfShowing(in_flight_count);
}
};
} // namespace
OfflinePageAutoFetcherService::OfflinePageAutoFetcherService(
RequestCoordinator* request_coordinator,
OfflinePageModel* offline_page_model,
Delegate* delegate)
: notifier_(std::make_unique<AutoFetchNotifierImpl>()),
page_load_watcher_(
notifier_.get(),
request_coordinator,
std::make_unique<AutoFetchPageLoadWatcher::AndroidTabFinder>()),
request_coordinator_(request_coordinator),
offline_page_model_(offline_page_model),
delegate_(delegate) {
request_coordinator_->AddObserver(this);
if (AutoFetchInProgressNotificationCanceled()) {
CancelAll(base::BindOnce(&AutoFetchCancellationComplete));
}
}
OfflinePageAutoFetcherService::~OfflinePageAutoFetcherService() = default;
void OfflinePageAutoFetcherService::Shutdown() {
request_coordinator_->RemoveObserver(this);
}
void OfflinePageAutoFetcherService::TrySchedule(bool user_requested,
const GURL& url,
int android_tab_id,
TryScheduleCallback callback) {
// Return an early failure if the URL is not suitable.
if (!OfflinePageModel::CanSaveURL(url)) {
std::move(callback).Run(OfflinePageAutoFetcherScheduleResult::kOtherError);
return;
}
// Attempt to schedule a new request.
RequestCoordinator::SavePageLaterParams params;
params.url = url;
auto_fetch::ClientIdMetadata metadata(android_tab_id);
params.client_id = auto_fetch::MakeClientId(metadata);
params.user_requested = false;
params.availability =
RequestCoordinator::RequestAvailability::ENABLED_FOR_OFFLINER;
params.add_options.disallow_duplicate_requests = true;
if (!user_requested) {
params.add_options.maximum_in_flight_requests_for_namespace =
kMaximumInFlight;
}
request_coordinator_->SavePageLater(
params, base::BindOnce(&OfflinePageAutoFetcherService::TryScheduleDone,
GetWeakPtr(), std::move(callback)));
}
void OfflinePageAutoFetcherService::CancelAll(base::OnceClosure callback) {
auto condition = base::BindRepeating([](const SavePageRequest& request) {
return request.client_id().name_space == kAutoAsyncNamespace;
});
request_coordinator_->RemoveRequestsIf(
condition, base::BindOnce(&OfflinePageAutoFetcherService::CancelAllDone,
GetWeakPtr(), std::move(callback)));
}
void OfflinePageAutoFetcherService::CancelSchedule(const GURL& url) {
auto predicate = base::BindRepeating(
[](const GURL& url, const SavePageRequest& request) {
return request.client_id().name_space == kAutoAsyncNamespace &&
EqualsIgnoringFragment(request.url(), url);
},
url);
request_coordinator_->RemoveRequestsIf(predicate,
/*done_callback=*/base::DoNothing());
}
void OfflinePageAutoFetcherService::CancelAllDone(
base::OnceClosure callback,
const MultipleItemStatuses& result) {
std::move(callback).Run();
}
void OfflinePageAutoFetcherService::TryScheduleDone(
TryScheduleCallback callback,
AddRequestResult result) {
// Translate the result and forward to the mojo caller.
OfflinePageAutoFetcherScheduleResult callback_result;
switch (result) {
case AddRequestResult::REQUEST_QUOTA_HIT:
callback_result = OfflinePageAutoFetcherScheduleResult::kNotEnoughQuota;
break;
case AddRequestResult::DUPLICATE_URL:
case AddRequestResult::ALREADY_EXISTS:
callback_result = OfflinePageAutoFetcherScheduleResult::kAlreadyScheduled;
break;
case AddRequestResult::SUCCESS:
callback_result = OfflinePageAutoFetcherScheduleResult::kScheduled;
break;
case AddRequestResult::STORE_FAILURE:
case AddRequestResult::URL_ERROR:
callback_result = OfflinePageAutoFetcherScheduleResult::kOtherError;
break;
}
std::move(callback).Run(callback_result);
}
void OfflinePageAutoFetcherService::OnCompleted(
const SavePageRequest& request,
RequestNotifier::BackgroundSavePageResult status) {
if (request.client_id().name_space != kAutoAsyncNamespace ||
status != RequestNotifier::BackgroundSavePageResult::SUCCESS)
return;
offline_page_model_->GetPageByOfflineId(
request.request_id(),
base::BindOnce(&OfflinePageAutoFetcherService::AutoFetchComplete,
weak_ptr_factory_.GetWeakPtr()));
}
void OfflinePageAutoFetcherService::AutoFetchComplete(
const OfflinePageItem* page) {
if (!page)
return;
absl::optional<auto_fetch::ClientIdMetadata> metadata =
auto_fetch::ExtractMetadata(page->client_id);
if (!metadata)
return;
delegate_->ShowAutoFetchCompleteNotification(
page->title, page->GetOriginalUrl().spec(), page->url.spec(),
metadata.value().android_tab_id, page->offline_id);
}
} // namespace offline_pages
| 2,531 |
2,603 | <reponame>JVVJV/FreeRTOS
/* ----------------------------------------------------------------------------
* ATMEL Microcontroller Software Support
* ----------------------------------------------------------------------------
* Copyright (c) 2008, Atmel Corporation
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* - Redistributions of source code must retain the above copyright notice,
* this list of conditions and the disclaimer below.
*
* Atmel's name may not be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* DISCLAIMER: THIS SOFTWARE IS PROVIDED BY ATMEL "AS IS" AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT ARE
* DISCLAIMED. IN NO EVENT SHALL ATMEL BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
* OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
* EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
* ----------------------------------------------------------------------------
*/
//------------------------------------------------------------------------------
/// \page "mci"
///
/// !Purpose
///
/// mci-interface driver
///
/// !Usage
///
/// -# MCI_Init: Initializes a MCI driver instance and the underlying peripheral.
/// -# MCI_SetSpeed : Configure the MCI CLKDIV in the MCI_MR register.
/// -# MCI_SendCommand: Starts a MCI transfer.
/// -# MCI_Handler : Interrupt handler which is called by ISR handler.
/// -# MCI_SetBusWidth : Configure the MCI SDCBUS in the MCI_SDCR register.
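///
/// Illustrative sketch (not from the original driver; the base address,
/// peripheral ID and clock value used here are assumptions for demonstration):
/// \code
/// Mci mci;
/// MCI_Init(&mci, AT91C_BASE_MCI, AT91C_ID_MCI, MCI_SD_SLOTA);
/// MCI_SetSpeed(&mci, 400000);              // e.g. identification-phase clock (assumed)
/// MCI_SetBusWidth(&mci, MCI_SDCBUS_4BIT);
/// // Fill in a MciCmd and start the transfer with MCI_SendCommand(&mci, &cmd);
/// // MCI_Handler(&mci) must be called from the MCI interrupt service routine.
/// \endcode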
//------------------------------------------------------------------------------
#ifndef MCI_H
#define MCI_H
//------------------------------------------------------------------------------
// Headers
//------------------------------------------------------------------------------
#include <board.h>
//------------------------------------------------------------------------------
// Constants
//------------------------------------------------------------------------------
/// Transfer is pending.
#define MCI_STATUS_PENDING 1
/// Transfer has been aborted because an error occurred.
#define MCI_STATUS_ERROR 2
/// Card did not answer command.
#define MCI_STATUS_NORESPONSE 3
/// MCI driver is currently in use.
#define MCI_ERROR_LOCK 1
/// MCI configuration with 1-bit data bus on slot A (for MMC cards).
#define MCI_MMC_SLOTA 0
/// MCI configuration with 1-bit data bus on slot B (for MMC cards).
#define MCI_MMC_SLOTB 1
/// MCI configuration with 4-bit data bus on slot A (for SD cards).
#define MCI_SD_SLOTA AT91C_MCI_SCDBUS
/// MCI configuration with 4-bit data bus on slot B (for SD cards).
#define MCI_SD_SLOTB (AT91C_MCI_SCDBUS | 1)
/// Start new data transfer
#define MCI_NEW_TRANSFER 0
/// Continue data transfer
#define MCI_CONTINUE_TRANSFER 1
/// MCI SD Bus Width 1-bit
#define MCI_SDCBUS_1BIT (0 << 7)
/// MCI SD Bus Width 4-bit
#define MCI_SDCBUS_4BIT (1 << 7)
//------------------------------------------------------------------------------
// Types
//------------------------------------------------------------------------------
/// MCI end-of-transfer callback function.
typedef void (*MciCallback)(unsigned char status, void *pCommand);
//------------------------------------------------------------------------------
/// MCI Transfer Request prepared by the application upper layer. This structure
/// is sent to the MCI_SendCommand function to start the transfer. At the end of
/// the transfer, the callback is invoked by the interrupt handler.
//------------------------------------------------------------------------------
typedef struct _MciCmd {
/// Command status.
volatile char status;
/// Command code.
unsigned int cmd;
/// Command argument.
unsigned int arg;
/// Data buffer.
unsigned char *pData;
/// Size of data buffer in bytes.
unsigned short blockSize;
    /// Number of blocks to be transferred
unsigned short nbBlock;
    /// Indicates whether to continue a previous data transfer
unsigned char conTrans;
/// Indicates if the command is a read operation.
unsigned char isRead;
/// Response buffer.
unsigned int *pResp;
/// SD card response type.
unsigned char resType;
/// Optional user-provided callback function.
MciCallback callback;
/// Optional argument to the callback function.
void *pArg;
} MciCmd;
//------------------------------------------------------------------------------
/// MCI driver structure. Holds the internal state of the MCI driver and
/// prevents parallel access to a MCI peripheral.
//------------------------------------------------------------------------------
typedef struct {
/// Pointer to a MCI peripheral.
AT91S_MCI *pMciHw;
/// MCI peripheral identifier.
unsigned char mciId;
/// Pointer to currently executing command.
MciCmd *pCommand;
/// Mutex.
volatile char semaphore;
} Mci;
//------------------------------------------------------------------------------
// Global functions
//------------------------------------------------------------------------------
extern void MCI_Init(
Mci *pMci,
AT91PS_MCI pMciHw,
unsigned char mciId,
unsigned int mode);
extern void MCI_SetSpeed(Mci *pMci, unsigned int mciSpeed);
extern unsigned char MCI_SendCommand(Mci *pMci, MciCmd *pMciCmd);
extern void MCI_Handler(Mci *pMci);
extern unsigned char MCI_IsTxComplete(MciCmd *pMciCmd);
extern unsigned char MCI_CheckBusy(Mci *pMci);
extern void MCI_Close(Mci *pMci);
extern void MCI_SetBusWidth(Mci *pMci, unsigned char busWidth);
#endif //#ifndef MCI_H
| 1,895 |
1,895 | <gh_stars>1000+
package com.baomidou.mybatisplus.samples.ar;
import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;
/**
* <p>
 * AR test
* </p>
*
* @author hubin
* @since 2018-08-14
*/
@SpringBootApplication
public class ArApplication {
public static void main(String[] args) {
SpringApplication.run(ArApplication.class, args);
}
}
| 149 |
460 | <reponame>dyzmapl/BumpTop
#include "../../src/qt3support/network/q3ftp.h"
| 36 |
524 | #pragma once
#include "../loader/loader_helpers.h"
#include "../loader/preview_proxy.h"
#include "../loader/web_file_info.h"
#include "async_handler.h"
#include "downloadable_file_chunks.h"
#include "downloaded_file_info.h"
#include "file_sharing_meta.h"
#include "../../../log/log.h"
#include "../../../corelib/collection_helper.h"
#include "../../../corelib/enumerations.h"
namespace core
{
class coll_helper;
namespace wim
{
static const std::string default_file_location;
typedef transferred_data<void> default_data_t;
typedef transferred_data<downloaded_file_info> file_info_data_t;
typedef transferred_data<preview_proxy::link_meta> link_meta_data_t;
typedef transferred_data<file_sharing_meta> file_sharing_meta_data_t;
typedef async_handler<void> default_handler_t;
typedef async_handler<downloaded_file_info> file_info_handler_t;
typedef async_handler<preview_proxy::link_meta> link_meta_handler_t;
typedef async_handler<file_sharing_meta> file_sharing_meta_handler_t;
struct wim_packet_params;
struct cancelled_tasks
{
std::unordered_set<std::string> tasks_;
std::mutex mutex_;
};
class async_loader
: public std::enable_shared_from_this<async_loader>
{
public:
explicit async_loader(std::wstring _content_cache_dir);
void set_download_dir(std::wstring _download_dir);
void download(priority_t _priority, const std::string& _url, const std::string& _base_url, const wim_packet_params& _wim_params, default_handler_t _handler, time_t _last_modified_time = 0, int64_t _id = -1, std::string_view _normalized_url = {});
void download_file(priority_t _priority, const std::string& _url, const std::string& _base_url, const std::wstring& _file_name, const wim_packet_params& _wim_params, file_info_handler_t _handler = file_info_handler_t(), time_t _last_modified_time = 0, int64_t _id = -1, const bool _with_data = true, std::string_view _normalized_url = {});
void download_file(priority_t _priority, const std::string& _url, const std::string& _base_url, const std::string& _file_name, const wim_packet_params& _wim_params, file_info_handler_t _handler = file_info_handler_t(), time_t _last_modified_time = 0, int64_t _id = -1, const bool _with_data = true, std::string_view _normalized_url = {});
void cancel(const std::string& _url);
void download_image_metainfo(const std::string& _url, const wim_packet_params& _wim_params, link_meta_handler_t _handler = link_meta_handler_t(), int64_t _id = -1);
void download_file_sharing_metainfo(const std::string& _url, const wim_packet_params& _wim_params, file_sharing_meta_handler_t _handler = file_sharing_meta_handler_t(), int64_t _id = -1);
void download_image_preview(priority_t _priority, const std::string& _url, const wim_packet_params& _wim_params, link_meta_handler_t _metainfo_handler = link_meta_handler_t(), file_info_handler_t _preview_handler = file_info_handler_t(), int64_t _id = -1);
void download_image(priority_t _priority, const std::string& _url, const wim_packet_params& _wim_params, const bool _use_proxy, const bool _is_external_resource, file_info_handler_t _preview_handler = file_info_handler_t(), int64_t _id = -1, const bool _with_data = true);
void download_image(priority_t _priority, const std::string& _url, const std::string& _file_name, const wim_packet_params& _wim_params, const bool _use_proxy, const bool _is_external_resource, file_info_handler_t _preview_handler = file_info_handler_t(), int64_t _id = -1, const bool _with_data = true);
void download_file_sharing(priority_t _priority, const std::string& _contact, const std::string& _url, std::string _file_name, const wim_packet_params& _wim_params, file_info_handler_t _handler = file_info_handler_t());
void cancel_file_sharing(const std::string& _url);
void resume_suspended_tasks(const wim_packet_params& _wim_params);
void contact_switched(const std::string& _contact);
std::wstring get_meta_path(const std::string& _url, const std::wstring& _path);
static void save_filesharing_local_path(const std::wstring& _meta_path, const std::string& _url, const std::wstring& _path);
void save_filesharing_local_path(const std::string& _url, const std::wstring& _path);
private:
void download_file_sharing_impl(std::string _url, wim_packet_params _wim_params, downloadable_file_chunks_ptr _file_chunks, std::string_view _normalized_url = {});
static void update_file_chunks(downloadable_file_chunks& _file_chunks, priority_t _new_priority, file_info_handler_t _additional_handlers);
template <typename T>
static void fire_callback(loader_errors _error, const transferred_data<T>& _data, typename async_handler<T>::completion_callback_t _completion_callback)
{
if (_completion_callback)
g_core->execute_core_context([=]() { _completion_callback(_error, _data); });
}
void fire_chunks_callback(loader_errors _error, const std::string& _url);
template <class metainfo_parser_t, typename T>
void download_metainfo(const std::string& _url, const std::string& _signed_url, metainfo_parser_t _parser, const wim_packet_params& _wim_params, async_handler<T> _handler, int64_t _id = -1, std::string_view _normalized_url = {})
{
__INFO("async_loader",
"download_metainfo\n"
"url = <%1%>\n"
"signed = <%2%>\n"
"handler = <%3%>\n", _url % _signed_url % _handler.to_string());
const auto meta_path = get_path_in_cache(content_cache_dir_, _url, path_type::link_meta);
auto load_from_local = [_handler, _parser, _url, meta_path]()
{
tools::binary_stream json_file;
if (json_file.load_from_file(meta_path))
{
const auto file_size = json_file.available();
if (file_size != 0)
{
const auto json_str = json_file.read(file_size);
std::vector<char> json;
json.reserve(file_size + 1);
json.assign(json_str, json_str + file_size);
json.push_back('\0');
auto meta_info = _parser(json.data(), _url);
if (meta_info)
{
transferred_data<T> result(std::shared_ptr<T>(meta_info.release()));
fire_callback(loader_errors::success, result, _handler.completion_callback_);
return true;
}
}
}
return false;
};
if (load_from_local())
return;
auto local_handler = default_handler_t([_url, _signed_url, _parser, _handler, meta_path, _id, load_from_local, this](loader_errors _error, const default_data_t& _data)
{
__INFO("async_loader",
"download_metainfo\n"
"url = <%1%>\n"
"handler = <%2%>\n"
"result = <%3%>\n"
"response = <%4%>\n", _url % _handler.to_string() % static_cast<int>(_error) % _data.response_code_);
if (_error == loader_errors::network_error)
{
suspended_tasks_.push([_url, _signed_url, _parser, _handler, _id, this](const wim_packet_params& wim_params)
{
download_metainfo(_url, _signed_url, _parser, wim_params, _handler, _id);
});
return;
}
if (_error != loader_errors::success)
{
fire_callback(_error, transferred_data<T>(), _handler.completion_callback_);
return;
}
if (load_from_local())
return;
std::vector<char> json;
json.assign(_data.content_->get_data(), _data.content_->get_data() + _data.content_->available());
json.push_back('\0');
auto meta_info = _parser(json.data(), _url);
if (!meta_info)
{
fire_callback(loader_errors::invalid_json, transferred_data<T>(), _handler.completion_callback_);
return;
}
_data.content_->reset_out();
_data.content_->save_2_file(meta_path);
transferred_data<T> result(_data.response_code_, _data.header_, _data.content_, std::shared_ptr<T>(meta_info.release()));
fire_callback(loader_errors::success, result, _handler.completion_callback_);
}, _handler.progress_callback_);
download(highest_priority(), _signed_url, _url, _wim_params, local_handler, 0, _id, _normalized_url);
}
void cleanup_cache();
static constexpr std::string_view get_endpoint_for_url(std::string_view _url);
private:
const std::wstring content_cache_dir_;
std::wstring download_dir_;
std::unordered_map<std::string, downloadable_file_chunks_ptr> in_progress_;
std::mutex in_progress_mutex_;
std::shared_ptr<cancelled_tasks> cancelled_tasks_;
typedef std::function<void(const wim_packet_params& _wim_params)> suspended_task_t;
std::queue<suspended_task_t> suspended_tasks_;
std::shared_ptr<async_executer> async_tasks_;
std::shared_ptr<async_executer> file_save_thread_;
};
}
}
| 5,259 |
4,759 | versions = {'etcd': '9.6', 'etcd3': '14', 'consul': '13', 'exhibitor': '12', 'raft': '11', 'kubernetes': '14'}
| 52 |
778 | <filename>external_libraries/vexcl/tests/deduce.cpp
#define BOOST_TEST_MODULE ExpressionTypeDeduction
#include <boost/test/unit_test.hpp>
#include <vexcl/vector.hpp>
#include <vexcl/element_index.hpp>
#include <vexcl/mba.hpp>
#ifdef RESOLVED_70
#include <vexcl/spmat.hpp>
#endif
#include <vexcl/tagged_terminal.hpp>
#include <vexcl/temporary.hpp>
#include <vexcl/cast.hpp>
#include <vexcl/vector_view.hpp>
#include <vexcl/reductor.hpp>
#include <vexcl/function.hpp>
#include "context_setup.hpp"
template <typename T>
inline std::array<T, 2> make_array(T x, T y) {
std::array<T, 2> p = {{x, y}};
return p;
}
template <class Result, class Expr>
void check(const Expr &expr) {
typedef typename vex::detail::return_type<Expr>::type Deduced;
boost::proto::display_expr( boost::proto::as_child(expr) );
std::cout << vex::type_name<Deduced>() << std::endl << std::endl;
BOOST_CHECK( (std::is_same<Deduced, Result>::value) );
}
BOOST_AUTO_TEST_CASE(terminals)
{
const size_t n = 1024;
vex::vector<double> x;
vex::vector<int> y;
vex::vector<cl_double2> z;
check<int> (5);
check<double> (4.2);
check<double> (x);
check<int> (y);
check<cl_double2>(z);
check<size_t>(vex::element_index());
{
vex::mba<2, float> *surf= 0;
check<float>( (*surf)(x, y) );
}
#ifdef RESOLVED_70
{
vex::SpMatCCSR<double> *A = 0;
check<double>( (*A) * x );
}
{
std::vector<cl::CommandQueue> q1(1, ctx.queue(0));
vex::vector<double> x1(q1, n);
vex::SpMat<double> *A = 0;
check<double>( vex::make_inline( (*A) * x1 ) );
}
#endif
check<double>( vex::tag<1>(x) );
{
auto tmp = vex::make_temp<1>(x * y);
check<double>( tmp );
}
{
std::vector<vex::command_queue> q1(1, ctx.queue(0));
vex::vector<int> x1(q1, n);
vex::slicer<1> slice(vex::extents[n]);
check<int>( slice[vex::range(0, 2, n)](x1) );
}
}
BOOST_AUTO_TEST_CASE(logical_expr)
{
vex::vector<double> x;
vex::vector<int> y;
check<cl_long>(x < y);
check<cl_long>(5 > pow(x, 2.0 * y));
check<cl_long>(!x);
}
BOOST_AUTO_TEST_CASE(nary_expr)
{
vex::vector<double> x;
vex::vector<cl_double2> x2;
vex::vector<int> y;
vex::vector<cl_int2> y2;
check<double> (x + y);
check<double> (x + 2 * y);
check<int> (-y);
check<cl_double2>(x * x2);
check<cl_double2>(y * x2);
check<cl_double2>(x * y2);
}
BOOST_AUTO_TEST_CASE(user_functions)
{
vex::vector<double> x;
vex::vector<int> y;
VEX_FUNCTION(double, f1, (double, x), return 42;);
VEX_FUNCTION(int, f2, (double, x)(double, y), return 42;);
check<double>( f1(x) );
check<int> ( f2(x, y) );
check<int> ( f2(x + y, x - y) );
}
BOOST_AUTO_TEST_CASE(ternary_operator)
{
vex::vector<double> x;
vex::vector<int> y;
check<int> ( if_else(x < 0, 1, y) );
check<double>( *if_else(x < 0, &x, &y) );
}
BOOST_AUTO_TEST_CASE(builtin_functions)
{
vex::vector<double> x;
vex::vector<int> y;
check<double>( cos(x) - sin(y) );
check<double>( pow(x, 2.0 * y) );
}
BOOST_AUTO_TEST_CASE(reduced_view)
{
const size_t n = 1024;
std::vector<vex::command_queue> q1(1, ctx.queue(0));
vex::vector<double> x(q1, n);
vex::slicer<2> s(vex::extents[32][32]);
check<double>( vex::reduce<vex::SUM>(s[vex::_](x), 1) );
}
BOOST_AUTO_TEST_CASE(casted_terminals)
{
check<double>(vex::cast<double>(42));
check<int>(vex::cast<int>(4.2));
}
BOOST_AUTO_TEST_SUITE_END()
| 1,855 |
435 | {
"description": "Want to start using openstack? Come to this talk and meet shade! Shade\nis a python library that wraps the many OpenStack clients.\n\nThis introductory talk presents:\n\n- an overview of openstack\n- connecting to openstack with shade\n- creating and listing machines, networks & co\n- shade usage in ansible modules\n- contributing to shade (tox, git review, \u2026)\n",
"duration": 1924,
"language": "eng",
"recorded": "2017-04-09",
"related_urls": [
{
"label": "slides",
"url": "https://www.pycon.it/media/conference/slides/just-one-shade-of-openstack.pdf"
}
],
"speakers": [
"<NAME>"
],
"tags": [
"OpenStack",
"OpenSource",
"cloud"
],
"thumbnail_url": "https://i.ytimg.com/vi/JFOtxIngSY8/hqdefault.jpg",
"title": "Just one Shade of OpenStack",
"videos": [
{
"type": "youtube",
"url": "https://www.youtube.com/watch?v=JFOtxIngSY8"
}
]
}
| 368 |
2,151 | <filename>third_party/blink/renderer/core/offscreencanvas/offscreen_canvas.cc
// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "third_party/blink/renderer/core/offscreencanvas/offscreen_canvas.h"
#include <memory>
#include <utility>
#include "gpu/config/gpu_feature_info.h"
#include "third_party/blink/public/platform/platform.h"
#include "third_party/blink/renderer/core/css/css_font_selector.h"
#include "third_party/blink/renderer/core/css/offscreen_font_selector.h"
#include "third_party/blink/renderer/core/css/style_engine.h"
#include "third_party/blink/renderer/core/dom/exception_code.h"
#include "third_party/blink/renderer/core/execution_context/execution_context.h"
#include "third_party/blink/renderer/core/fileapi/blob.h"
#include "third_party/blink/renderer/core/html/canvas/canvas_async_blob_creator.h"
#include "third_party/blink/renderer/core/html/canvas/canvas_context_creation_attributes_core.h"
#include "third_party/blink/renderer/core/html/canvas/canvas_rendering_context.h"
#include "third_party/blink/renderer/core/html/canvas/canvas_rendering_context_factory.h"
#include "third_party/blink/renderer/core/html/canvas/image_data.h"
#include "third_party/blink/renderer/core/imagebitmap/image_bitmap.h"
#include "third_party/blink/renderer/core/origin_trials/origin_trials.h"
#include "third_party/blink/renderer/core/workers/worker_global_scope.h"
#include "third_party/blink/renderer/platform/graphics/canvas_resource_provider.h"
#include "third_party/blink/renderer/platform/graphics/gpu/shared_gpu_context.h"
#include "third_party/blink/renderer/platform/graphics/image.h"
#include "third_party/blink/renderer/platform/graphics/offscreen_canvas_frame_dispatcher.h"
#include "third_party/blink/renderer/platform/graphics/skia/skia_utils.h"
#include "third_party/blink/renderer/platform/graphics/static_bitmap_image.h"
#include "third_party/blink/renderer/platform/image-encoders/image_encoder_utils.h"
#include "third_party/blink/renderer/platform/instrumentation/tracing/trace_event.h"
#include "third_party/blink/renderer/platform/wtf/math_extras.h"
#include "third_party/skia/include/core/SkSurface.h"
namespace blink {
OffscreenCanvas::OffscreenCanvas(const IntSize& size) : size_(size) {}
OffscreenCanvas* OffscreenCanvas::Create(unsigned width, unsigned height) {
return new OffscreenCanvas(
IntSize(clampTo<int>(width), clampTo<int>(height)));
}
OffscreenCanvas::~OffscreenCanvas() = default;
void OffscreenCanvas::Commit(scoped_refptr<StaticBitmapImage> bitmap_image,
const SkIRect& damage_rect) {
if (!HasPlaceholderCanvas())
return;
double commit_start_time = WTF::CurrentTimeTicksInSeconds();
current_frame_damage_rect_.join(damage_rect);
GetOrCreateFrameDispatcher()->DispatchFrameSync(
std::move(bitmap_image), commit_start_time, current_frame_damage_rect_);
current_frame_damage_rect_ = SkIRect::MakeEmpty();
}
void OffscreenCanvas::Dispose() {
if (context_) {
context_->DetachHost();
context_ = nullptr;
}
}
void OffscreenCanvas::setWidth(unsigned width) {
IntSize new_size = size_;
new_size.SetWidth(clampTo<int>(width));
SetSize(new_size);
}
void OffscreenCanvas::setHeight(unsigned height) {
IntSize new_size = size_;
new_size.SetHeight(clampTo<int>(height));
SetSize(new_size);
}
void OffscreenCanvas::SetSize(const IntSize& size) {
if (context_) {
if (context_->Is3d()) {
if (size != size_)
context_->Reshape(size.Width(), size.Height());
} else if (context_->Is2d()) {
context_->Reset();
origin_clean_ = true;
}
}
size_ = size;
if (frame_dispatcher_) {
frame_dispatcher_->Reshape(size_);
}
current_frame_damage_rect_ = SkIRect::MakeWH(size_.Width(), size_.Height());
if (context_) {
context_->DidDraw();
}
}
void OffscreenCanvas::SetNeutered() {
DCHECK(!context_);
is_neutered_ = true;
size_.SetWidth(0);
size_.SetHeight(0);
}
ImageBitmap* OffscreenCanvas::transferToImageBitmap(
ScriptState* script_state,
ExceptionState& exception_state) {
if (is_neutered_) {
exception_state.ThrowDOMException(
kInvalidStateError,
"Cannot transfer an ImageBitmap from a detached OffscreenCanvas");
return nullptr;
}
if (!context_) {
exception_state.ThrowDOMException(kInvalidStateError,
"Cannot transfer an ImageBitmap from an "
"OffscreenCanvas with no context");
return nullptr;
}
ImageBitmap* image = context_->TransferToImageBitmap(script_state);
if (!image) {
// Undocumented exception (not in spec)
exception_state.ThrowDOMException(kV8Error, "Out of memory");
}
return image;
}
scoped_refptr<Image> OffscreenCanvas::GetSourceImageForCanvas(
SourceImageStatus* status,
AccelerationHint hint,
const FloatSize& size) {
if (!context_) {
*status = kInvalidSourceImageStatus;
sk_sp<SkSurface> surface =
SkSurface::MakeRasterN32Premul(size_.Width(), size_.Height());
return surface ? StaticBitmapImage::Create(surface->makeImageSnapshot())
: nullptr;
}
if (!size.Width() || !size.Height()) {
*status = kZeroSizeCanvasSourceImageStatus;
return nullptr;
}
scoped_refptr<Image> image = context_->GetImage(hint);
if (!image)
image = CreateTransparentImage(Size());
*status = image ? kNormalSourceImageStatus : kInvalidSourceImageStatus;
return image;
}
IntSize OffscreenCanvas::BitmapSourceSize() const {
return size_;
}
ScriptPromise OffscreenCanvas::CreateImageBitmap(
ScriptState* script_state,
EventTarget&,
base::Optional<IntRect> crop_rect,
const ImageBitmapOptions& options) {
return ImageBitmapSource::FulfillImageBitmap(
script_state,
IsPaintable() ? ImageBitmap::Create(this, crop_rect, options) : nullptr);
}
bool OffscreenCanvas::IsOpaque() const {
if (!context_)
return false;
return !context_->CreationAttributes().alpha;
}
CanvasRenderingContext* OffscreenCanvas::GetCanvasRenderingContext(
ExecutionContext* execution_context,
const String& id,
const CanvasContextCreationAttributesCore& attributes) {
execution_context_ = execution_context;
CanvasRenderingContext::ContextType context_type =
CanvasRenderingContext::ContextTypeFromId(id);
// Unknown type.
if (context_type == CanvasRenderingContext::kContextTypeCount ||
(context_type == CanvasRenderingContext::kContextXRPresent &&
!OriginTrials::webXREnabled(execution_context)))
return nullptr;
CanvasRenderingContextFactory* factory =
GetRenderingContextFactory(context_type);
if (!factory)
return nullptr;
if (context_) {
if (context_->GetContextType() != context_type) {
factory->OnError(
this, "OffscreenCanvas has an existing context of a different type");
return nullptr;
}
} else {
context_ = factory->Create(this, attributes);
}
return context_.Get();
}
OffscreenCanvas::ContextFactoryVector&
OffscreenCanvas::RenderingContextFactories() {
DEFINE_STATIC_LOCAL(ContextFactoryVector, context_factories,
(CanvasRenderingContext::kContextTypeCount));
return context_factories;
}
CanvasRenderingContextFactory* OffscreenCanvas::GetRenderingContextFactory(
int type) {
DCHECK_LT(type, CanvasRenderingContext::kContextTypeCount);
return RenderingContextFactories()[type].get();
}
void OffscreenCanvas::RegisterRenderingContextFactory(
std::unique_ptr<CanvasRenderingContextFactory> rendering_context_factory) {
CanvasRenderingContext::ContextType type =
rendering_context_factory->GetContextType();
DCHECK_LT(type, CanvasRenderingContext::kContextTypeCount);
DCHECK(!RenderingContextFactories()[type]);
RenderingContextFactories()[type] = std::move(rendering_context_factory);
}
bool OffscreenCanvas::OriginClean() const {
return origin_clean_ && !disable_reading_from_canvas_;
}
bool OffscreenCanvas::IsAccelerated() const {
return context_ && context_->IsAccelerated();
}
OffscreenCanvasFrameDispatcher* OffscreenCanvas::GetOrCreateFrameDispatcher() {
if (!frame_dispatcher_) {
// The frame dispatcher connects the current thread of OffscreenCanvas
// (either main or worker) to the browser process and remains unchanged
// throughout the lifetime of this OffscreenCanvas.
frame_dispatcher_ = std::make_unique<OffscreenCanvasFrameDispatcher>(
this, client_id_, sink_id_, placeholder_canvas_id_, size_);
}
return frame_dispatcher_.get();
}
void OffscreenCanvas::DiscardResourceProvider() {
resource_provider_.reset();
needs_matrix_clip_restore_ = true;
}
CanvasResourceProvider* OffscreenCanvas::GetOrCreateResourceProvider() {
if (!resource_provider_) {
bool is_accelerated_2d_canvas_blacklisted = true;
if (SharedGpuContext::IsGpuCompositingEnabled()) {
base::WeakPtr<WebGraphicsContext3DProviderWrapper>
context_provider_wrapper = SharedGpuContext::ContextProviderWrapper();
if (context_provider_wrapper) {
const gpu::GpuFeatureInfo& gpu_feature_info =
context_provider_wrapper->ContextProvider()->GetGpuFeatureInfo();
if (gpu::kGpuFeatureStatusEnabled ==
gpu_feature_info
.status_values[gpu::GPU_FEATURE_TYPE_ACCELERATED_2D_CANVAS]) {
is_accelerated_2d_canvas_blacklisted = false;
}
}
}
IntSize surface_size(width(), height());
if (RuntimeEnabledFeatures::Accelerated2dCanvasEnabled() &&
!is_accelerated_2d_canvas_blacklisted) {
resource_provider_ = CanvasResourceProvider::Create(
surface_size, CanvasResourceProvider::kAcceleratedResourceUsage,
SharedGpuContext::ContextProviderWrapper(), 0,
context_->ColorParams());
}
if (!resource_provider_ || !resource_provider_->IsValid()) {
resource_provider_ = CanvasResourceProvider::Create(
surface_size, CanvasResourceProvider::kSoftwareResourceUsage, nullptr,
0, context_->ColorParams());
}
if (resource_provider_ && resource_provider_->IsValid()) {
resource_provider_->Clear();
// Always save an initial frame, to support resetting the top level matrix
// and clip.
resource_provider_->Canvas()->save();
}
if (resource_provider_ && needs_matrix_clip_restore_) {
needs_matrix_clip_restore_ = false;
context_->RestoreCanvasMatrixClipStack(resource_provider_->Canvas());
}
}
return resource_provider_.get();
}
void OffscreenCanvas::DidDraw() {
DidDraw(FloatRect(0, 0, Size().Width(), Size().Height()));
}
void OffscreenCanvas::DidDraw(const FloatRect& rect) {
if (rect.IsEmpty())
return;
if (!HasPlaceholderCanvas())
return;
GetOrCreateFrameDispatcher()->SetNeedsBeginFrame(true);
}
void OffscreenCanvas::BeginFrame() {
context_->PushFrame();
GetOrCreateFrameDispatcher()->SetNeedsBeginFrame(false);
}
void OffscreenCanvas::PushFrame(scoped_refptr<StaticBitmapImage> image,
const SkIRect& damage_rect) {
current_frame_damage_rect_.join(damage_rect);
if (current_frame_damage_rect_.isEmpty())
return;
double commit_start_time = WTF::CurrentTimeTicksInSeconds();
GetOrCreateFrameDispatcher()->DispatchFrame(
std::move(image), commit_start_time, current_frame_damage_rect_);
current_frame_damage_rect_ = SkIRect::MakeEmpty();
}
ScriptPromise OffscreenCanvas::convertToBlob(ScriptState* script_state,
const ImageEncodeOptions& options,
ExceptionState& exception_state) {
if (this->IsNeutered()) {
exception_state.ThrowDOMException(kInvalidStateError,
"OffscreenCanvas object is detached.");
return exception_state.Reject(script_state);
}
if (!this->OriginClean()) {
exception_state.ThrowSecurityError(
"Tainted OffscreenCanvas may not be exported.");
return exception_state.Reject(script_state);
}
if (!this->IsPaintable() || size_.IsEmpty()) {
exception_state.ThrowDOMException(
kIndexSizeError, "The size of the OffscreenCanvas is zero.");
return exception_state.Reject(script_state);
}
if (!this->context_) {
exception_state.ThrowDOMException(
kInvalidStateError, "OffscreenCanvas object has no rendering contexts");
return exception_state.Reject(script_state);
}
double start_time = WTF::CurrentTimeTicksInSeconds();
scoped_refptr<StaticBitmapImage> snapshot =
context_->GetImage(kPreferNoAcceleration);
if (snapshot) {
ScriptPromiseResolver* resolver =
ScriptPromiseResolver::Create(script_state);
String encoding_mime_type = ImageEncoderUtils::ToEncodingMimeType(
options.type(), ImageEncoderUtils::kEncodeReasonConvertToBlobPromise);
CanvasAsyncBlobCreator* async_creator = CanvasAsyncBlobCreator::Create(
snapshot, encoding_mime_type, start_time,
ExecutionContext::From(script_state), resolver);
async_creator->ScheduleAsyncBlobCreation(options.quality());
return resolver->Promise();
} else {
exception_state.ThrowDOMException(
kNotReadableError, "Readback of the source image has failed.");
return exception_state.Reject(script_state);
}
}
void OffscreenCanvas::RegisterContextToDispatch(
CanvasRenderingContext* context) {
if (!HasPlaceholderCanvas())
return;
if (GetExecutionContext()->IsWorkerGlobalScope()) {
ToWorkerGlobalScope(GetExecutionContext())
->GetAnimationFrameProvider()
->AddContextToDispatch(context);
}
}
FontSelector* OffscreenCanvas::GetFontSelector() {
if (GetExecutionContext()->IsDocument()) {
return ToDocument(execution_context_)->GetStyleEngine().GetFontSelector();
}
return ToWorkerGlobalScope(execution_context_)->GetFontSelector();
}
void OffscreenCanvas::Trace(blink::Visitor* visitor) {
visitor->Trace(context_);
visitor->Trace(execution_context_);
EventTargetWithInlineData::Trace(visitor);
}
} // namespace blink
| 5,267 |
525 | <gh_stars>100-1000
# Added at : 2016.07.31
# Author : 7sDream
# Usage    : Provide math operators for polynomials over GF(2^8).
#            Used in the Reed-Solomon encoder.
import abc
from .gf import GF28
__all__ = ['GF28Poly']
class _GFPoly(object):
@classmethod
@abc.abstractmethod
def gf(cls):
pass
def __init__(self, pcmap):
self._pcmap = pcmap
if self._pcmap:
self._max_index = max(self._pcmap.keys())
else:
self._max_index = 0
@classmethod
def from_index_list(cls, ilist, maxp):
pcmap = {}
for xi, ai in enumerate(ilist):
if ai is None:
continue
pcmap[maxp - xi] = cls.gf()[ai]
return cls(pcmap)
@classmethod
def from_value_list(cls, vlist, maxp):
pcmap = {}
for i, v in enumerate(vlist):
if v == 0:
continue
pcmap[maxp - i] = cls.gf()[cls.gf().index(v)]
return cls(pcmap)
@property
def pcmap(self):
return self._pcmap
@property
def max_index(self):
return self._max_index
@property
def as_int_list(self):
int_list = []
for p in reversed(range(self.max_index + 1)):
if p in self.pcmap:
int_list.append(self.pcmap[p].value)
else:
int_list.append(0)
return int_list
def __mul__(self, other):
new_pcmap = {}
for p1, c1 in self.pcmap.items():
for p2, c2 in other.pcmap.items():
if (p1 + p2) in new_pcmap:
old_value = new_pcmap[p1 + p2]
new_pcmap[p1 + p2] = old_value + c1 * c2
else:
new_pcmap[p1 + p2] = c1 * c2
return type(self)(new_pcmap)
    def __mod__(self, other):
        # Polynomial remainder. Assumes `other` is monic (leading coefficient 1),
        # as Reed-Solomon generator polynomials are; otherwise the leading term
        # would not cancel.
        r = type(self)(self.pcmap)
while r.max_index >= other.max_index:
pad = r.max_index - other.max_index
pad_item = type(self)({pad: r.pcmap[r.max_index]})
r += other * pad_item
return r
def __add__(self, other):
pcmap = {}
for p in range(max(self.max_index, other.max_index) + 1):
if p in self.pcmap:
pcmap[p] = self.pcmap[p]
if p in other.pcmap:
if p in pcmap:
pcmap[p] += other.pcmap[p]
else:
pcmap[p] = other.pcmap[p]
if pcmap[p] is None:
del pcmap[p]
return type(self)(pcmap)
def __str__(self):
pc_list = sorted(self.pcmap.items(), key=lambda x: x[0], reverse=True)
strings = []
for p, c in pc_list:
if p == 0:
item = str(c)
elif p == 1:
item = str(c) + 'x'
else:
item = str(c) + 'x^' + str(p)
strings.append(item)
return '+'.join(strings)
def __repr__(self):
return "Poly at {id}: {string}".format(id=id(self), string=str(self))
class GF28Poly(_GFPoly):
@classmethod
def gf(cls):
return GF28
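if __name__ == '__main__':  # pragma: no cover
    # Illustrative sketch, not part of the original module: exercise the
    # polynomial operators over GF(2^8). The coefficient values are arbitrary
    # assumptions chosen for demonstration. Run it from within the package
    # (the relative import above requires e.g. `python -m <package>.poly`).
    a = GF28Poly.from_value_list([1, 2], 1)  # x + 2
    b = GF28Poly.from_value_list([1, 4], 1)  # x + 4
    print('a * b       =', (a * b).as_int_list)        # [1, 6, 8]
    print('a + b       =', (a + b).as_int_list)        # [6]
    print('(a * b) % b =', ((a * b) % b).as_int_list)  # [0]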
| 1,764 |
365 | import bpy
bpy.context.scene.render.fps = 50
bpy.context.scene.render.fps_base = 1
| 32 |
421 |
#using <system.dll>
#using <system.runtime.remoting.dll>
#using "service.dll"
using namespace System;
using namespace System::Collections;
using namespace System::IO;
using namespace System::Runtime::Remoting;
using namespace System::Runtime::Remoting::Channels;
using namespace System::Runtime::Remoting::Channels::Http;
using namespace System::Threading;
using namespace SampleNamespace;
int main()
{
array<String^>^args = Environment::GetCommandLineArgs();
String^ serverConfigFile = "basicserver.exe.config";
if ( (args->Length > 2) && (args[ 1 ]->ToLower()->Equals( "/c" ) | args[ 1 ]->ToLower()->Equals( "-c" )) )
{
serverConfigFile = args[ 2 ];
}
RemotingConfiguration::Configure( "channels.config" );
RemotingConfiguration::Configure( serverConfigFile );
Console::WriteLine( "Listening..." );
String^ keyState = "";
while ( String::Compare( keyState, "0", true ) != 0 )
{
Console::WriteLine( "***** Press 0 to exit this service *****" );
keyState = Console::ReadLine();
}
return 0;
}
| 383 |
1,144 | package de.metas.ui.web.window.descriptor;
import com.google.common.base.MoreObjects;
import com.google.common.collect.ImmutableSet;
import de.metas.ui.web.window.datatypes.LookupValue;
import de.metas.ui.web.window.datatypes.LookupValuesList;
import de.metas.ui.web.window.datatypes.LookupValuesPage;
import de.metas.ui.web.window.datatypes.WindowId;
import de.metas.ui.web.window.descriptor.DocumentLayoutElementFieldDescriptor.LookupSource;
import de.metas.ui.web.window.model.lookup.LookupDataSourceContext;
import de.metas.ui.web.window.model.lookup.LookupDataSourceFetcher;
import de.metas.util.Check;
import lombok.NonNull;
import java.util.Optional;
import java.util.Set;
import java.util.function.Function;
/*
* #%L
* metasfresh-webui-api
* %%
* Copyright (C) 2017 metas GmbH
* %%
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as
* published by the Free Software Foundation, either version 2 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public
* License along with this program. If not, see
* <http://www.gnu.org/licenses/gpl-2.0.html>.
* #L%
*/
/**
* {@link LookupDescriptor} and {@link LookupDataSourceFetcher} implementation which is backed by a given {@link LookupValuesList} supplier.
*
* @author metas-dev <<EMAIL>>
*
*/
@SuppressWarnings("OptionalUsedAsFieldOrParameterType")
public final class ListLookupDescriptor extends SimpleLookupDescriptorTemplate
{
public static Builder builder()
{
return new Builder();
}
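	// Illustrative usage sketch (not from the source): the field name, table name
	// and the fetchPage(...) helper below are assumptions for demonstration only.
	//
	//   ListLookupDescriptor countryLookup = ListLookupDescriptor.builder()
	//           .setLookupSourceType(LookupSource.list)
	//           .setIntegerLookupValues(evalCtx -> fetchPage(evalCtx)) // fetchPage yields a LookupValuesPage
	//           .setDependsOnFieldNames(new String[] { "C_Country_ID" })
	//           .setLookupTableName("C_Country")
	//           .build();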
private final LookupSource lookupSourceType;
private final boolean numericKey;
private final Function<LookupDataSourceContext, LookupValuesPage> lookupValues;
private final ImmutableSet<String> dependsOnFieldNames;
private final Optional<String> lookupTableName;
private ListLookupDescriptor(@NonNull final Builder builder)
{
Check.assumeNotNull(builder.lookupValues, "Parameter builder.lookupValues is not null");
numericKey = builder.numericKey;
lookupSourceType = builder.lookupSourceType;
lookupValues = builder.lookupValues;
dependsOnFieldNames = builder.dependsOnFieldNames == null ? ImmutableSet.of() : ImmutableSet.copyOf(builder.dependsOnFieldNames);
lookupTableName = builder.lookupTableName;
}
@Override
public String toString()
{
return MoreObjects.toStringHelper(this)
.add("lookupValues", lookupValues)
.toString();
}
@Override
public LookupSource getLookupSourceType()
{
return lookupSourceType;
}
@Override
public boolean isNumericKey()
{
return numericKey;
}
@Override
public Set<String> getDependsOnFieldNames()
{
return dependsOnFieldNames;
}
@Override
public Optional<String> getLookupTableName()
{
return lookupTableName;
}
@Override
public LookupValue retrieveLookupValueById(final @NonNull LookupDataSourceContext evalCtx)
{
final LookupValuesPage page = lookupValues.apply(evalCtx);
return page.getValues().getById(evalCtx.getSingleIdToFilterAsObject());
}
@Override
public LookupValuesPage retrieveEntities(final LookupDataSourceContext evalCtx)
{
return lookupValues.apply(evalCtx);
}
@Override
public Optional<WindowId> getZoomIntoWindowId()
{
return Optional.empty();
}
//
//
//
//
//
@SuppressWarnings("OptionalUsedAsFieldOrParameterType")
public static class Builder
{
private LookupSource lookupSourceType = LookupSource.list;
private boolean numericKey;
private Function<LookupDataSourceContext, LookupValuesPage> lookupValues;
private Set<String> dependsOnFieldNames;
private Optional<String> lookupTableName = Optional.empty();
private Builder()
{
}
public ListLookupDescriptor build()
{
return new ListLookupDescriptor(this);
}
public Builder setLookupSourceType(@NonNull final LookupSource lookupSourceType)
{
this.lookupSourceType = lookupSourceType;
return this;
}
public Builder setLookupValues(final boolean numericKey, final Function<LookupDataSourceContext, LookupValuesPage> lookupValues)
{
this.numericKey = numericKey;
this.lookupValues = lookupValues;
return this;
}
public Builder setIntegerLookupValues(final Function<LookupDataSourceContext, LookupValuesPage> lookupValues)
{
setLookupValues(true, lookupValues);
return this;
}
public Builder setDependsOnFieldNames(final String[] dependsOnFieldNames)
{
this.dependsOnFieldNames = ImmutableSet.copyOf(dependsOnFieldNames);
return this;
}
public Builder setLookupTableName(final String lookupTableName)
{
this.lookupTableName = Check.isEmpty(lookupTableName, true) ? Optional.empty() : Optional.of(lookupTableName);
return this;
}
}
}
| 1,611 |
742 | package org.support.project.web.entity;
import org.support.project.di.Container;
import org.support.project.di.DI;
import org.support.project.di.Instance;
import org.support.project.web.entity.gen.GenSystemAttributesEntity;
/**
 * System attributes (additional system information).
*/
@DI(instance = Instance.Prototype)
public class SystemAttributesEntity extends GenSystemAttributesEntity {
/** SerialVersion */
private static final long serialVersionUID = 1L;
/**
     * Gets the instance via the DI container (AOP-aware).
     *
     * @return instance
*/
public static SystemAttributesEntity get() {
return Container.getComp(SystemAttributesEntity.class);
}
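    // Illustrative usage (sketch): obtain the instance through the DI container
    // rather than via `new`, so AOP interception applies.
    //   SystemAttributesEntity entity = SystemAttributesEntity.get();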
/**
     * Constructor.
*/
public SystemAttributesEntity() {
super();
}
/**
     * Constructor.
     *
     * @param configName config name
     * @param systemName system name
*/
public SystemAttributesEntity(String configName, String systemName) {
super(configName, systemName);
}
}
| 387 |
412 | /*******************************************************************\
Module: Symbolic Execution
Author: <NAME>, <EMAIL>
\*******************************************************************/
/// \file
/// Symbolic Execution
#include "goto_symex.h"
#include <util/std_expr.h>
void goto_symext::symex_dead(statet &state)
{
const goto_programt::instructiont &instruction=*state.source.pc;
symex_dead(state, instruction.dead_symbol());
}
static void remove_l1_object_rec(
goto_symext::statet &state,
const exprt &l1_expr,
const namespacet &ns)
{
if(is_ssa_expr(l1_expr))
{
const ssa_exprt &l1_ssa = to_ssa_expr(l1_expr);
const irep_idt &l1_identifier = l1_ssa.get_identifier();
// We cannot remove the object from the L1 renaming map, because L1 renaming
// information is not local to a path, but removing it from the propagation
// map and value-set is safe as 1) it is local to a path and 2) this
// instance can no longer appear (unless shared across threads).
if(
state.threads.size() <= 1 ||
state.write_is_shared(l1_ssa, ns) ==
goto_symex_statet::write_is_shared_resultt::NOT_SHARED)
{
state.value_set.values.erase_if_exists(l1_identifier);
}
state.propagation.erase_if_exists(l1_identifier);
// Remove from the local L2 renaming map; this means any reads from the dead
// identifier will use generation 0 (e.g. x!N@M#0, where N and M are
// positive integers), which is never defined by any write, and will be
// dropped by `goto_symext::merge_goto` on merging with branches where the
// identifier is still live.
state.drop_l1_name(l1_identifier);
}
else if(l1_expr.id() == ID_array || l1_expr.id() == ID_struct)
{
for(const auto &op : l1_expr.operands())
remove_l1_object_rec(state, op, ns);
}
else
UNREACHABLE;
}
void goto_symext::symex_dead(statet &state, const symbol_exprt &symbol_expr)
{
ssa_exprt to_rename = is_ssa_expr(symbol_expr) ? to_ssa_expr(symbol_expr)
: ssa_exprt{symbol_expr};
ssa_exprt ssa = state.rename_ssa<L1>(to_rename, ns).get();
const exprt fields = state.field_sensitivity.get_fields(ns, state, ssa);
remove_l1_object_rec(state, fields, ns);
}
| 894 |
1,104 | // In this header, you should import all the public headers of your framework
#import "TNSMLKitCameraView.h"
#import "TNSMLKitCameraViewDelegate.h"
//! Project version number for TNSMLKitCamera.
FOUNDATION_EXPORT double TNSMLKitCameraVersionNumber;
//! Project version string for TNSMLKitCamera.
FOUNDATION_EXPORT const unsigned char TNSMLKitCameraVersionString[];
| 107 |
1,810 | <gh_stars>1000+
# -*- coding: utf-8 -*-
"""The chain plugins manager object."""
class ChainPluginsManager(object):
"""Class that implements the chain plugins manager."""
_plugin_classes = {}
@classmethod
def deregister_plugin(cls, plugin_class):
"""Deregisters a plugin class.
The plugin classes are identified based on their lower case name.
Args:
plugin_class (type): the class object of the plugin.
Raises:
KeyError: if plugin class is not set for the corresponding name.
"""
plugin_name = plugin_class.NAME.lower()
if plugin_name not in cls._plugin_classes:
raise KeyError('Plugin class not set for name: {0:s}.'.format(
plugin_class.NAME))
del cls._plugin_classes[plugin_name]
@classmethod
def get_plugins(cls, analyzer_object):
"""Retrieves the chain plugins.
Args:
analyzer_object: an instance of the Sketch analyzer object.
Returns:
list[type]: list of all chain plugin objects.
"""
return [plugin_class(analyzer_object) for plugin_class in iter(
cls._plugin_classes.values())]
@classmethod
def register_plugin(cls, plugin_class):
"""Registers a plugin class.
The plugin classes are identified based on their lower case name.
Args:
plugin_class (type): the class object of the plugin.
Raises:
KeyError: if plugin class is already set for the corresponding
name.
"""
plugin_name = plugin_class.NAME.lower()
if plugin_name in cls._plugin_classes:
raise KeyError('Plugin class already set for name: {0:s}.'.format(
plugin_class.NAME))
cls._plugin_classes[plugin_name] = plugin_class
@classmethod
def register_plugins(cls, plugin_classes):
"""Registers plugin classes.
The plugin classes are identified based on their lower case name.
Args:
plugin_classes (list[type]): a list of class objects of the
plugins.
Raises:
KeyError: if plugin class is already set for the corresponding
name.
"""
for plugin_class in plugin_classes:
cls.register_plugin(plugin_class)
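# Usage sketch (hypothetical): registering and retrieving chain plugins.
# `DomainChainPlugin` and `analyzer` are illustrative names only; a real
# plugin needs a unique NAME class attribute and a constructor that accepts
# the analyzer object, as the manager's docstrings above describe.
#
#   class DomainChainPlugin(object):
#       NAME = 'domain'
#       def __init__(self, analyzer_object):
#           self._analyzer = analyzer_object
#
#   ChainPluginsManager.register_plugin(DomainChainPlugin)
#   plugins = ChainPluginsManager.get_plugins(analyzer)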
| 960 |
336 | # Used by TestProxy.test_exception_in_init.
raise ValueError("Exception in __init__.py")
| 30 |
12,278 | <reponame>189569400/ClickHouse<gh_stars>1000+
// Boost.Range library
//
// Copyright <NAME> 2003-2004.
// Use, modification and distribution is subject to the Boost Software
// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
//
// For more information, see http://www.boost.org/libs/range/
//
#ifndef BOOST_RANGE_RANGE_FWD_HPP_INCLUDED
#define BOOST_RANGE_RANGE_FWD_HPP_INCLUDED
namespace boost
{
// Extension points
template<typename C, typename Enabler>
struct range_iterator;
template<typename C, typename Enabler>
struct range_mutable_iterator;
template<typename C, typename Enabler>
struct range_const_iterator;
// Core classes
template<typename IteratorT>
class iterator_range;
template<typename ForwardRange>
class sub_range;
// Meta-functions
template<typename T>
struct range_category;
template<typename T>
struct range_difference;
template<typename T>
struct range_pointer;
template<typename T>
struct range_reference;
template<typename T>
struct range_reverse_iterator;
template<typename T>
struct range_size;
template<typename T>
struct range_value;
template<typename T>
struct has_range_iterator;
template<typename T>
struct has_range_const_iterator;
} // namespace boost
#endif // include guard
| 521 |
12,278 | /*
* Copyright <NAME> 2007 - 2015.
* Distributed under the Boost Software License, Version 1.0.
* (See accompanying file LICENSE_1_0.txt or copy at
* http://www.boost.org/LICENSE_1_0.txt)
*/
/*!
* \file sink_init_helpers.hpp
* \author <NAME>
* \date 14.03.2009
*
* \brief This header is the Boost.Log library implementation, see the library documentation
* at http://www.boost.org/doc/libs/release/libs/log/doc/html/index.html.
*/
#ifndef BOOST_LOG_DETAIL_SINK_INIT_HELPERS_HPP_INCLUDED_
#define BOOST_LOG_DETAIL_SINK_INIT_HELPERS_HPP_INCLUDED_
#include <string>
#include <boost/mpl/bool.hpp>
#include <boost/parameter/binding.hpp>
#include <boost/type_traits/is_void.hpp>
#include <boost/type_traits/is_array.hpp>
#include <boost/core/enable_if.hpp>
#include <boost/utility/string_view_fwd.hpp>
#include <boost/log/detail/config.hpp>
#if !defined(BOOST_NO_CXX17_HDR_STRING_VIEW)
#include <string_view>
#endif
#include <boost/log/core/core.hpp>
#include <boost/log/expressions/filter.hpp>
#include <boost/log/expressions/formatter.hpp>
#include <boost/log/utility/setup/filter_parser.hpp>
#include <boost/log/utility/setup/formatter_parser.hpp>
#include <boost/log/keywords/filter.hpp>
#include <boost/log/keywords/format.hpp>
#include <boost/log/detail/is_character_type.hpp>
#include <boost/log/detail/header.hpp>
#ifdef BOOST_HAS_PRAGMA_ONCE
#pragma once
#endif
namespace boost {
BOOST_LOG_OPEN_NAMESPACE
namespace aux {
// The function creates a filter functional object from the provided argument
template< typename CharT >
inline typename boost::enable_if_c<
log::aux::is_character_type< CharT >::value,
filter
>::type acquire_filter(const CharT* filter)
{
return boost::log::parse_filter(filter);
}
template< typename CharT, typename TraitsT, typename AllocatorT >
inline filter acquire_filter(std::basic_string< CharT, TraitsT, AllocatorT > const& filter)
{
return boost::log::parse_filter(filter);
}
#if !defined(BOOST_NO_CXX17_HDR_STRING_VIEW)
template< typename CharT, typename TraitsT >
inline filter acquire_filter(std::basic_string_view< CharT, TraitsT > const& filter)
{
const CharT* p = filter.data();
return boost::log::parse_filter(p, p + filter.size());
}
#endif // !defined(BOOST_NO_CXX17_HDR_STRING_VIEW)
template< typename CharT, typename TraitsT >
inline filter acquire_filter(boost::basic_string_view< CharT, TraitsT > const& filter)
{
const CharT* p = filter.data();
return boost::log::parse_filter(p, p + filter.size());
}
template< typename FilterT >
inline typename boost::disable_if_c<
boost::is_array< FilterT >::value,
FilterT const&
>::type acquire_filter(FilterT const& filter)
{
return filter;
}
// The function installs filter into the sink, if provided in the arguments pack
template< typename SinkT, typename ArgsT >
inline void setup_filter(SinkT&, ArgsT const&, mpl::true_)
{
}
template< typename SinkT, typename ArgsT >
inline void setup_filter(SinkT& s, ArgsT const& args, mpl::false_)
{
s.set_filter(aux::acquire_filter(args[keywords::filter]));
}
// The function creates a formatter functional object from the provided argument
template< typename CharT >
inline typename boost::enable_if_c<
log::aux::is_character_type< CharT >::value,
basic_formatter< CharT >
>::type acquire_formatter(const CharT* formatter)
{
return boost::log::parse_formatter(formatter);
}
template< typename CharT, typename TraitsT, typename AllocatorT >
inline basic_formatter< CharT > acquire_formatter(std::basic_string< CharT, TraitsT, AllocatorT > const& formatter)
{
return boost::log::parse_formatter(formatter);
}
#if !defined(BOOST_NO_CXX17_HDR_STRING_VIEW)
template< typename CharT, typename TraitsT >
inline basic_formatter< CharT > acquire_formatter(std::basic_string_view< CharT, TraitsT > const& formatter)
{
const CharT* p = formatter.data();
return boost::log::parse_formatter(p, p + formatter.size());
}
#endif // !defined(BOOST_NO_CXX17_HDR_STRING_VIEW)
template< typename CharT, typename TraitsT >
inline basic_formatter< CharT > acquire_formatter(boost::basic_string_view< CharT, TraitsT > const& formatter)
{
const CharT* p = formatter.data();
return boost::log::parse_formatter(p, p + formatter.size());
}
template< typename FormatterT >
inline typename boost::disable_if_c<
boost::is_array< FormatterT >::value,
FormatterT const&
>::type acquire_formatter(FormatterT const& formatter)
{
return formatter;
}
// The function installs filter into the sink, if provided in the arguments pack
template< typename SinkT, typename ArgsT >
inline void setup_formatter(SinkT&, ArgsT const&, mpl::true_)
{
}
template< typename SinkT, typename ArgsT >
inline void setup_formatter(SinkT& s, ArgsT const& args, mpl::false_)
{
s.set_formatter(aux::acquire_formatter(args[keywords::format]));
}
} // namespace aux
BOOST_LOG_CLOSE_NAMESPACE // namespace log
} // namespace boost
#include <boost/log/detail/footer.hpp>
#endif // BOOST_LOG_DETAIL_SINK_INIT_HELPERS_HPP_INCLUDED_
| 1,896 |
5,168 | <filename>dnn/src/aarch64/conv_bias/opr_impl.h
/**
* \file dnn/src/aarch64/conv_bias/opr_impl.h
* MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
*
* Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*/
#pragma once
#include "src/arm_common/conv_bias/opr_impl.h"
#include "src/common/utils.h"
namespace megdnn {
namespace aarch64 {
class ConvBiasImpl : public arm_common::ConvBiasImpl {
public:
using arm_common::ConvBiasImpl::ConvBiasImpl;
class AlgoBase : public arm_common::ConvBiasImpl::AlgoBase {
public:
AlgoBase() : arm_common::ConvBiasImpl::AlgoBase() {
m_handle_type = Handle::HandleType::AARCH64;
}
};
SmallVector<fallback::ConvBiasImpl::AlgoBase*> get_all_packed_algo() override;
MEGDNN_FB_DECL_GET_ALGO_FROM_DESC(ConvBiasImpl);
protected:
const char* get_algorithm_set_name() const override;
private:
class AlgoF32DirectStride2;
class AlgoS8MatrixMul;
class AlgoQU8MatrixMul;
#if __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
class AlgoF16DirectStride2;
#endif
class AlgoPack;
static const AlgoPack& algo_pack();
};
} // namespace aarch64
} // namespace megdnn
// vim: syntax=cpp.doxygen
| 565 |
637 | <filename>app-framework/src/main/java/com/unclezs/novel/app/framework/util/ImageLoader.java
package com.unclezs.novel.app.framework.util;
import cn.hutool.cache.CacheUtil;
import cn.hutool.cache.impl.TimedCache;
import cn.hutool.core.io.FileUtil;
import cn.hutool.core.util.URLUtil;
import com.unclezs.novel.analyzer.util.uri.UrlUtils;
import java.util.function.Consumer;
import javafx.beans.InvalidationListener;
import javafx.beans.Observable;
import javafx.scene.image.Image;
import lombok.experimental.UtilityClass;
/**
 * Loads images and caches them for five minutes
*
* @author <EMAIL>
* @date 2021/4/24 14:52
*/
@UtilityClass
public class ImageLoader {
/**
 * Image cache
*/
private static final TimedCache<String, Image> IMAGE_CACHE = CacheUtil.newTimedCache(5 * 60L * 1000L);
/**
 * Reads directly from the cache; may return null
 *
 * @param url image URL
 * @return the cached image, or null if absent
*/
public Image get(String url) {
return IMAGE_CACHE.get(url, true);
}
/**
 * Reads from the cache, loading the image if it is not cached yet
 *
 * @param url image URL
 * @param defaultImage default image used when loading fails
 * @param callback callback invoked with the resolved image
*/
public void load(String url, Image defaultImage, Consumer<Image> callback) {
Image cacheImage = get(url);
// Check the cache first
if (cacheImage == null) {
if (!UrlUtils.isHttpUrl(url) && FileUtil.exist(url)) {
url = URLUtil.getURL(FileUtil.file(url)).toString();
} else if (!UrlUtils.isHttpUrl(url)) {
callback.accept(defaultImage);
return;
}
Image image = new Image(url, true);
IMAGE_CACHE.put(url, image);
String finalUrl = url;
image.progressProperty().addListener(new InvalidationListener() {
@Override
public void invalidated(Observable observable) {
if (image.getProgress() == 1) {
if (image.isError()) {
IMAGE_CACHE.put(finalUrl, defaultImage);
callback.accept(defaultImage);
} else {
callback.accept(image);
}
image.progressProperty().removeListener(this);
}
}
});
} else {
// If an image that is still loading is requested again, create a new one
if (cacheImage.getProgress() != 1) {
Image image = new Image(url, true);
String finalUrl = url;
image.progressProperty().addListener(new InvalidationListener() {
@Override
public void invalidated(Observable observable) {
if (image.getProgress() == 1) {
callback.accept(get(finalUrl));
image.progressProperty().removeListener(this);
}
}
});
} else {
callback.accept(cacheImage);
}
}
}
}
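// Usage sketch (hypothetical): loading a cover image with a bundled placeholder as the
// fallback. `coverView` and `PLACEHOLDER` are illustrative names and not part of this
// class; only ImageLoader.load(...) above is real.
//
//   ImageLoader.load("https://example.com/cover.jpg", PLACEHOLDER,
//       image -> coverView.setImage(image));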
| 1,293 |
466 | #include "BaseShader.h"
#include <iostream>
#include <fstream>
#include <sstream>
BaseShader::BaseShader(const char * shaderPath)
{
path = std::string(shaderPath);
std::string shaderCode = loadShaderFromFile(shaderPath);
const char * shaderString = shaderCode.c_str();
shadType = getShaderType(shaderPath);
shad = glCreateShader(shadType.type);
glShaderSource(shad, 1, &shaderString, NULL);
glCompileShader(shad);
checkCompileErrors(shad, shadType.name.c_str(), getShaderName(shaderPath));
}
bool checkCompileErrors(unsigned int shader, std::string type, std::string shaderName)
{
int success;
char infoLog[1024];
if (type != "PROGRAM")
{
glGetShaderiv(shader, GL_COMPILE_STATUS, &success);
if (!success)
{
glGetShaderInfoLog(shader, 1024, NULL, infoLog);
std::cout << "ERROR: SHADER" << shaderName << "COMPILATION ERROR of type: " << type << "\n" << infoLog << "\n -- --------------------------------------------------- -- " << std::endl;
}
}
else
{
glGetProgramiv(shader, GL_LINK_STATUS, &success);
if (!success)
{
glGetProgramInfoLog(shader, 1024, NULL, infoLog);
std::cout << "ERROR::PROGRAM_LINKING_ERROR of type: " << type << "\n" << infoLog << "\n -- --------------------------------------------------- -- " << std::endl;
}
}
//if (success) {
// std::cout << type + " SHADER SUCCESSFULLY COMPILED AND/OR LINKED!" << std::endl;
//}
return success;
}
std::string BaseShader::loadShaderFromFile(const char* shaderPath) {
std::string shaderCode;
std::ifstream shaderFile;
shaderFile.exceptions(std::ifstream::failbit | std::ifstream::badbit);
try
{
// open files
shaderFile.open(shaderPath);
std::stringstream shaderStream;
// read file's buffer contents into streams
shaderStream << shaderFile.rdbuf();
// close file handlers
shaderFile.close();
// convert stream into string
shaderCode = shaderStream.str();
}
catch (const std::ifstream::failure& e)
{
std::cout << "ERROR::SHADER " << getShaderName(shaderPath) << " FILE_NOT_SUCCESSFULLY_READ (" << e.what() << ")" << std::endl;
}
return shaderCode;
}
std::string getShaderName(const char* path) {
std::string pathstr = std::string(path);
const size_t last_slash_idx = pathstr.find_last_of("/");
if (std::string::npos != last_slash_idx)
{
pathstr.erase(0, last_slash_idx + 1);
}
return pathstr;
}
shaderType getShaderType(const char* path) {
std::string type = getShaderName(path);
const size_t last_slash_idx = type.find_last_of(".");
if (std::string::npos != last_slash_idx)
{
type.erase(0, last_slash_idx + 1);
}
if (type == "vert")
return shaderType(GL_VERTEX_SHADER, "VERTEX");
if (type == "frag")
return shaderType(GL_FRAGMENT_SHADER, "FRAGMENT");
if (type == "tes")
return shaderType(GL_TESS_EVALUATION_SHADER, "TESS_EVALUATION");
if (type == "tcs")
return shaderType(GL_TESS_CONTROL_SHADER, "TESS_CONTROL");
if (type == "geom")
return shaderType(GL_GEOMETRY_SHADER, "GEOMETRY");
if (type == "comp")
return shaderType(GL_COMPUTE_SHADER, "COMPUTE");
// Fall back to a vertex shader so every control path returns a value;
// unknown extensions previously fell off the end of the function (undefined behaviour).
std::cout << "ERROR::SHADER unknown shader file extension \"" << type << "\", defaulting to VERTEX" << std::endl;
return shaderType(GL_VERTEX_SHADER, "VERTEX");
}
BaseShader::~BaseShader()
{
//glDeleteShader(shad);
}
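// Usage sketch (hypothetical): compiling a vertex shader from disk once a GL context
// exists. "shaders/basic.vert" and the later glAttachShader call are illustrative;
// only the constructor above is taken from this file.
//
//   BaseShader vertexShader("shaders/basic.vert");
//   // the compiled handle (member `shad`) is then attached to a program elsewhere,
//   // e.g. glAttachShader(program, vertexShader.shad);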
| 1,204 |
771 | {
"name": "RegExr",
"url": "https://regexr.com/",
"desc": "Learn, build, & test Regular Expressions",
"tags": [
"Regular Expressions"
],
"maintainers": [
"gskinner"
],
"addedAt": "2019-12-29"
}
| 96 |
352 | <filename>core/src/main/java/com/crawljax/condition/eventablecondition/package-info.java
/**
* Eventable condition utils.
*/
package com.crawljax.condition.eventablecondition; | 56 |
2,542 | // ------------------------------------------------------------
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License (MIT). See License.txt in the repo root for license information.
// ------------------------------------------------------------
#include "stdafx.h"
using namespace std;
using namespace Common;
using namespace ServiceModel;
using namespace Hosting2;
using namespace Management;
using namespace ImageModel;
StringLiteral const TraceEnvironmentManager("EnvironmentManager");
// ********************************************************************************************************************
// EnvironmentManager::OpenAsyncOperation Implementation
//
class EnvironmentManager::OpenAsyncOperation
: public AsyncOperation,
TextTraceComponent<TraceTaskCodes::Hosting>
{
DENY_COPY(OpenAsyncOperation)
public:
OpenAsyncOperation(
__in EnvironmentManager & owner,
TimeSpan const timeout,
AsyncCallback const & callback,
AsyncOperationSPtr const & parent)
: AsyncOperation(callback, parent),
owner_(owner),
timeoutHelper_(timeout)
{
}
virtual ~OpenAsyncOperation()
{
}
static ErrorCode OpenAsyncOperation::End(
AsyncOperationSPtr const & operation)
{
auto thisPtr = AsyncOperation::End<OpenAsyncOperation>(operation);
return thisPtr->Error;
}
protected:
void OnStart(AsyncOperationSPtr const & thisSPtr)
{
// Create and open the providers
// 1. Log Collection Provider
auto error = LogCollectionProviderFactory::CreateLogCollectionProvider(
owner_.Root,
owner_.hosting_.NodeId,
owner_.hosting_.DeploymentFolder,
owner_.logCollectionProvider_);
WriteTrace(
error.ToLogLevel(),
TraceEnvironmentManager,
owner_.Root.TraceId,
"Create logCollectionProvider: error {0}",
error);
if (!error.IsSuccess())
{
TryComplete(thisSPtr, error);
return;
}
// Open log collection provider
WriteNoise(
TraceEnvironmentManager,
owner_.Root.TraceId,
"{0}: Begin(LogCollectionProvider->Open): timeout {1}",
owner_.hosting_.NodeId,
timeoutHelper_.GetRemainingTime());
auto operation = owner_.logCollectionProvider_->BeginOpen(
timeoutHelper_.GetRemainingTime(),
[this](AsyncOperationSPtr const & operation) { this->OnLogCollectionProviderOpenCompleted(operation); },
thisSPtr);
if (operation->CompletedSynchronously)
{
FinishLogCollectionProviderOpen(operation);
}
}
void OnLogCollectionProviderOpenCompleted(
AsyncOperationSPtr const & operation)
{
if (!operation->CompletedSynchronously)
{
FinishLogCollectionProviderOpen(operation);
}
}
void FinishLogCollectionProviderOpen(
AsyncOperationSPtr const & operation)
{
auto error = owner_.logCollectionProvider_->EndOpen(operation);
WriteTrace(
error.ToLogLevel(),
TraceEnvironmentManager,
owner_.Root.TraceId,
"{0}: End(logCollectionProvider->Open): error {1}",
owner_.hosting_.NodeId,
error);
if (!error.IsSuccess())
{
TryComplete(operation->Parent, error);
return;
}
if (!HostingConfig::GetConfig().EndpointProviderEnabled)
{
WriteInfo(
TraceEnvironmentManager,
owner_.Root.TraceId,
"Endpoint filtering disabled, do not create and open endpoint provider");
}
else
{
// 3. Endpoint Provider
owner_.endpointProvider_ = make_unique<EndpointProvider>(
owner_.Root,
owner_.hosting_.NodeId,
owner_.hosting_.StartApplicationPortRange,
owner_.hosting_.EndApplicationPortRange);
error = owner_.endpointProvider_->Open();
if (!error.IsSuccess())
{
WriteWarning(
TraceEnvironmentManager,
owner_.Root.TraceId,
"Open endpointProvider failed: error {0}",
error);
TryComplete(operation->Parent, error);
return;
}
}
// 4. ETW session provider
#if !defined(PLATFORM_UNIX)
owner_.etwSessionProvider_ = make_unique<EtwSessionProvider>(
owner_.Root,
owner_.hosting_.NodeName,
owner_.hosting_.DeploymentFolder);
error = owner_.etwSessionProvider_->Open();
if (!error.IsSuccess())
{
WriteWarning(
TraceEnvironmentManager,
owner_.Root.TraceId,
"Open EtwSessionProvider failed: error {0}",
error);
TryComplete(operation->Parent, error);
return;
}
#endif
// 5. Crash dump provider
owner_.crashDumpProvider_ = make_unique<CrashDumpProvider>(
owner_.Root,
owner_);
WriteNoise(
TraceEnvironmentManager,
owner_.Root.TraceId,
"{0}: Begin(CrashDumpProvider->Open): timeout {1}",
owner_.hosting_.NodeId,
timeoutHelper_.GetRemainingTime());
auto nextOperation = owner_.crashDumpProvider_->BeginOpen(
timeoutHelper_.GetRemainingTime(),
[this](AsyncOperationSPtr const & nextOperation) { this->OnCrashDumpProviderOpenCompleted(nextOperation); },
operation->Parent);
if (nextOperation->CompletedSynchronously)
{
FinishCrashDumpProviderOpen(nextOperation);
}
}
void OnCrashDumpProviderOpenCompleted(
AsyncOperationSPtr const & operation)
{
if (!operation->CompletedSynchronously)
{
FinishCrashDumpProviderOpen(operation);
}
}
void FinishCrashDumpProviderOpen(
AsyncOperationSPtr const & operation)
{
auto error = owner_.crashDumpProvider_->EndOpen(operation);
WriteTrace(
error.ToLogLevel(),
TraceEnvironmentManager,
owner_.Root.TraceId,
"{0}: End(CrashDumpProvider->Open): error {1}",
owner_.hosting_.NodeId,
error);
if(!error.IsSuccess())
{
TryComplete(operation->Parent, error);
return;
}
GetCurrentUserSid(operation->Parent);
}
void GetCurrentUserSid(AsyncOperationSPtr thisSPtr)
{
SidUPtr currentUserSid;
auto error = BufferedSid::GetCurrentUserSid(currentUserSid);
if(!error.IsSuccess())
{
WriteWarning(TraceEnvironmentManager,
owner_.Root.TraceId,
"Failed to get current user sid. Error={0}",
error);
TryComplete(thisSPtr, error);
return;
}
owner_.isSystem_ = currentUserSid->IsLocalSystem();
error = currentUserSid->ToString(owner_.currentUserSid_);
WriteTrace(error.ToLogLevel(),
TraceEnvironmentManager,
owner_.Root.TraceId,
"Failed to get current user sid string. Error={0}",
error);
if (error.IsSuccess())
{
error = SecurityUtility::IsCurrentUserAdmin(owner_.isAdminUser_);
WriteTrace(error.ToLogLevel(),
TraceEnvironmentManager,
owner_.Root.TraceId,
"Failed to check if current user is Admin. Error={0}",
error);
}
TryComplete(thisSPtr, error);
}
private:
EnvironmentManager & owner_;
TimeoutHelper timeoutHelper_;
};
// ********************************************************************************************************************
// EnvironmentManager::CloseAsyncOperation Implementation
//
class EnvironmentManager::CloseAsyncOperation
: public AsyncOperation,
TextTraceComponent<TraceTaskCodes::Hosting>
{
DENY_COPY(CloseAsyncOperation)
public:
CloseAsyncOperation(
__in EnvironmentManager & owner,
TimeSpan const timeout,
AsyncCallback const & callback,
AsyncOperationSPtr const & parent)
: AsyncOperation(callback, parent),
owner_(owner),
timeoutHelper_(timeout),
lastError_(ErrorCodeValue::Success)
{
}
virtual ~CloseAsyncOperation()
{
}
static ErrorCode CloseAsyncOperation::End(
AsyncOperationSPtr const & operation)
{
auto thisPtr = AsyncOperation::End<CloseAsyncOperation>(operation);
return thisPtr->Error;
}
protected:
void OnStart(AsyncOperationSPtr const & thisSPtr)
{
// Close the providers; on errors, save the error and
// continue closing the other providers
// 1. Endpoint Provider
if (owner_.endpointProvider_)
{
ASSERT_IFNOT(
HostingConfig::GetConfig().EndpointProviderEnabled,
"Endpoint Filtering disabled, endpoint provider shouldn't exist");
auto error = owner_.endpointProvider_->Close();
WriteTrace(
error.ToLogLevel(),
TraceEnvironmentManager,
owner_.Root.TraceId,
"Close endpointProvider: error {0}",
error);
if (!error.IsSuccess())
{
lastError_.Overwrite(error);
}
}
// 3. ETW session provider
if (owner_.etwSessionProvider_)
{
auto error = owner_.etwSessionProvider_->Close();
WriteTrace(
error.ToLogLevel(),
TraceEnvironmentManager,
owner_.Root.TraceId,
"Close EtwSessionProvider: error {0}",
error);
if (!error.IsSuccess())
{
lastError_.Overwrite(error);
}
}
// 4. Crash dump provider
if (owner_.crashDumpProvider_)
{
WriteNoise(
TraceEnvironmentManager,
owner_.Root.TraceId,
"{0}: Begin(CrashDumpProvider->Close): timeout {1}",
owner_.hosting_.NodeId,
timeoutHelper_.GetRemainingTime());
auto operation = owner_.crashDumpProvider_->BeginClose(
timeoutHelper_.GetRemainingTime(),
[this](AsyncOperationSPtr const & operation) { this->OnCrashDumpProviderCloseCompleted(operation); },
thisSPtr);
if (operation->CompletedSynchronously)
{
FinishCrashDumpProviderClose(operation);
}
}
}
void OnCrashDumpProviderCloseCompleted(
AsyncOperationSPtr const & operation)
{
if (!operation->CompletedSynchronously)
{
FinishCrashDumpProviderClose(operation);
}
}
void FinishCrashDumpProviderClose(
AsyncOperationSPtr const & operation)
{
auto error = owner_.crashDumpProvider_->EndClose(operation);
WriteTrace(
error.ToLogLevel(),
TraceEnvironmentManager,
owner_.Root.TraceId,
"{0}: End(CrashDumpProvider->Close): error {1}",
owner_.hosting_.NodeId,
error);
if (!error.IsSuccess())
{
lastError_.Overwrite(error);
}
// 5. Log Collection Provider
if (owner_.logCollectionProvider_)
{
WriteNoise(
TraceEnvironmentManager,
owner_.Root.TraceId,
"{0}: Begin(LogCollectionProvider->Close): timeout {1}",
owner_.hosting_.NodeId,
timeoutHelper_.GetRemainingTime());
auto nextOperation = owner_.logCollectionProvider_->BeginClose(
timeoutHelper_.GetRemainingTime(),
[this](AsyncOperationSPtr const & nextOperation) { this->OnLogCollectionProviderCloseCompleted(nextOperation); },
operation->Parent);
if (nextOperation->CompletedSynchronously)
{
FinishLogCollectionProviderClose(nextOperation);
}
}
}
void OnLogCollectionProviderCloseCompleted(
AsyncOperationSPtr const & operation)
{
if (!operation->CompletedSynchronously)
{
FinishLogCollectionProviderClose(operation);
}
}
void FinishLogCollectionProviderClose(
AsyncOperationSPtr const & operation)
{
auto error = owner_.logCollectionProvider_->EndClose(operation);
WriteTrace(
error.ToLogLevel(),
TraceEnvironmentManager,
owner_.Root.TraceId,
"{0}: End(logCollectionProvider->Close): error {1}",
owner_.hosting_.NodeId,
error);
if (!error.IsSuccess())
{
lastError_.Overwrite(error);
}
TryComplete(operation->Parent, lastError_);
}
private:
EnvironmentManager & owner_;
TimeoutHelper timeoutHelper_;
ErrorCode lastError_;
};
// ********************************************************************************************************************
// EnvironmentManager::SetupApplicationAsyncOperation Implementation
//
class EnvironmentManager::SetupApplicationAsyncOperation
: public AsyncOperation,
TextTraceComponent<TraceTaskCodes::Hosting>
{
DENY_COPY(SetupApplicationAsyncOperation)
public:
SetupApplicationAsyncOperation(
__in EnvironmentManager & owner,
ApplicationIdentifier const & applicationId,
ApplicationPackageDescription const & appPackageDescription,
TimeSpan const timeout,
AsyncCallback const & callback,
AsyncOperationSPtr const & parent)
: AsyncOperation(callback, parent),
owner_(owner),
applicationId_(applicationId),
appPackageDescription_(appPackageDescription),
timeoutHelper_(timeout),
appEnvironmentContext_(),
lastError_(ErrorCodeValue::Success)
{
appEnvironmentContext_ = make_shared<ApplicationEnvironmentContext>(applicationId_);
}
virtual ~SetupApplicationAsyncOperation()
{
}
static ErrorCode SetupApplicationAsyncOperation::End(
AsyncOperationSPtr const & operation,
__out ApplicationEnvironmentContextSPtr & appEnvironmentContext)
{
auto thisPtr = AsyncOperation::End<SetupApplicationAsyncOperation>(operation);
if (thisPtr->Error.IsSuccess())
{
appEnvironmentContext = move(thisPtr->appEnvironmentContext_);
}
return thisPtr->Error;
}
protected:
void OnStart(AsyncOperationSPtr const & thisSPtr)
{
SetupEnvironment(thisSPtr);
}
private:
void SetupEnvironment(AsyncOperationSPtr const & thisSPtr)
{
owner_.crashDumpProvider_->SetupApplicationCrashDumps(
applicationId_.ToString(),
appPackageDescription_);
if ((!appPackageDescription_.DigestedEnvironment.Principals.Users.empty()) ||
(!appPackageDescription_.DigestedEnvironment.Principals.Groups.empty()))
{
SetupPrincipals(thisSPtr);
}
else
{
WriteNoise(
TraceEnvironmentManager,
owner_.Root.TraceId,
"{0}: RunAsPolicyEnabled disabled, do not set up principals",
applicationId_);
WritePrincipalSIDsToFile(thisSPtr);
SetupLogCollectionPolicy(appPackageDescription_.DigestedEnvironment.Policies.LogCollectionEntries, thisSPtr);
}
}
void SetupPrincipals(AsyncOperationSPtr const & thisSPtr)
{
// Set up the security users and the groups
if (!HostingConfig::GetConfig().RunAsPolicyEnabled)
{
WriteWarning(
TraceEnvironmentManager,
owner_.Root.TraceId,
"{0}: RunAsPolicyEnabled disabled, but users and groups specified",
applicationId_);
TryComplete(thisSPtr, ErrorCode(ErrorCodeValue::OperationFailed, StringResource::Get(IDS_HOSTING_RunAsPolicy_NotEnabled)));
return;
}
else
{
WriteNoise(
TraceEnvironmentManager,
owner_.Root.TraceId,
"{0}: Begin setup principals",
applicationId_);
auto operation = owner_.hosting_.FabricActivatorClientObj->BeginConfigureSecurityPrincipals(
applicationId_.ToString(),
applicationId_.ApplicationNumber,
appPackageDescription_.DigestedEnvironment.Principals,
timeoutHelper_.GetRemainingTime(),
[this](AsyncOperationSPtr const & operation) { this->OnConfigureSecurityPrincipalsCompleted(operation, false); },
thisSPtr);
OnConfigureSecurityPrincipalsCompleted(operation, true);
}
}
void OnConfigureSecurityPrincipalsCompleted(AsyncOperationSPtr operation, bool expectedCompletedSynchronously)
{
if(operation->CompletedSynchronously != expectedCompletedSynchronously)
{
return;
}
PrincipalsProviderContextUPtr principalsContext;
auto error = owner_.hosting_.FabricActivatorClientObj->EndConfigureSecurityPrincipals(operation, principalsContext);
WriteTrace(
error.ToLogLevel(),
TraceEnvironmentManager,
owner_.Root.TraceId,
"{0}: End ConfigureSecurityPrincipals: error {1}",
applicationId_,
error);
if(!error.IsSuccess())
{
TryComplete(operation->Parent,
ErrorCode(error.ReadValue(), wformatString("{0}{1}", StringResource::Get(IDS_HOSTING_ApplicationPrincipals_Setup_Failed), error.ErrorCodeValueToString())));
return;
}
appEnvironmentContext_->SetPrincipalsProviderContext(move(principalsContext));
WriteTrace(
error.ToLogLevel(),
TraceEnvironmentManager,
owner_.Root.TraceId,
"{0}: End setup principals: error {1}",
applicationId_,
error);
// Set up default runAs policy
wstring const & runAsName = appPackageDescription_.DigestedEnvironment.Policies.DefaultRunAs.UserRef;
WriteNoise(
TraceEnvironmentManager,
owner_.Root.TraceId,
"{0}: Set default RunAs to \"{1}\"",
applicationId_,
runAsName);
error = appEnvironmentContext_->SetDefaultRunAsUser(runAsName);
WriteTrace(
error.ToLogLevel(),
TraceEnvironmentManager,
owner_.Root.TraceId,
"{0}: End set default RunAs: error {1}",
applicationId_,
error);
if (!error.IsSuccess())
{
CleanupOnError(operation->Parent, error);
return;
}
WritePrincipalSIDsToFile(operation->Parent);
SetupLogCollectionPolicy(appPackageDescription_.DigestedEnvironment.Policies.LogCollectionEntries, operation->Parent);
}
void WritePrincipalSIDsToFile(AsyncOperationSPtr const & thisSPtr)
{
ImageModel::RunLayoutSpecification runLayout(owner_.Hosting.DeploymentFolder);
map<wstring, wstring> sids;
if(HostingConfig::GetConfig().RunAsPolicyEnabled && appEnvironmentContext_->PrincipalsContext)
{
for(auto it = appEnvironmentContext_->PrincipalsContext->PrincipalsInformation.begin(); it != appEnvironmentContext_->PrincipalsContext->PrincipalsInformation.end(); ++it)
{
sids.insert(make_pair((*it)->Name, (*it)->SidString));
}
}
// Write the principal SIDs file
wstring sidsFileName = runLayout.GetPrincipalSIDsFile(applicationId_.ToString());
bool isSuccess = PrincipalsDescription::WriteSIDsToFile(sidsFileName, sids);
if(!isSuccess)
{
WriteWarning(
TraceEnvironmentManager,
owner_.Root.TraceId,
"Could not write the SIDS to file {0}, ApplicationId={1}. Cleaning the application environment.",
sidsFileName,
applicationId_);
CleanupOnError(thisSPtr, ErrorCode(ErrorCodeValue::OperationFailed));
return;
}
}
void SetupLogCollectionPolicy(
vector<LogCollectionPolicyDescription> const & policies,
AsyncOperationSPtr const & thisSPtr)
{
if (policies.empty())
{
// Nothing to do
WriteNoise(
TraceEnvironmentManager,
owner_.Root.TraceId,
"{0}: Log Collection is not enabled",
applicationId_);
TryComplete(thisSPtr, ErrorCode(ErrorCodeValue::Success));
return;
}
RunLayoutSpecification runLayout(owner_.Hosting.DeploymentFolder);
wstring logFolderPath = runLayout.GetApplicationLogFolder(applicationId_.ToString());
appEnvironmentContext_->LogCollectionPath = logFolderPath;
vector<wstring> paths;
for(auto it = policies.begin(); it != policies.end(); ++it)
{
wstring path = it->Path.empty() ? logFolderPath : Path::Combine(logFolderPath, it->Path);
paths.push_back(move(path));
}
WriteNoise(
TraceEnvironmentManager,
owner_.Root.TraceId,
"{0}: Begin(LogCollectionProvider->AddLogPaths): root path {1}",
applicationId_,
logFolderPath);
auto operation = owner_.logCollectionProvider_->BeginAddLogPaths(
applicationId_.ToString(),
paths,
timeoutHelper_.GetRemainingTime(),
[this](AsyncOperationSPtr const & operation) { this->OnLogCollectionSetupCompleted(operation); },
thisSPtr);
if (operation->CompletedSynchronously)
{
FinishLogCollectionSetup(operation);
}
}
void OnLogCollectionSetupCompleted(
AsyncOperationSPtr const & operation)
{
if (!operation->CompletedSynchronously)
{
FinishLogCollectionSetup(operation);
}
}
void FinishLogCollectionSetup(
AsyncOperationSPtr const & operation)
{
auto error = owner_.logCollectionProvider_->EndAddLogPaths(operation);
WriteTrace(
error.ToLogLevel(),
TraceEnvironmentManager,
owner_.Root.TraceId,
"{0}: End(logCollectionProvider->AddLogPaths): error {1}",
applicationId_,
error);
if (!error.IsSuccess())
{
CleanupOnError(operation->Parent, error);
return;
}
TryComplete(operation->Parent, error);
}
void CleanupOnError(
AsyncOperationSPtr const & thisSPtr,
ErrorCode const error)
{
// Call cleanup, best effort
lastError_.Overwrite(error);
WriteNoise(
TraceEnvironmentManager,
owner_.Root.TraceId,
"{0}: Begin(Setup->CleanupApplication due to error {1})",
applicationId_,
error);
auto operation = owner_.BeginCleanupApplicationEnvironment(
appEnvironmentContext_,
timeoutHelper_.GetRemainingTime(),
[this](AsyncOperationSPtr const & operation) { this->FinishCleanupApplication(operation, false); },
thisSPtr);
FinishCleanupApplication(operation, true);
}
void FinishCleanupApplication(AsyncOperationSPtr const & operation, bool expectedCompletedSynchronously)
{
if (operation->CompletedSynchronously != expectedCompletedSynchronously)
{
return;
}
auto error = owner_.EndCleanupApplicationEnvironment(operation);
WriteTrace(
error.ToLogLevel(),
TraceEnvironmentManager,
owner_.Root.TraceId,
"{0}: End(Setup->CleanupApplication due to error {1}): error {2}",
applicationId_,
lastError_,
error);
// Complete with the saved error
TryComplete(operation->Parent, lastError_);
}
private:
EnvironmentManager & owner_;
ApplicationIdentifier const applicationId_;
ApplicationPackageDescription const appPackageDescription_;
TimeoutHelper timeoutHelper_;
ApplicationEnvironmentContextSPtr appEnvironmentContext_;
ErrorCode lastError_;
};
// ********************************************************************************************************************
// EnvironmentManager::CleanupApplicationAsyncOperation Implementation
//
class EnvironmentManager::CleanupApplicationAsyncOperation
: public AsyncOperation,
TextTraceComponent<TraceTaskCodes::Hosting>
{
DENY_COPY(CleanupApplicationAsyncOperation)
public:
CleanupApplicationAsyncOperation(
EnvironmentManager & appEnvironmentManager,
ApplicationEnvironmentContextSPtr const & appEnvironmentContext,
TimeSpan const timeout,
AsyncCallback const & callback,
AsyncOperationSPtr const & parent)
: AsyncOperation(callback, parent),
owner_(appEnvironmentManager),
appEnvironmentContext_(appEnvironmentContext),
timeoutHelper_(timeout),
lastError_(ErrorCodeValue::Success)
{
}
virtual ~CleanupApplicationAsyncOperation()
{
}
static ErrorCode CleanupApplicationAsyncOperation::End(
AsyncOperationSPtr const & operation)
{
auto thisPtr = AsyncOperation::End<CleanupApplicationAsyncOperation>(operation);
return thisPtr->Error;
}
protected:
void OnStart(AsyncOperationSPtr const & thisSPtr)
{
if (!appEnvironmentContext_)
{
TryComplete(thisSPtr, ErrorCode(ErrorCodeValue::Success));
return;
}
owner_.crashDumpProvider_->CleanupApplicationCrashDumps(
appEnvironmentContext_->ApplicationId.ToString());
if (!HostingConfig::GetConfig().RunAsPolicyEnabled)
{
WriteNoise(
TraceEnvironmentManager,
owner_.Root.TraceId,
"{0}: App sandbox disabled, no principals to clean up",
appEnvironmentContext_->ApplicationId);
ASSERT_IF(appEnvironmentContext_->PrincipalsContext, "RunAsPolicyEnabled disabled, principals context shouldn't exist");
// Disable log collection if enabled
CleanupLogCollectionPolicy(thisSPtr);
}
else
{
if (!appEnvironmentContext_->PrincipalsContext)
{
WriteNoise(
TraceEnvironmentManager,
owner_.Root.TraceId,
"{0}: App PrincipalContext not present, no principals to clean up",
appEnvironmentContext_->ApplicationId);
CleanupLogCollectionPolicy(thisSPtr);
return;
}
auto operation = owner_.hosting_.FabricActivatorClientObj->BeginCleanupSecurityPrincipals(
appEnvironmentContext_->ApplicationId.ToString(),
timeoutHelper_.GetRemainingTime(),
[this](AsyncOperationSPtr const & operation) { this->OnCleanupSecurityPrincipalsCompleted(operation, false); },
thisSPtr);
OnCleanupSecurityPrincipalsCompleted(operation, true);
}
}
private:
void OnCleanupSecurityPrincipalsCompleted(AsyncOperationSPtr operation, bool expectedCompletedSynchronously)
{
if(operation->CompletedSynchronously != expectedCompletedSynchronously)
{
return;
}
auto error = owner_.hosting_.FabricActivatorClientObj->EndCleanupSecurityPrincipals(operation);
WriteTrace(
error.ToLogLevel(),
TraceEnvironmentManager,
owner_.Root.TraceId,
"{0}: End ConfigureSecurityPrincipals: error {1}",
appEnvironmentContext_->ApplicationId,
error);
if(!error.IsSuccess())
{
TryComplete(operation->Parent, error);
return;
}
// Disable log collection if enabled
CleanupLogCollectionPolicy(operation->Parent);
}
void CleanupLogCollectionPolicy(
AsyncOperationSPtr const & thisSPtr)
{
wstring const & path = appEnvironmentContext_->LogCollectionPath;
if (path.empty())
{
// Log collection policy not set, nothing to do
TryComplete(thisSPtr, lastError_);
return;
}
WriteNoise(
TraceEnvironmentManager,
owner_.Root.TraceId,
"{0}: Begin(logCollectionProvider->RemoveLogPaths): path {1}",
appEnvironmentContext_->ApplicationId,
path);
ASSERT_IFNOT(owner_.logCollectionProvider_, "Log collection provider should exist");
auto operation = owner_.logCollectionProvider_->BeginRemoveLogPaths(
path,
timeoutHelper_.GetRemainingTime(),
[this](AsyncOperationSPtr const & operation) { this->FinishLogCollectionCleanup(operation, false); },
thisSPtr);
FinishLogCollectionCleanup(operation, true);
}
void FinishLogCollectionCleanup(AsyncOperationSPtr const & operation, bool expectedCompletedSynchronously)
{
if (operation->CompletedSynchronously != expectedCompletedSynchronously)
{
return;
}
auto error = owner_.logCollectionProvider_->EndRemoveLogPaths(operation);
WriteTrace(
error.ToLogLevel(),
TraceEnvironmentManager,
owner_.Root.TraceId,
"{0}: End(logCollectionProvider->RemoveLogPaths): error {1}",
appEnvironmentContext_->ApplicationId,
error);
if (!error.IsSuccess())
{
lastError_.Overwrite(error);
}
TryComplete(operation->Parent, lastError_);
}
private:
EnvironmentManager & owner_;
ApplicationEnvironmentContextSPtr const appEnvironmentContext_;
TimeoutHelper timeoutHelper_;
ErrorCode lastError_;
};
ErrorCode EnvironmentManager::GetIsolatedNicIpAddress(std::wstring & ipAddress)
{
ErrorCode error(ErrorCodeValue::Success);
ipAddress = hosting_.FabricNodeConfigObj.IPAddressOrFQDN;
std::wstring isolatedNetworkInterfaceName;
error = FabricEnvironment::GetFabricIsolatedNetworkInterfaceName(isolatedNetworkInterfaceName);
if (!error.IsSuccess())
{
WriteWarning(
TraceEnvironmentManager,
Root.TraceId,
"GetFabricIsolatedNetworkInterfaceName failed: ErrorCode={0}",
error);
return error;
}
auto ipInterface = wformatString("({0})", isolatedNetworkInterfaceName);
wstring ipAddressOnAdapter;
error = IpUtility::GetIpAddressOnAdapter(ipInterface, AF_INET, ipAddressOnAdapter);
if (!error.IsSuccess())
{
WriteWarning(
TraceEnvironmentManager,
Root.TraceId,
"GetIpAddressOnAdapter failed: ErrorCode={0}",
error);
// Linux primary interface will not have an ip on it,
// if open network is set up. The host ip is being used in this case
// and we return success error code.
#if defined(PLATFORM_UNIX)
WriteInfo(
TraceEnvironmentManager,
Root.TraceId,
"GetIsolatedNicIpAddress: Using IP {0} for interface [{1}]",
ipAddress,
isolatedNetworkInterfaceName);
return ErrorCodeValue::Success;
#endif
return error;
}
if (!ipAddressOnAdapter.empty())
{
ipAddress = ipAddressOnAdapter;
}
WriteInfo(
TraceEnvironmentManager,
Root.TraceId,
"GetFabricIsolatedNetworkInterfaceName: found IP {0} for interface [{1}]",
ipAddress,
isolatedNetworkInterfaceName);
return error;
}
// ********************************************************************************************************************
// EnvironmentManager::SetupServicePackageInstanceAsyncOperation Implementation
//
class EnvironmentManager::SetupServicePackageInstanceAsyncOperation
: public AsyncOperation,
TextTraceComponent<TraceTaskCodes::Hosting>
{
DENY_COPY(SetupServicePackageInstanceAsyncOperation)
public:
SetupServicePackageInstanceAsyncOperation(
__in EnvironmentManager & owner,
ApplicationEnvironmentContextSPtr const & appEnvironmentContext,
wstring const & applicationName,
ServicePackageInstanceIdentifier const & servicePackageInstanceId,
ServiceModel::ServicePackageDescription const & servicePackageDescription,
int64 instanceId,
TimeSpan const timeout,
AsyncCallback const & callback,
AsyncOperationSPtr const & parent)
: AsyncOperation(callback, parent),
owner_(owner),
appEnvironmentContext_(appEnvironmentContext),
applicationName_(applicationName),
servicePackageInstanceId_(servicePackageInstanceId),
instanceId_(instanceId),
servicePackageDescription_(servicePackageDescription),
timeoutHelper_(timeout),
packageInstanceEnvironmentContext(),
endpoints_(),
firewallPortsToOpen_(),
endpointsAclCount_(0),
setupContainerGroup_(false),
lastError_(ErrorCodeValue::Success),
endpointsWithACL_(),
runLayout_(owner_.Hosting.DeploymentFolder),
codePackageDescriptionsWithNetwork_(),
servicePackageEnvironmentId_(EnvironmentManager::GetServicePackageIdentifier(servicePackageInstanceId_.ToString(), instanceId_))
{
packageInstanceEnvironmentContext = make_shared<ServicePackageInstanceEnvironmentContext>(servicePackageInstanceId_);
packageInstanceEnvironmentContext->AddNetworks(NetworkType::Enum::Isolated, GetIsolatedNetworks());
}
virtual ~SetupServicePackageInstanceAsyncOperation()
{
}
static ErrorCode SetupServicePackageInstanceAsyncOperation::End(
AsyncOperationSPtr const & operation,
__out ServicePackageInstanceEnvironmentContextSPtr & packageEnvironmentContext)
{
auto thisPtr = AsyncOperation::End<SetupServicePackageInstanceAsyncOperation>(operation);
packageEnvironmentContext = move(thisPtr->packageInstanceEnvironmentContext);
return thisPtr->Error;
}
protected:
void OnStart(AsyncOperationSPtr const & thisSPtr)
{
SetupEnvironment(thisSPtr);
}
#if defined(PLATFORM_UNIX)
bool ContainerGroupIsolated()
{
ContainerIsolationMode::Enum isolationMode = ContainerIsolationMode::process;
ContainerIsolationMode::FromString(this->servicePackageDescription_.ContainerPolicyDescription.Isolation, isolationMode);
return (isolationMode == ContainerIsolationMode::hyperv);
}
#endif
private:
void SetupEnvironment(AsyncOperationSPtr const & thisSPtr)
{
WriteInfo(
TraceEnvironmentManager,
owner_.Root.TraceId,
"SetupServicePackage: Id={0}, RunAsPolicyEnabled={1}, EndpointProviderEnabled={2}",
servicePackageEnvironmentId_,
HostingConfig::GetConfig().RunAsPolicyEnabled,
HostingConfig::GetConfig().EndpointProviderEnabled);
SetupDiagnostics(thisSPtr);
}
std::vector<std::wstring> GetIsolatedNetworks()
{
std::vector<std::wstring> isolatedNetworks = servicePackageDescription_.GetNetworks(NetworkType::IsolatedStr);
WriteNoise(
TraceEnvironmentManager,
owner_.Root.TraceId,
"Isolated networks retrieved for {0} exclusive mode {1} count {2}.",
servicePackageInstanceId_,
this->servicePackageInstanceId_.ActivationContext.IsExclusive,
isolatedNetworks.size());
return isolatedNetworks;
}
void PopulateCodePackageNetworkConfig()
{
// check service package level network policies
NetworkType::Enum networkType = NetworkType::Enum::Other;
for (auto const & cnp : servicePackageDescription_.NetworkPolicies.ContainerNetworkPolicies)
{
if (StringUtility::CompareCaseInsensitive(cnp.NetworkRef, NetworkType::OpenStr) != 0 &&
StringUtility::CompareCaseInsensitive(cnp.NetworkRef, NetworkType::OtherStr) != 0)
{
networkType = networkType | NetworkType::Enum::Isolated;
}
else if (StringUtility::CompareCaseInsensitive(cnp.NetworkRef, NetworkType::OpenStr) == 0)
{
networkType = networkType | NetworkType::Enum::Open;
}
}
// populate code package network configs
for (auto it = servicePackageDescription_.DigestedCodePackages.begin(); it != servicePackageDescription_.DigestedCodePackages.end(); ++it)
{
it->ContainerPolicies.NetworkConfig.Type = it->ContainerPolicies.NetworkConfig.Type | networkType;
}
}
void AssignResources(AsyncOperationSPtr const & thisSPtr)
{
int containerCount = 0;
#if defined(PLATFORM_UNIX)
bool isolated = servicePackageDescription_.ContainerPolicyDescription.Isolation
== ContainerIsolationMode::EnumToString(ContainerIsolationMode::hyperv);
WriteNoise(
TraceEnvironmentManager,
owner_.Root.TraceId,
"SetupServicePackage: Id={0}, Isolation={1}",
servicePackageEnvironmentId_, servicePackageDescription_.ContainerPolicyDescription.Isolation);
#endif
for (auto it = servicePackageDescription_.DigestedCodePackages.begin(); it != servicePackageDescription_.DigestedCodePackages.end(); ++it)
{
if (it->CodePackage.EntryPoint.EntryPointType == EntryPointType::ContainerHost)
{
containerCount++;
}
if ((it->ContainerPolicies.NetworkConfig.Type & NetworkType::Enum::Open) == NetworkType::Enum::Open ||
(packageInstanceEnvironmentContext->NetworkExists(NetworkType::Enum::Isolated)))
{
#if defined(PLATFORM_UNIX)
if (codePackageDescriptionsWithNetwork_.empty())
{
codePackageDescriptionsWithNetwork_.push_back(*it);
}
#else
codePackageDescriptionsWithNetwork_.push_back(*it);
#endif
}
}
#if defined(PLATFORM_UNIX)
if (containerCount > 1 || ContainerGroupIsolated())
{
setupContainerGroup_ = true;
}
#endif
if (codePackageDescriptionsWithNetwork_.empty())
{
if (setupContainerGroup_)
{
#if defined(PLATFORM_UNIX)
if (ContainerGroupIsolated())
{
SetupEndpoints(thisSPtr);
return;
}
#endif
SetupContainerGroup(thisSPtr, ServiceModel::NetworkType::Other, L"", std::map<std::wstring, std::wstring>());
}
else
{
SetupEndpoints(thisSPtr);
}
}
else
{
// Assign open network resources
BeginAssignIpAddress(thisSPtr);
}
}
void BeginAssignIpAddress(AsyncOperationSPtr const & thisSPtr)
{
vector<wstring> codePackages;
for (auto const & cp : codePackageDescriptionsWithNetwork_)
{
if ((cp.ContainerPolicies.NetworkConfig.Type & NetworkType::Enum::Open) == NetworkType::Enum::Open)
{
codePackages.push_back(cp.Name);
}
}
bool codePackagesNeedIpsAllocated = (codePackages.size() > 0);
if (codePackagesNeedIpsAllocated)
{
std::vector<std::wstring> networkNames;
if (HostingConfig::GetConfig().LocalNatIpProviderEnabled)
{
networkNames.push_back(HostingConfig::GetConfig().LocalNatIpProviderNetworkName);
}
else
{
networkNames.push_back(Common::ContainerEnvironment::ContainerNetworkName);
}
packageInstanceEnvironmentContext->AddNetworks(NetworkType::Enum::Open, networkNames);
}
auto operation = owner_.Hosting.FabricActivatorClientObj->BeginAssignIpAddresses(
this->servicePackageInstanceId_.ToString(),
codePackages,
false,
timeoutHelper_.GetRemainingTime(),
[this, codePackagesNeedIpsAllocated](AsyncOperationSPtr const & operation)
{
this->OnAssignIpAddressCompleted(operation, false, codePackagesNeedIpsAllocated);
},
thisSPtr);
this->OnAssignIpAddressCompleted(operation, true, codePackagesNeedIpsAllocated);
}
void OnAssignIpAddressCompleted(
AsyncOperationSPtr const & operation,
bool expectedCompletedSynchronously,
bool codePackagesNeedIpsAllocated)
{
if (operation->CompletedSynchronously != expectedCompletedSynchronously)
{
return;
}
vector<wstring> assignedIps;
auto error = owner_.Hosting.FabricActivatorClientObj->EndAssignIpAddresses(operation, assignedIps);
// Invoke clean up only if the request for open network resources resulted in error
if (!error.IsSuccess() && codePackagesNeedIpsAllocated)
{
WriteWarning(
TraceEnvironmentManager,
owner_.Root.TraceId,
"{0}: End BeginAssignIpAddress. Error {1}",
servicePackageEnvironmentId_,
error);
lastError_.Overwrite(error);
CleanupOnError(operation->Parent, lastError_);
return;
}
for (auto it = assignedIps.begin(); it != assignedIps.end(); ++it)
{
wstring ipAddress;
wstring codePackageName;
StringUtility::SplitOnce(*it, ipAddress, codePackageName, L',');
packageInstanceEnvironmentContext->AddAssignedIpAddresses(codePackageName, ipAddress);
}
// Assign overlay network resources
BeginAssignOverlayNetworkResources(operation->Parent);
}
void BeginAssignOverlayNetworkResources(AsyncOperationSPtr const & thisSPtr)
{
std::map<std::wstring, std::vector<std::wstring>> codePackageNetworkNames;
if (packageInstanceEnvironmentContext->NetworkExists(NetworkType::Enum::Isolated))
{
std::vector<std::wstring> networkNames;
packageInstanceEnvironmentContext->GetNetworks(NetworkType::Enum::Isolated, networkNames);
for (auto const & cp : codePackageDescriptionsWithNetwork_)
{
codePackageNetworkNames.insert(make_pair(cp.Name, networkNames));
}
}
bool codePackagesNeedOverlayNetworkResourcesAllocated = (codePackageNetworkNames.size() > 0);
wstring nodeIpAddress = L"";
if (codePackagesNeedOverlayNetworkResourcesAllocated)
{
auto error = owner_.GetIsolatedNicIpAddress(nodeIpAddress);
if (!error.IsSuccess())
{
lastError_.Overwrite(error);
CleanupOnError(thisSPtr, lastError_);
return;
}
}
auto operation = owner_.Hosting.FabricActivatorClientObj->BeginManageOverlayNetworkResources(
owner_.hosting_.NodeName,
nodeIpAddress,
this->servicePackageInstanceId_.ToString(),
codePackageNetworkNames,
ManageOverlayNetworkAction::Assign,
timeoutHelper_.GetRemainingTime(),
[this, codePackagesNeedOverlayNetworkResourcesAllocated](AsyncOperationSPtr const & operation)
{
this->OnAssignOverlayNetworkResourcesCompleted(operation, false, codePackagesNeedOverlayNetworkResourcesAllocated);
},
thisSPtr);
this->OnAssignOverlayNetworkResourcesCompleted(operation, true, codePackagesNeedOverlayNetworkResourcesAllocated);
}
void OnAssignOverlayNetworkResourcesCompleted(
AsyncOperationSPtr const & operation,
bool expectedCompletedSynchronously,
bool codePackagesNeedOverlayNetworkResourcesAllocated)
{
if (operation->CompletedSynchronously != expectedCompletedSynchronously)
{
return;
}
std::map<wstring, std::map<std::wstring, std::wstring>> assignedOverlayNetworkResources;
auto error = owner_.Hosting.FabricActivatorClientObj->EndManageOverlayNetworkResources(operation, assignedOverlayNetworkResources);
// Invoke clean up only if the request for overlay network resources resulted in error
if (!error.IsSuccess() && codePackagesNeedOverlayNetworkResourcesAllocated)
{
WriteWarning(
TraceEnvironmentManager,
owner_.Root.TraceId,
"{0}: End BeginAssignOverlayNetworkResources. Error {1}",
servicePackageEnvironmentId_,
error);
lastError_.Overwrite(error);
CleanupOnError(operation->Parent, lastError_);
return;
}
for (auto it = assignedOverlayNetworkResources.begin(); it != assignedOverlayNetworkResources.end(); ++it)
{
packageInstanceEnvironmentContext->AddAssignedOverlayNetworkResources(it->first, it->second);
}
if (setupContainerGroup_)
{
std::wstring openNetworkAssignedIp;
std::map<std::wstring, std::wstring> overlayNetworkResources;
NetworkType::Enum networkType = NetworkType::Enum::Other;
GetGroupAssignedNetworkResource(openNetworkAssignedIp, overlayNetworkResources, networkType);
#if defined(PLATFORM_UNIX)
if (ContainerGroupIsolated())
{
SetupEndpoints(operation->Parent);
return;
}
#endif
SetupContainerGroup(operation->Parent, networkType, openNetworkAssignedIp, overlayNetworkResources);
}
else
{
SetupEndpoints(operation->Parent);
}
}
void GetGroupAssignedNetworkResource(
std::wstring & openNetworkAssignedIp,
std::map<std::wstring, std::wstring> & overlayNetworkResources,
ServiceModel::NetworkType::Enum & networkType)
{
wstring networkResourceList;
packageInstanceEnvironmentContext->GetGroupAssignedNetworkResource(openNetworkAssignedIp, overlayNetworkResources, networkType, networkResourceList);
WriteNoise(
TraceEnvironmentManager,
owner_.Root.TraceId,
"Id={0}, Container Group Network Type={1}, Network Resource List={2}",
packageInstanceEnvironmentContext->ServicePackageInstanceId.ToString(),
networkType,
networkResourceList);
}
void SetupContainerGroup(
AsyncOperationSPtr const & thisSPtr,
ServiceModel::NetworkType::Enum networkType,
wstring const & openNetworkAssignedIp,
std::map<std::wstring, std::wstring> const & overlayNetworkResources)
{
WriteInfo(
TraceEnvironmentManager,
owner_.Root.TraceId,
"{0}: Start SetupContainerGroup.",
servicePackageEnvironmentId_);
vector<wstring> dnsServers;
#if defined(PLATFORM_UNIX)
std::vector<PodPortMapping> portMappings;
if (ContainerGroupIsolated())
{
for (auto pb : this->servicePackageDescription_.ContainerPolicyDescription.PortBindings)
{
if (this->servicePackageDescription_.DigestedResources.DigestedEndpoints.find(pb.EndpointResourceRef)
!= this->servicePackageDescription_.DigestedResources.DigestedEndpoints.end())
{
EndpointDescription const &ep = this->servicePackageDescription_.DigestedResources.DigestedEndpoints.at(pb.EndpointResourceRef).Endpoint;
int port = ep.Port;
if (port == 0)
{
for (auto iep : packageInstanceEnvironmentContext->Endpoints)
{
if (iep->Name == ep.Name)
{
port = iep->Port; break;
}
}
}
portMappings.emplace_back(
owner_.Hosting.FabricNodeConfigObj.IPAddressOrFQDN,
ep.Protocol == ProtocolType::Udp ? PodPortMapping::UDP : PodPortMapping::TCP,
pb.ContainerPort,
port);
}
}
}
packageInstanceEnvironmentContext->SetContainerGroupIsolatedFlag(ContainerGroupIsolated());
// If both open and isolated network resources are empty, then NAT port mappings are passed to container group set up.
ContainerPodDescription podDescription(
this->servicePackageDescription_.ContainerPolicyDescription.Hostname,
ContainerGroupIsolated() ? ContainerIsolationMode::hyperv : ContainerIsolationMode::process,
(openNetworkAssignedIp.empty() && overlayNetworkResources.empty()) ? portMappings : std::vector<PodPortMapping>());
// For Isolated and NAT, we end up using the constant defined here - ClearContainerHelper::ClearContainerLogHelperConstants::DnsServiceIP4Address
// For MIP, we find the vm host ip.
if ((networkType & NetworkType::Enum::Open) == NetworkType::Enum::Open && !openNetworkAssignedIp.empty())
{
std::wstring dnsServerIP;
auto err = Common::IpUtility::GetAdapterAddressOnTheSameNetwork(openNetworkAssignedIp, /*out*/dnsServerIP);
if (err.IsSuccess())
{
dnsServers.push_back(dnsServerIP);
}
else
{
WriteWarning(
TraceEnvironmentManager,
owner_.Root.TraceId,
"Failed to get DNS IP address for container with assigned IP={0} : ErrorCode={1}",
openNetworkAssignedIp, err);
}
}
#endif
packageInstanceEnvironmentContext->SetContainerGroupSetupFlag(true);
auto op = owner_.Hosting.FabricActivatorClientObj->BeginSetupContainerGroup(
this->servicePackageInstanceId_.ToString(),
networkType,
openNetworkAssignedIp,
overlayNetworkResources,
dnsServers,
owner_.Hosting.RunLayout.GetApplicationFolder(appEnvironmentContext_->ApplicationId.ToString()),
StringUtility::ToWString(appEnvironmentContext_->ApplicationId.ApplicationNumber),
this->applicationName_,
this->servicePackageInstanceId_.ActivationContext.ToString(),
this->servicePackageInstanceId_.PublicActivationId,
this->servicePackageDescription_.ResourceGovernanceDescription,
#if defined(PLATFORM_UNIX)
podDescription,
#endif
false,
timeoutHelper_.GetRemainingTime(),
[this](AsyncOperationSPtr const & op)
{
this->OnSetupContainerGroupCompleted(op, false);
},
thisSPtr);
OnSetupContainerGroupCompleted(op, true);
}
void OnSetupContainerGroupCompleted(AsyncOperationSPtr const & operation, bool expectedCompletedSynchronously)
{
if (operation->CompletedSynchronously != expectedCompletedSynchronously)
{
return;
}
wstring containerName;
auto error = owner_.Hosting.FabricActivatorClientObj->EndSetupContainerGroup(operation, containerName);
if (!error.IsSuccess())
{
WriteWarning(
TraceEnvironmentManager,
owner_.Root.TraceId,
"{0}: End SetupContainerGroup. Error{1}",
servicePackageEnvironmentId_,
error);
TryComplete(operation->Parent, error);
return;
}
packageInstanceEnvironmentContext->AddGroupContainerName(containerName);
WriteInfo(
TraceEnvironmentManager,
owner_.Root.TraceId,
"Container group with container name {0} set for service instance {1}",
containerName,
this->servicePackageInstanceId_);
#if defined(PLATFORM_UNIX)
if (ContainerGroupIsolated())
{
TryComplete(operation->Parent, error);
return;
}
#endif
SetupEndpoints(operation->Parent);
}
void SetupEndpoints(AsyncOperationSPtr const & thisSPtr)
{
if (HostingConfig::GetConfig().EndpointProviderEnabled)
{
ASSERT_IFNOT(owner_.endpointProvider_, "Endpoint provider should exist");
auto endpointIsolatedNetworkMap = servicePackageDescription_.GetEndpointNetworkMap(NetworkType::IsolatedStr);
for (auto endpointIter = servicePackageDescription_.DigestedResources.DigestedEndpoints.begin();
endpointIter != servicePackageDescription_.DigestedResources.DigestedEndpoints.end();
++endpointIter)
{
EndpointResourceSPtr endpointResource = make_shared<EndpointResource>(
endpointIter->second.Endpoint,
endpointIter->second.EndpointBindingPolicy);
auto error = SetEndPointResourceIpAddress(endpointResource, endpointIsolatedNetworkMap);
if (!error.IsSuccess())
{
WriteWarning(
TraceEnvironmentManager,
owner_.Root.TraceId,
"SetEndPointResourceIpAddress() finished with Error={0} for endpoint resource={1}.",
error,
endpointResource->Name);
CleanupOnError(thisSPtr, error);
return;
}
if (!endpointIter->second.SecurityAccessPolicy.ResourceRef.empty())
{
ASSERT_IFNOT(
StringUtility::AreEqualCaseInsensitive(endpointIter->second.SecurityAccessPolicy.ResourceRef, endpointIter->second.Endpoint.Name),
"Error in the DigestedEndpoint element. The ResourceRef name '{0}' in SecurityAccessPolicy does not match the resource name '{1}'.",
endpointIter->second.SecurityAccessPolicy.ResourceRef,
endpointIter->second.Endpoint.Name);
SecurityPrincipalInformationSPtr principalInfo;
error = appEnvironmentContext_->PrincipalsContext->TryGetPrincipalInfo(endpointIter->second.SecurityAccessPolicy.PrincipalRef, principalInfo);
if (!error.IsSuccess())
{
WriteWarning(
TraceEnvironmentManager,
owner_.Root.TraceId,
"Error getting the User object '{0}'.ApplicationId={1}",
endpointIter->second.SecurityAccessPolicy.PrincipalRef,
servicePackageInstanceId_.ApplicationId);
CleanupOnError(thisSPtr, error);
return;
}
endpointResource->AddSecurityAccessPolicy(principalInfo, endpointIter->second.SecurityAccessPolicy.Rights);
}
else if (EndpointProvider::IsHttpEndpoint(endpointResource) &&
!owner_.isAdminUser_)
{
SecurityPrincipalInformationSPtr secPrincipal = make_shared<SecurityPrincipalInformation>(
owner_.currentUserSid_,
owner_.currentUserSid_,
owner_.currentUserSid_,
false);
endpointResource->AddSecurityAccessPolicy(secPrincipal, ServiceModel::GrantAccessType::Full);
}
if (endpointIter->second.Endpoint.ExplicitPortSpecified)
{
WriteInfo(TraceEnvironmentManager,
owner_.Root.TraceId,
"ExplicitPort specified for Endpoint '{0}' Port {1}. ServicePackageInstanceId={2} Network Enabled={3} CodePackageRef={4}",
endpointIter->second.Endpoint.Name,
endpointIter->second.Endpoint.Port,
servicePackageInstanceId_,
!codePackageDescriptionsWithNetwork_.empty(),
endpointResource->EndpointDescriptionObj.CodePackageRef);
if (HostingConfig::GetConfig().FirewallPolicyEnabled && codePackageDescriptionsWithNetwork_.empty())
{
firewallPortsToOpen_.push_back(endpointIter->second.Endpoint.Port);
}
}
else
{
error = owner_.endpointProvider_->AddEndpoint(endpointResource);
if (!error.IsSuccess())
{
CleanupOnError(thisSPtr, error);
return;
}
}
if (EndpointProvider::IsHttpEndpoint(endpointResource))
{
EnvironmentResource::ResourceAccess resourceAccess;
error = endpointResource->GetDefaultSecurityAccess(resourceAccess);
if (error.IsSuccess())
{
endpointsWithACL_.push_back(endpointResource);
}
else if (error.IsError(ErrorCodeValue::NotFound))
{
AddEndpointResource(endpointResource);
}
else
{
WriteWarning(
TraceEnvironmentManager,
owner_.Root.TraceId,
"Error getting default security access for endpointResource '{0}'.Error={1}",
endpointResource,
error);
CleanupOnError(thisSPtr, error);
return;
}
}
else
{
AddEndpointResource(endpointResource);
}
}
}
else
{
if (servicePackageDescription_.DigestedResources.DigestedEndpoints.size() > 0)
{
WriteWarning(
TraceEnvironmentManager,
owner_.Root.TraceId,
"EndpointProvider is not enabled on this node, however the ServicePackage {0} contains {1} endpoints.",
servicePackageEnvironmentId_,
servicePackageDescription_.DigestedResources.DigestedEndpoints.size());
auto error = ErrorCode(ErrorCodeValue::EndpointProviderNotEnabled);
error.ReadValue();
CleanupOnError(thisSPtr, error);
return;
}
}
if (endpointsWithACL_.size() > 0)
{
this->SetupEndpointSecurity(thisSPtr);
}
else
{
ConfigureEndpointBindingAndFirewallPolicy(thisSPtr);
}
}
void SetupEndpointSecurity(AsyncOperationSPtr thisSPtr)
{
WriteInfo(
TraceEnvironmentManager,
owner_.Root.TraceId,
"Start ConfigureEndpointSecurity for servicepackage {0}",
servicePackageEnvironmentId_);
endpointsAclCount_.store(endpointsWithACL_.size());
EnvironmentResource::ResourceAccess resourceAccess;
for(auto iter = endpointsWithACL_.begin(); iter != endpointsWithACL_.end(); ++iter)
{
EndpointResourceSPtr endpointResource = *iter;
auto error = endpointResource->GetDefaultSecurityAccess(resourceAccess);
ASSERT_IFNOT(error.IsSuccess(), "endpointresource getdefaultsecurityaccess returned error {0}", error);
if(error.IsSuccess())
{
auto operation = owner_.Hosting.FabricActivatorClientObj->BeginConfigureEndpointSecurity(
resourceAccess.PrincipalInfo->SidString,
endpointResource->Port,
endpointResource->Protocol == ServiceModel::ProtocolType::Enum::Https,
false,
owner_.Hosting.FabricNodeConfigObj.IPAddressOrFQDN,
EnvironmentManager::GetServicePackageIdentifier(servicePackageInstanceId_.ToString(), instanceId_),
endpointResource->EndpointDescriptionObj.ExplicitPortSpecified,
[this, endpointResource](AsyncOperationSPtr const & operation)
{
this->OnEndpointSecurityConfigurationCompleted(endpointResource, operation, false);
},
thisSPtr);
this->OnEndpointSecurityConfigurationCompleted(endpointResource, operation, true);
}
}
}
ErrorCode SetEndPointResourceIpAddress(EndpointResourceSPtr const & endpointResource, std::map<std::wstring, std::wstring> endpointIsolatedNetworkMap)
{
auto codePackageRef = endpointResource->EndpointDescriptionObj.CodePackageRef;
auto nodeIpAddress = owner_.Hosting.FabricNodeConfigObj.IPAddressOrFQDN;
if (codePackageRef.empty())
{
endpointResource->IpAddressOrFqdn = nodeIpAddress;
WriteNoise(
TraceEnvironmentManager,
owner_.Root.TraceId,
"Endpoint resource '{0}' has no CodePackageRef. Assigning NodeIPAddress={1}.",
endpointResource->Name,
nodeIpAddress);
return ErrorCodeValue::Success;
}
wstring openNetworkAssignedIpAddress;
std::map<wstring, wstring> overlayNetworkResources;
auto error = packageInstanceEnvironmentContext->GetNetworkResourceAssignedToCodePackage(
codePackageRef,
openNetworkAssignedIpAddress,
overlayNetworkResources);
if (error.IsSuccess() && !overlayNetworkResources.empty())
{
auto endpointIter = endpointIsolatedNetworkMap.find(endpointResource->Name);
if (endpointIter != endpointIsolatedNetworkMap.end())
{
wstring isolatedNetworkName = endpointIter->second;
auto onrIter = overlayNetworkResources.find(isolatedNetworkName);
if (onrIter != overlayNetworkResources.end())
{
wstring overlayNetworkAssignedIpAddress;
wstring overlayNetworkAssignedMacAddress;
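// The overlay network resource value is encoded as "<ipAddress>,<macAddress>"; split on the first comma to get the assigned IP.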
StringUtility::SplitOnce(onrIter->second, overlayNetworkAssignedIpAddress, overlayNetworkAssignedMacAddress, L',');
if (!overlayNetworkAssignedIpAddress.empty())
{
endpointResource->IpAddressOrFqdn = overlayNetworkAssignedIpAddress;
WriteNoise(
TraceEnvironmentManager,
owner_.Root.TraceId,
"Endpoint resource '{0}' has CodePackageRef='{1}'. Found assigned IPAddress={2}.",
endpointResource->Name,
codePackageRef,
overlayNetworkAssignedIpAddress);
return error;
}
}
}
}
if (!openNetworkAssignedIpAddress.empty())
{
endpointResource->IpAddressOrFqdn = openNetworkAssignedIpAddress;
WriteNoise(
TraceEnvironmentManager,
owner_.Root.TraceId,
"Endpoint resource '{0}' has CodePackageRef='{1}'. Found assigned IPAddress={2}.",
endpointResource->Name,
codePackageRef,
openNetworkAssignedIpAddress);
return error;
}
if (error.IsError(ErrorCodeValue::NotFound) || endpointResource->IpAddressOrFqdn.empty())
{
endpointResource->IpAddressOrFqdn = nodeIpAddress;
WriteNoise(
TraceEnvironmentManager,
owner_.Root.TraceId,
"Endpoint resource '{0}' has CodePackageRef='{1}'. Assigned IPAddress not found. Assigning NodeIPAddress={2}.",
endpointResource->Name,
codePackageRef,
nodeIpAddress);
return ErrorCodeValue::Success;
}
return error;
}
void OnEndpointSecurityConfigurationCompleted(
EndpointResourceSPtr endpointResource,
AsyncOperationSPtr operation,
bool expectedCompletedSynhronously)
{
if (operation->CompletedSynchronously != expectedCompletedSynhronously)
{
return;
}
ErrorCode error = owner_.Hosting.FabricActivatorClientObj->EndConfigureEndpointSecurity(operation);
if(!error.IsSuccess())
{
lastError_.Overwrite(error);
// Null out the endpoint that failed so we don't add it to endpoints_.
// O(n) search for the endpoint is okay because the number of endpoints is not expected to be huge.
auto endpointResourceToRemove = std::find(endpointsWithACL_.begin(), endpointsWithACL_.end(), endpointResource);
ASSERT_IF(endpointResourceToRemove == endpointsWithACL_.end(), "Endpoint should exist in endpointWithACL");
*endpointResourceToRemove = nullptr;
}
uint64 pendingOperationCount = --endpointsAclCount_;
CheckPendingOperations(operation->Parent, pendingOperationCount);
}
void CheckPendingOperations(AsyncOperationSPtr thisSPtr, uint64 pendingOperationCount)
{
if (pendingOperationCount == 0)
{
WriteTrace(
lastError_.ToLogLevel(),
TraceEnvironmentManager,
owner_.Root.TraceId,
"{0}: End ConfigureEndpointSecurity. lastError {1}",
servicePackageEnvironmentId_,
lastError_);
for (auto & endpoint : endpointsWithACL_)
{
// Do not add the endpoint if it is empty because it failed ConfigureEndpointSecurity.
if (endpoint)
{
AddEndpointResource(endpoint);
}
}
if(!lastError_.IsSuccess())
{
CleanupOnError(thisSPtr, lastError_);
return;
}
ConfigureEndpointBindingAndFirewallPolicy(thisSPtr);
}
}
void AddEndpointResource(EndpointResourceSPtr & endpointResource)
{
endpoints_.push_back(endpointResource->EndpointDescriptionObj);
packageInstanceEnvironmentContext->AddEndpoint(move(endpointResource));
}
void ConfigureEndpointBindingAndFirewallPolicy(AsyncOperationSPtr const & thisSPtr)
{
if (!firewallPortsToOpen_.empty())
{
packageInstanceEnvironmentContext->FirewallPorts = firewallPortsToOpen_;
}
vector<EndpointCertificateBinding> endpointCertBindings;
owner_.GetEndpointBindingPolicies(
packageInstanceEnvironmentContext->Endpoints,
servicePackageDescription_.DigestedResources.DigestedCertificates,
endpointCertBindings);
if (!endpointCertBindings.empty() || !packageInstanceEnvironmentContext->FirewallPorts.empty())
{
WriteInfo(
TraceEnvironmentManager,
owner_.Root.TraceId,
"{0}: Start ConfigureEndpointBindingAndFirewallPolicy",
servicePackageEnvironmentId_);
auto operation = owner_.Hosting.FabricActivatorClientObj->BeginConfigureEndpointCertificateAndFirewallPolicy(
EnvironmentManager::GetServicePackageIdentifier(servicePackageInstanceId_.ToString(), instanceId_),
endpointCertBindings,
false,
false,
packageInstanceEnvironmentContext->FirewallPorts,
timeoutHelper_.GetRemainingTime(),
[this](AsyncOperationSPtr const & operation)
{
this->OnBeginConfigureEndpointCertificateAndFirewallPolicy(operation, false);
},
thisSPtr);
this->OnBeginConfigureEndpointCertificateAndFirewallPolicy(operation, true);
}
else
{
SetupContainerCertificates(thisSPtr);
}
}
void OnBeginConfigureEndpointCertificateAndFirewallPolicy(AsyncOperationSPtr operation, bool expectedCompletedSynchronously)
{
if (operation->CompletedSynchronously != expectedCompletedSynchronously)
{
return;
}
ErrorCode error = owner_.Hosting.FabricActivatorClientObj->EndConfigureEndpointCertificateAndFirewallPolicy(operation);
if (!error.IsSuccess())
{
WriteWarning(
TraceEnvironmentManager,
owner_.Root.TraceId,
"{0}: End ConfigureEndpointBindingAndFirewallPolicy. Error {1}",
servicePackageEnvironmentId_,
error);
CleanupOnError(operation->Parent, error);
return;
}
SetupContainerCertificates(operation->Parent);
}
void SetupContainerCertificates(AsyncOperationSPtr const & thisSPtr)
{
std::map<wstring, std::vector<ServiceModel::ContainerCertificateDescription>> allCertificateRef;
for (auto digestedCodePackage = servicePackageDescription_.DigestedCodePackages.begin();
digestedCodePackage != servicePackageDescription_.DigestedCodePackages.end();
++digestedCodePackage)
{
if (!digestedCodePackage->ContainerPolicies.CertificateRef.empty())
{
allCertificateRef[digestedCodePackage->CodePackage.Name] = digestedCodePackage->ContainerPolicies.CertificateRef;
}
}
if (!allCertificateRef.empty())
{
wstring applicationWorkFolder = runLayout_.GetApplicationWorkFolder(servicePackageInstanceId_.ApplicationId.ToString());
wstring certificateFolder = Path::Combine(applicationWorkFolder, L"Certificates_" + servicePackageDescription_.ManifestName);
ErrorCode error = Directory::Create2(certificateFolder);
if (!error.IsSuccess())
{
WriteError(
TraceEnvironmentManager,
owner_.Root.TraceId,
"Error creating directory {0}. Error={1}",
certificateFolder,
error);
CleanupOnError(thisSPtr, error);
return;
}
WriteInfo(
TraceEnvironmentManager,
owner_.Root.TraceId,
"{0}: Start ConfigureContainerCertificateExport.",
servicePackageEnvironmentId_);
auto operation = owner_.Hosting.FabricActivatorClientObj->BeginConfigureContainerCertificateExport(
allCertificateRef,
certificateFolder,
timeoutHelper_.GetRemainingTime(),
[this](AsyncOperationSPtr const & operation)
{
this->OnConfigureContainerCertificateExportCompleted(operation, false);
},
thisSPtr);
this->OnConfigureContainerCertificateExportCompleted(operation, true);
}
else
{
WriteResources(thisSPtr);
}
}
void OnConfigureContainerCertificateExportCompleted(AsyncOperationSPtr operation, bool expectedCompletedSynchronously)
{
if (operation->CompletedSynchronously != expectedCompletedSynchronously)
{
return;
}
map<std::wstring, std::wstring> certificatePaths;
map<std::wstring, std::wstring> certificatePasswordPaths;
ErrorCode error = owner_.Hosting.FabricActivatorClientObj->EndConfigureContainerCertificateExport(operation, certificatePaths, certificatePasswordPaths);
if (!error.IsSuccess())
{
WriteWarning(
TraceEnvironmentManager,
owner_.Root.TraceId,
"{0}: End ConfigureContainerCertificateExport. Error {1}",
servicePackageEnvironmentId_,
error);
CleanupOnError(operation->Parent, error);
return;
}
packageInstanceEnvironmentContext->AddCertificatePaths(certificatePaths, certificatePasswordPaths);
WriteResources(operation->Parent);
}
void WriteResources(AsyncOperationSPtr const & thisSPtr)
{
// Write the endpoint file
wstring endpointFileName = runLayout_.GetEndpointDescriptionsFile(
servicePackageInstanceId_.ApplicationId.ToString(),
servicePackageInstanceId_.ServicePackageName,
servicePackageInstanceId_.PublicActivationId);
bool isSuccess = EndpointDescription::WriteToFile(endpointFileName, endpoints_);
if(isSuccess)
{
WriteNoise(
TraceEnvironmentManager,
owner_.Root.TraceId,
"EndpointDescription successfully written to file {0}, ServicePackageInstanceId={1}, NodeVersion={2}.",
endpointFileName,
servicePackageEnvironmentId_,
owner_.Hosting.FabricNodeConfigObj.NodeVersion);
#if defined(PLATFORM_UNIX)
if (ContainerGroupIsolated())
{
std::wstring openNetworkAssignedIp;
std::map<std::wstring, std::wstring> overlayNetworkResources;
NetworkType::Enum networkType = NetworkType::Enum::Other;
GetGroupAssignedNetworkResource(openNetworkAssignedIp, overlayNetworkResources, networkType);
SetupContainerGroup(thisSPtr, networkType, openNetworkAssignedIp, overlayNetworkResources);
return;
}
#endif
TryComplete(thisSPtr, ErrorCode(ErrorCodeValue::Success));
return;
}
else
{
WriteWarning(
TraceEnvironmentManager,
owner_.Root.TraceId,
"Could not write the endpoints to file {0}, ServicePackageInstanceId={1}, NodeVersion={2}. Cleaning the ServicePackageInstanceEnvironmentContext and retrying.",
endpointFileName,
servicePackageEnvironmentId_,
owner_.Hosting.FabricNodeConfigObj.NodeVersion);
CleanupOnError(thisSPtr, ErrorCode(ErrorCodeValue::OperationFailed));
return;
}
}
void SetupDiagnostics(AsyncOperationSPtr const & thisSPtr)
{
#if !defined(PLATFORM_UNIX)
ASSERT_IFNOT(owner_.etwSessionProvider_, "ETW session provider should exist");
auto error = owner_.etwSessionProvider_->SetupServiceEtw(servicePackageDescription_, packageInstanceEnvironmentContext);
if(!error.IsSuccess())
{
WriteWarning(
TraceEnvironmentManager,
owner_.Root.TraceId,
"{0}: End SetupServiceEtw. Error {1}",
servicePackageEnvironmentId_,
error);
CleanupOnError(thisSPtr, error);
return;
}
#endif
ASSERT_IFNOT(owner_.crashDumpProvider_, "Crash dump provider should exist");
WriteInfo(
TraceEnvironmentManager,
owner_.Root.TraceId,
"{0}: Start BeginSetupServiceCrashDumps.",
servicePackageEnvironmentId_);
auto operation = owner_.crashDumpProvider_->BeginSetupServiceCrashDumps(
servicePackageDescription_,
packageInstanceEnvironmentContext,
timeoutHelper_.GetRemainingTime(),
[this](AsyncOperationSPtr const & operation) { this->FinishSetupServiceCrashDumps(operation, false); },
thisSPtr);
this->FinishSetupServiceCrashDumps(operation, true);
}
void FinishSetupServiceCrashDumps(AsyncOperationSPtr const & operation, bool expectedCompletedSynhronously)
{
if (operation->CompletedSynchronously != expectedCompletedSynhronously)
{
return;
}
auto error = owner_.crashDumpProvider_->EndSetupServiceCrashDumps(operation);
if(!error.IsSuccess())
{
WriteWarning(
TraceEnvironmentManager,
owner_.Root.TraceId,
"{0}: End SetupServiceCrashDumps. Error {1}",
servicePackageEnvironmentId_,
error);
CleanupOnError(operation->Parent, error);
return;
}
// translate service package network policies to
// code package network config
PopulateCodePackageNetworkConfig();
AssignResources(operation->Parent);
}
void CleanupOnError(AsyncOperationSPtr const & thisSPtr, ErrorCode const error)
{
// Call cleanup, best effort
lastError_.Overwrite(error);
WriteInfo(
TraceEnvironmentManager,
owner_.Root.TraceId,
"{0}: Begin(Setup->BeginCleanupServicePackageEnvironment due to error {1})",
servicePackageEnvironmentId_,
error);
auto operation = owner_.BeginCleanupServicePackageInstanceEnvironment(
packageInstanceEnvironmentContext,
servicePackageDescription_,
instanceId_,
timeoutHelper_.OriginalTimeout,
[this](AsyncOperationSPtr const & operation) { this->FinishCleanupServicePackageEnvironment(operation, false); },
thisSPtr);
this->FinishCleanupServicePackageEnvironment(operation, true);
}
void FinishCleanupServicePackageEnvironment(AsyncOperationSPtr const & operation, bool expectedCompletedSynhronously)
{
if (operation->CompletedSynchronously != expectedCompletedSynhronously)
{
return;
}
auto error = owner_.EndCleanupServicePackageInstanceEnvironment(operation);
WriteTrace(
error.ToLogLevel(),
TraceEnvironmentManager,
owner_.Root.TraceId,
"{0}: End(Setup->EndCleanupServicePackageEnvironment due to error {1}): error {2}",
servicePackageEnvironmentId_,
lastError_,
error);
// Complete with the saved error
TryComplete(operation->Parent, lastError_);
}
private:
EnvironmentManager & owner_;
ApplicationEnvironmentContextSPtr const appEnvironmentContext_;
wstring const applicationName_;
ServicePackageInstanceIdentifier const servicePackageInstanceId_;
ServiceModel::ServicePackageDescription servicePackageDescription_;
int64 instanceId_;
TimeoutHelper timeoutHelper_;
ServicePackageInstanceEnvironmentContextSPtr packageInstanceEnvironmentContext;
bool setupContainerGroup_;
vector<EndpointDescription> endpoints_;
vector<LONG> firewallPortsToOpen_;
atomic_uint64 endpointsAclCount_;
ErrorCode lastError_;
vector<EndpointResourceSPtr> endpointsWithACL_;
ImageModel::RunLayoutSpecification runLayout_;
vector<DigestedCodePackageDescription> codePackageDescriptionsWithNetwork_;
wstring servicePackageEnvironmentId_;
};
// ********************************************************************************************************************
// EnvironmentManager::CleanupServicePackageInstanceAsyncOperation Implementation
//
class EnvironmentManager::CleanupServicePackageInstanceAsyncOperation
: public AsyncOperation,
TextTraceComponent<TraceTaskCodes::Hosting>
{
DENY_COPY(CleanupServicePackageInstanceAsyncOperation)
public:
CleanupServicePackageInstanceAsyncOperation(
EnvironmentManager & owner,
ServicePackageInstanceEnvironmentContextSPtr const & packageEnvironmentContext,
ServiceModel::ServicePackageDescription const & servicePackageDescription,
int64 instanceId,
TimeSpan const timeout,
AsyncCallback const & callback,
AsyncOperationSPtr const & parent)
: AsyncOperation(callback, parent),
owner_(owner),
packageInstanceEnvironmentContext(packageEnvironmentContext),
servicePackageDescription_(servicePackageDescription),
instanceId_(instanceId),
timeoutHelper_(timeout),
pendingOperations_(0),
lastError_(ErrorCodeValue::Success),
servicePackageInstaceId_(EnvironmentManager::GetServicePackageIdentifier(packageInstanceEnvironmentContext->ServicePackageInstanceId.ToString(), instanceId_))
{
}
virtual ~CleanupServicePackageInstanceAsyncOperation()
{
}
static ErrorCode CleanupServicePackageInstanceAsyncOperation::End(
AsyncOperationSPtr const & operation)
{
auto thisPtr = AsyncOperation::End<CleanupServicePackageInstanceAsyncOperation>(operation);
return thisPtr->Error;
}
protected:
void OnStart(AsyncOperationSPtr const & thisSPtr)
{
// Note: For cleanup we use OriginalTimeout for each operation individually.
// This gives it a chance to clean up at least every resource even if one of them times out.
// If we still fail to clean up, then Abort should do it.
ASSERT_IFNOT(owner_.crashDumpProvider_, "Crash dump provider should exist");
WriteInfo(
TraceEnvironmentManager,
owner_.Root.TraceId,
"{0}: Start CleanupServiceCrashDumps.",
servicePackageInstaceId_);
auto operation = owner_.crashDumpProvider_->BeginCleanupServiceCrashDumps(
packageInstanceEnvironmentContext,
timeoutHelper_.OriginalTimeout,
[this](AsyncOperationSPtr const & operation) { this->FinishCleanupServiceCrashDumps(operation, false); },
thisSPtr);
this->FinishCleanupServiceCrashDumps(operation, true);
}
private:
void FinishCleanupServiceCrashDumps(AsyncOperationSPtr const & operation, bool expectedCompletedSynhronously)
{
if (operation->CompletedSynchronously != expectedCompletedSynhronously)
{
return;
}
auto error = owner_.crashDumpProvider_->EndCleanupServiceCrashDumps(operation);
if(!error.IsSuccess())
{
WriteWarning(
TraceEnvironmentManager,
owner_.Root.TraceId,
"{0}: End CleanupServiceCrashDumps. Error {1}",
servicePackageInstaceId_,
error);
lastError_.Overwrite(error);
}
if (!packageInstanceEnvironmentContext)
{
TryComplete(operation->Parent, lastError_);
}
else
{
auto thisSPtr = operation->Parent;
if (packageInstanceEnvironmentContext->SetupContainerGroup)
{
WriteInfo(
TraceEnvironmentManager,
owner_.Root.TraceId,
"{0}: Start SetupContainerGroup Cleanup",
servicePackageInstaceId_);
wstring openNetworkAssignedIp;
std::map<std::wstring, std::wstring> overlayNetworkResources;
ServiceModel::NetworkType::Enum networkType;
wstring networkResourceList;
packageInstanceEnvironmentContext->GetGroupAssignedNetworkResource(openNetworkAssignedIp, overlayNetworkResources, networkType, networkResourceList);
WriteNoise(
TraceEnvironmentManager,
owner_.Root.TraceId,
"Id={0}, Container Group Network Type={1}, Network Resource List={2}",
packageInstanceEnvironmentContext->ServicePackageInstanceId.ToString(),
networkType,
networkResourceList);
auto op = owner_.Hosting.FabricActivatorClientObj->BeginSetupContainerGroup(
packageInstanceEnvironmentContext->ServicePackageInstanceId.ToString(),
networkType,
openNetworkAssignedIp,
overlayNetworkResources,
vector<wstring>(),
wstring(),
wstring(),
wstring(),
wstring(),
wstring(),
ServicePackageResourceGovernanceDescription(),
#if defined(PLATFORM_UNIX)
ContainerPodDescription(),
#endif
true,
timeoutHelper_.OriginalTimeout,
[this](AsyncOperationSPtr const & op)
{
this->OnCleanupContainerGroupSetupCompleted(op, false);
}, thisSPtr);
OnCleanupContainerGroupSetupCompleted(op, true);
}
else
{
ReleaseAssignedIPs(operation->Parent);
}
}
}
void OnCleanupContainerGroupSetupCompleted(AsyncOperationSPtr const & operation, bool expectedCompletedSynchronously)
{
if (operation->CompletedSynchronously != expectedCompletedSynchronously)
{
return;
}
wstring result;
auto error = owner_.Hosting.FabricActivatorClientObj->EndSetupContainerGroup(operation, result);
if (!error.IsSuccess())
{
WriteWarning(
TraceEnvironmentManager,
owner_.Root.TraceId,
"{0}: End SetupContainerGroup Cleanup. Error {1}",
servicePackageInstaceId_,
error);
lastError_.Overwrite(error);
}
ReleaseAssignedIPs(operation->Parent);
}
void ReleaseAssignedIPs(AsyncOperationSPtr const & thisSPtr)
{
if (packageInstanceEnvironmentContext->HasIpsAssigned)
{
WriteInfo(
TraceEnvironmentManager,
owner_.Root.TraceId,
"{0}: Start AssignIpAddress Cleanup",
servicePackageInstaceId_);
auto operation = owner_.Hosting.FabricActivatorClientObj->BeginAssignIpAddresses(
packageInstanceEnvironmentContext->ServicePackageInstanceId.ToString(),
vector<wstring>(),
true,
timeoutHelper_.OriginalTimeout,
[this](AsyncOperationSPtr const & operation)
{
this->OnIPAddressesReleased(operation, false);
},
thisSPtr);
this->OnIPAddressesReleased(operation, true);
}
else if (packageInstanceEnvironmentContext->HasOverlayNetworkResourcesAssigned)
{
ReleaseOverlayNetworkResources(thisSPtr);
}
else
{
CleanupEndpointSecurity(thisSPtr);
}
}
void OnIPAddressesReleased(AsyncOperationSPtr const & operation, bool expectedCompletedSynchronously)
{
if (operation->CompletedSynchronously != expectedCompletedSynchronously)
{
return;
}
vector<wstring> result;
ErrorCode error = owner_.Hosting.FabricActivatorClientObj->EndAssignIpAddresses(operation, result);
if (!error.IsSuccess())
{
WriteWarning(
TraceEnvironmentManager,
owner_.Root.TraceId,
"{0}: End AssignIpAddress Cleanup. Error {1}",
servicePackageInstaceId_,
error);
lastError_.Overwrite(error);
}
ReleaseOverlayNetworkResources(operation->Parent);
}
void ReleaseOverlayNetworkResources(AsyncOperationSPtr const & thisSPtr)
{
if (packageInstanceEnvironmentContext->HasOverlayNetworkResourcesAssigned)
{
std::map<wstring, vector<wstring>> codePackageNetworkNames;
packageInstanceEnvironmentContext->GetCodePackageOverlayNetworkNames(codePackageNetworkNames);
wstring nodeIpAddress = L"";
if (!codePackageNetworkNames.empty())
{
auto error = owner_.GetIsolatedNicIpAddress(nodeIpAddress);
if (!error.IsSuccess())
{
lastError_.Overwrite(error);
}
}
auto operation = owner_.Hosting.FabricActivatorClientObj->BeginManageOverlayNetworkResources(
owner_.hosting_.NodeName,
nodeIpAddress,
packageInstanceEnvironmentContext->ServicePackageInstanceId.ToString(),
codePackageNetworkNames,
ManageOverlayNetworkAction::Unassign,
timeoutHelper_.GetRemainingTime(),
[this](AsyncOperationSPtr const & operation)
{
this->OnOverlayNetworkResourcesReleased(operation, false);
},
thisSPtr);
this->OnOverlayNetworkResourcesReleased(operation, true);
}
else
{
CleanupEndpointSecurity(thisSPtr);
}
}
void OnOverlayNetworkResourcesReleased(AsyncOperationSPtr const & operation, bool expectedCompletedSynchronously)
{
if (operation->CompletedSynchronously != expectedCompletedSynchronously)
{
return;
}
std::map<wstring, std::map<std::wstring, std::wstring>> assignedOverlayNetworkResources;
ErrorCode error = owner_.Hosting.FabricActivatorClientObj->EndManageOverlayNetworkResources(operation, assignedOverlayNetworkResources);
if (!error.IsSuccess())
{
WriteWarning(
TraceEnvironmentManager,
owner_.Root.TraceId,
"EndReleaseOverlayNetworkResources: ServicePackageId={0}, ErrorCode={1}.",
packageInstanceEnvironmentContext->ServicePackageInstanceId,
error);
lastError_.Overwrite(error);
}
CleanupEndpointSecurity(operation->Parent);
}
void CleanupEndpointSecurity(AsyncOperationSPtr const & thisSPtr)
{
vector<EndpointResourceSPtr> acldEndpoints;
if (HostingConfig::GetConfig().EndpointProviderEnabled)
{
ASSERT_IFNOT(owner_.endpointProvider_, "Endpoint provider must not be null as EndpointFiltering is enabled");
for(auto iter = packageInstanceEnvironmentContext->Endpoints.begin();
iter != packageInstanceEnvironmentContext->Endpoints.end();
++iter)
{
EndpointResourceSPtr endpointResource = *iter;
SecurityPrincipalInformationSPtr principalInfo;
auto error = EndpointProvider::GetPrincipal(endpointResource, principalInfo);
if(error.IsSuccess() &&
EndpointProvider::IsHttpEndpoint(endpointResource))
{
acldEndpoints.push_back(endpointResource);
}
else
{
if(!error.IsSuccess() &&
!error.IsError(ErrorCodeValue::NotFound))
{
WriteWarning(TraceEnvironmentManager,
owner_.Root.TraceId,
"Retrieve PrincipalInfo for endpoint {0} returned ErrorCode={1}. Continuing to remove other endpoints.",
endpointResource->Name,
error);
lastError_.Overwrite(error);
}
error = owner_.endpointProvider_->RemoveEndpoint(*iter);
if(!error.IsSuccess())
{
WriteWarning(
TraceEnvironmentManager,
owner_.Root.TraceId,
"RemoveEndpoint failed. ServicePackageId={0}, Endpoint={1}, ErrorCode={2}. Continuing to remove other endpoints.",
servicePackageInstaceId_,
(*iter)->Name,
error);
lastError_.Overwrite(error);
}
}
}
}
if(acldEndpoints.size() == 0)
{
CheckPendingOperations(thisSPtr, 0);
}
else
{
CleanupEndpointSecurity(acldEndpoints, thisSPtr);
}
}
void CleanupEndpointSecurity(vector<EndpointResourceSPtr> const & endpointResources, AsyncOperationSPtr thisSPtr)
{
pendingOperations_.store(endpointResources.size());
for(auto iter = endpointResources.begin(); iter != endpointResources.end(); iter++)
{
EndpointResourceSPtr endpointResource = *iter;
SecurityPrincipalInformationSPtr principalInfo;
auto error = EndpointProvider::GetPrincipal(endpointResource, principalInfo);
ASSERT_IFNOT(error.IsSuccess(), "EndpointProvider::GetPrincipal returned error {0}", error);
WriteInfo(
TraceEnvironmentManager,
owner_.Root.TraceId,
"{0}: Start ConfigureEndpointSecurity Cleanup. Endpoint Name:{1}, Port: {2}",
servicePackageInstaceId_,
endpointResource->Name,
endpointResource->Port);
auto operation = owner_.hosting_.FabricActivatorClientObj->BeginConfigureEndpointSecurity(
principalInfo->SidString,
endpointResource->Port,
endpointResource->Protocol == ServiceModel::ProtocolType::Enum::Https,
true,
owner_.Hosting.FabricNodeConfigObj.IPAddressOrFQDN,
EnvironmentManager::GetServicePackageIdentifier(packageInstanceEnvironmentContext->ServicePackageInstanceId.ToString(), instanceId_),
endpointResource->EndpointDescriptionObj.ExplicitPortSpecified,
[this, endpointResource](AsyncOperationSPtr const & operation)
{
this->OnEndpointSecurityConfigurationCompleted(endpointResource, operation, false);
},
thisSPtr);
this->OnEndpointSecurityConfigurationCompleted(endpointResource, operation, true);
}
}
void OnEndpointSecurityConfigurationCompleted(
EndpointResourceSPtr const & endpointResource,
AsyncOperationSPtr operation,
bool expectedCompletedSynhronously)
{
if (operation->CompletedSynchronously != expectedCompletedSynhronously)
{
return;
}
ErrorCode error = owner_.Hosting.FabricActivatorClientObj->EndConfigureEndpointSecurity(operation);
if(!error.IsSuccess())
{
WriteWarning(
TraceEnvironmentManager,
owner_.Root.TraceId,
"{0}: End ConfigureEndpointSecurity Cleanup: EndpointName={1} Port={2}, ErrorCode={3}.",
servicePackageInstaceId_,
endpointResource->Name,
endpointResource->Port,
error);
lastError_.Overwrite(error);
}
error = owner_.endpointProvider_->RemoveEndpoint(endpointResource);
if(!error.IsSuccess())
{
WriteWarning(
TraceEnvironmentManager,
owner_.Root.TraceId,
"RemoveEndpoint failed. ServicePackageId={0}, Endpoint={1}, ErrorCode={2}. Continuing to remove other endpoints.",
packageInstanceEnvironmentContext->ServicePackageInstanceId,
endpointResource->Name,
error);
lastError_.Overwrite(error);
}
uint64 pendingOperationCount = --pendingOperations_;
CheckPendingOperations(operation->Parent, pendingOperationCount);
}
void CheckPendingOperations(AsyncOperationSPtr thisSPtr, uint64 pendingOperationCount)
{
if (pendingOperationCount == 0)
{
#if !defined(PLATFORM_UNIX)
ASSERT_IFNOT(owner_.etwSessionProvider_, "ETW session provider should exist");
owner_.etwSessionProvider_->CleanupServiceEtw(packageInstanceEnvironmentContext);
#endif
ConfigureEndpointBindingAndFirewallPolicy(thisSPtr);
}
}
void ConfigureEndpointBindingAndFirewallPolicy(AsyncOperationSPtr const & thisSPtr)
{
vector<EndpointCertificateBinding> endpointCertBindings;
auto error = owner_.GetEndpointBindingPolicies(packageInstanceEnvironmentContext->Endpoints, servicePackageDescription_.DigestedResources.DigestedCertificates, endpointCertBindings);
if (!error.IsSuccess())
{
WriteWarning(
TraceEnvironmentManager,
owner_.Root.TraceId,
"Error getting EndpointBindingPolicies {0}",
error);
lastError_.Overwrite(error);
}
if (!endpointCertBindings.empty() || !packageInstanceEnvironmentContext->FirewallPorts.empty())
{
WriteInfo(
TraceEnvironmentManager,
owner_.Root.TraceId,
"{0}: Start ConfigureEndpointCertificateAndFirewallPolicy Cleanup. CleanupFirewallPolicy {1}, CleanupEndpointCertificate {2}",
servicePackageInstaceId_,
!packageInstanceEnvironmentContext->FirewallPorts.empty(),
!endpointCertBindings.empty());
auto operation = owner_.Hosting.FabricActivatorClientObj->BeginConfigureEndpointCertificateAndFirewallPolicy(
EnvironmentManager::GetServicePackageIdentifier(packageInstanceEnvironmentContext->ServicePackageInstanceId.ToString(), instanceId_),
endpointCertBindings,
true,
true,
packageInstanceEnvironmentContext->FirewallPorts,
timeoutHelper_.OriginalTimeout,
[this](AsyncOperationSPtr const & operation)
{
this->OnConfigureEndpointBindingsAndFirewallPolicyCompleted(operation, false);
},
thisSPtr);
this->OnConfigureEndpointBindingsAndFirewallPolicyCompleted(operation, true);
}
else
{
CleanupContainerCertificates(thisSPtr);
}
}
void OnConfigureEndpointBindingsAndFirewallPolicyCompleted(AsyncOperationSPtr operation, bool expectedCompletedSynchronously)
{
if (operation->CompletedSynchronously != expectedCompletedSynchronously)
{
return;
}
ErrorCode error = owner_.Hosting.FabricActivatorClientObj->EndConfigureEndpointCertificateAndFirewallPolicy(operation);
if (!error.IsSuccess())
{
WriteWarning(
TraceEnvironmentManager,
owner_.Root.TraceId,
"{0}: End ConfigureEndpointCertificateAndFirewallPolicy cleanup. ErrorCode={1}.",
servicePackageInstaceId_,
error);
lastError_.Overwrite(error);
}
else
{
// This is an optimization, needed because the firewall rule APIs are slow.
packageInstanceEnvironmentContext->FirewallPorts = vector<LONG>();
}
CleanupContainerCertificates(operation->Parent);
}
void RemoveResourceFile(AsyncOperationSPtr thisSPtr)
{
ImageModel::RunLayoutSpecification runLayout(owner_.Hosting.DeploymentFolder);
auto svcPkgInstanceId = packageInstanceEnvironmentContext->ServicePackageInstanceId;
auto endpointFileName = runLayout.GetEndpointDescriptionsFile(
svcPkgInstanceId.ApplicationId.ToString(),
svcPkgInstanceId.ServicePackageName,
svcPkgInstanceId.PublicActivationId);
if (File::Exists(endpointFileName))
{
auto error = File::Delete2(endpointFileName, true);
if (!error.IsSuccess())
{
WriteWarning(
TraceEnvironmentManager,
owner_.Root.TraceId,
"Failed to remove enpoint resource file={0}. Error={1}. NodeVersion={2}.",
endpointFileName,
error,
owner_.Hosting.FabricNodeConfigObj.NodeVersion);
lastError_.Overwrite(error);
}
}
else
{
WriteNoise(
TraceEnvironmentManager,
owner_.Root.TraceId,
"Successfully removed enpoint resource file={0}. NodeVersion={1}.",
endpointFileName,
owner_.Hosting.FabricNodeConfigObj.NodeVersion);
}
if (lastError_.IsSuccess())
{
packageInstanceEnvironmentContext->Reset();
}
TryComplete(thisSPtr, lastError_);
}
void CleanupContainerCertificates(AsyncOperationSPtr thisSPtr)
{
if (!packageInstanceEnvironmentContext->CertificatePaths.empty())
{
WriteInfo(
TraceEnvironmentManager,
owner_.Root.TraceId,
"{0}: Start CleanupContainerCertificateExport Cleanup.",
servicePackageInstaceId_);
auto operation = owner_.Hosting.FabricActivatorClientObj->BeginCleanupContainerCertificateExport(
packageInstanceEnvironmentContext->CertificatePaths,
packageInstanceEnvironmentContext->CertificatePasswordPaths,
timeoutHelper_.OriginalTimeout,
[this](AsyncOperationSPtr const & operation)
{
this->OnCleanupContainerCertificateExportCompleted(operation, false);
},
thisSPtr);
this->OnCleanupContainerCertificateExportCompleted(operation, true);
}
else
{
this->RemoveResourceFile(thisSPtr);
}
}
void OnCleanupContainerCertificateExportCompleted(AsyncOperationSPtr operation, bool expectedCompletedSynchronously)
{
if (operation->CompletedSynchronously != expectedCompletedSynchronously)
{
return;
}
ErrorCode error = owner_.Hosting.FabricActivatorClientObj->EndCleanupContainerCertificateExport(operation);
if (!error.IsSuccess())
{
WriteWarning(
TraceEnvironmentManager,
owner_.Root.TraceId,
"{0}: CleanupContainerCertificateExport cleanup. ErrorCode={1}.",
servicePackageInstaceId_,
error);
lastError_.Overwrite(error);
}
this->RemoveResourceFile(operation->Parent);
}
private:
EnvironmentManager & owner_;
ServicePackageInstanceEnvironmentContextSPtr const packageInstanceEnvironmentContext;
TimeoutHelper timeoutHelper_;
ErrorCode lastError_;
atomic_uint64 pendingOperations_;
ServiceModel::ServicePackageDescription servicePackageDescription_;
int64 instanceId_;
wstring servicePackageInstaceId_;
};
EnvironmentManager::EnvironmentManager(
ComponentRoot const & root,
__in HostingSubsystem & hosting)
: RootedObject(root),
AsyncFabricComponent(),
hosting_(hosting),
logCollectionProvider_(),
endpointProvider_(),
crashDumpProvider_(),
currentUserSid_(),
isSystem_(false),
isAdminUser_(false)
{
}
EnvironmentManager::~EnvironmentManager()
{
}
// ********************************************************************************************************************
// AsyncFabricComponent methods
AsyncOperationSPtr EnvironmentManager::OnBeginOpen(
TimeSpan timeout,
AsyncCallback const & callback,
AsyncOperationSPtr const & parent)
{
return AsyncOperation::CreateAndStart<OpenAsyncOperation>(
*this,
timeout,
callback,
parent);
}
ErrorCode EnvironmentManager::OnEndOpen(
AsyncOperationSPtr const & asyncOperation)
{
return OpenAsyncOperation::End(asyncOperation);
}
AsyncOperationSPtr EnvironmentManager::OnBeginClose(
TimeSpan const timeout,
AsyncCallback const & callback,
AsyncOperationSPtr const & parent)
{
return AsyncOperation::CreateAndStart<CloseAsyncOperation>(
*this,
timeout,
callback,
parent);
}
ErrorCode EnvironmentManager::OnEndClose(
AsyncOperationSPtr const & asyncOperation)
{
return CloseAsyncOperation::End(asyncOperation);
}
void EnvironmentManager::OnAbort()
{
if (logCollectionProvider_)
{
logCollectionProvider_->Abort();
}
if (etwSessionProvider_)
{
etwSessionProvider_->Abort();
}
if (crashDumpProvider_)
{
crashDumpProvider_->Abort();
}
if (endpointProvider_)
{
ASSERT_IFNOT(
HostingConfig::GetConfig().EndpointProviderEnabled,
"Endpoint filtering disabled, endpoint provider shouldn't exist");
endpointProvider_->Abort();
}
}
// ****************************************************
// Application environment specific methods
// ****************************************************
AsyncOperationSPtr EnvironmentManager::BeginSetupApplicationEnvironment(
ApplicationIdentifier const & applicationId,
ApplicationPackageDescription const & appPackageDescription,
TimeSpan const timeout,
AsyncCallback const & callback,
AsyncOperationSPtr const & parent)
{
return AsyncOperation::CreateAndStart<SetupApplicationAsyncOperation>(
*this,
applicationId,
appPackageDescription,
timeout,
callback,
parent);
}
ErrorCode EnvironmentManager::EndSetupApplicationEnvironment(
AsyncOperationSPtr const & asyncOperation,
__out ApplicationEnvironmentContextSPtr & context)
{
return SetupApplicationAsyncOperation::End(asyncOperation, context);
}
AsyncOperationSPtr EnvironmentManager::BeginCleanupApplicationEnvironment(
ApplicationEnvironmentContextSPtr const & appEnvironmentContext,
TimeSpan const timeout,
AsyncCallback const & callback,
AsyncOperationSPtr const & parent)
{
return AsyncOperation::CreateAndStart<CleanupApplicationAsyncOperation>(
*this,
appEnvironmentContext,
timeout,
callback,
parent);
}
ErrorCode EnvironmentManager::EndCleanupApplicationEnvironment(
AsyncOperationSPtr const & asyncOperation)
{
return CleanupApplicationAsyncOperation::End(asyncOperation);
}
void EnvironmentManager::AbortApplicationEnvironment(ApplicationEnvironmentContextSPtr const & appEnvironmentContext)
{
if (appEnvironmentContext == nullptr) { return; }
if (HostingConfig::GetConfig().RunAsPolicyEnabled)
{
if(appEnvironmentContext->PrincipalsContext)
{
hosting_.FabricActivatorClientObj->AbortApplicationEnvironment(appEnvironmentContext->ApplicationId.ToString());
}
else
{
WriteNoise(
TraceEnvironmentManager,
Root.TraceId,
"No security principals setup for Application Id {0}, proceeding without cleanup",
appEnvironmentContext->ApplicationId);
}
}
// log collection provider does not have a way to cleanup without wait
wstring const & path = appEnvironmentContext->LogCollectionPath;
if (!path.empty())
{
ASSERT_IFNOT(logCollectionProvider_, "Log collection provider should exist");
WriteNoise(
TraceEnvironmentManager,
Root.TraceId,
"{0}: logCollectionProvider_->RemoveLogPaths: path {1}",
appEnvironmentContext->ApplicationId,
path);
logCollectionProvider_->CleanupLogPaths(path);
}
}
// ****************************************************
// ServicePackage environment specific methods
// ****************************************************
AsyncOperationSPtr EnvironmentManager::BeginSetupServicePackageInstanceEnvironment(
ApplicationEnvironmentContextSPtr const & appEnvironmentContext,
wstring const & applicationName,
ServicePackageInstanceIdentifier const & servicePackageInstanceId,
int64 instanceId,
ServiceModel::ServicePackageDescription const & servicePackageDescription,
TimeSpan const timeout,
AsyncCallback const & callback,
AsyncOperationSPtr const & parent)
{
return AsyncOperation::CreateAndStart<SetupServicePackageInstanceAsyncOperation>(
*this,
appEnvironmentContext,
applicationName,
servicePackageInstanceId,
move(servicePackageDescription),
instanceId,
timeout,
callback,
parent);
}
ErrorCode EnvironmentManager::EndSetupServicePackageInstanceEnvironment(
AsyncOperationSPtr const & asyncOperation,
__out ServicePackageInstanceEnvironmentContextSPtr & packageEnvironmentContext)
{
return SetupServicePackageInstanceAsyncOperation::End(asyncOperation, packageEnvironmentContext);
}
AsyncOperationSPtr EnvironmentManager::BeginCleanupServicePackageInstanceEnvironment(
ServicePackageInstanceEnvironmentContextSPtr const & packageEnvironmentContext,
ServiceModel::ServicePackageDescription const & packageDescription,
int64 instanceId,
TimeSpan const timeout,
AsyncCallback const & callback,
AsyncOperationSPtr const & parent)
{
return AsyncOperation::CreateAndStart<CleanupServicePackageInstanceAsyncOperation>(
*this,
packageEnvironmentContext,
packageDescription,
instanceId,
timeout,
callback,
parent);
}
ErrorCode EnvironmentManager::EndCleanupServicePackageInstanceEnvironment(
AsyncOperationSPtr const & asyncOperation)
{
return CleanupServicePackageInstanceAsyncOperation::End(asyncOperation);
}
void EnvironmentManager::AbortServicePackageInstanceEnvironment(
ServicePackageInstanceEnvironmentContextSPtr const & packageEnvironmentContext,
ServiceModel::ServicePackageDescription const & packageDescription,
int64 instanceId)
{
auto error = CleanupServicePackageInstanceEnvironment(
packageEnvironmentContext,
packageDescription,
instanceId);
error.ReadValue(); // ignore error in abort path
// crash dump provider does not have a way to cleanup without wait
ASSERT_IFNOT(crashDumpProvider_, "Crash dump provider should exist");
crashDumpProvider_->CleanupServiceCrashDumps(packageEnvironmentContext);
}
ErrorCode EnvironmentManager::CleanupServicePackageInstanceEnvironment(
ServicePackageInstanceEnvironmentContextSPtr const & packageEnvironmentContext,
ServicePackageDescription const & packageDescription,
int64 instanceId)
{
if (!packageEnvironmentContext)
{
return ErrorCode(ErrorCodeValue::Success);
}
auto servicePackageInstanceId = EnvironmentManager::GetServicePackageIdentifier(packageEnvironmentContext->ServicePackageInstanceId.ToString(), instanceId);
ErrorCode lastError(ErrorCodeValue::Success);
if (HostingConfig::GetConfig().EndpointProviderEnabled)
{
ASSERT_IFNOT(endpointProvider_, "Endpoint provider must not be null as EndpointFiltering is enabled");
for(auto iter = packageEnvironmentContext->Endpoints.begin();
iter != packageEnvironmentContext->Endpoints.end();
++iter)
{
EndpointResourceSPtr endpointResource = *iter;
SecurityPrincipalInformationSPtr principalInfo;
auto error = EndpointProvider::GetPrincipal(endpointResource, principalInfo);
if(error.IsSuccess())
{
if (EndpointProvider::IsHttpEndpoint(endpointResource))
{
error = this->hosting_.FabricActivatorClientObj->ConfigureEndpointSecurity(
principalInfo->SidString,
endpointResource->Port,
endpointResource->Protocol == ServiceModel::ProtocolType::Enum::Https,
true,
servicePackageInstanceId,
endpointResource->EndpointDescriptionObj.ExplicitPortSpecified);
if (!error.IsSuccess())
{
WriteWarning(
TraceEnvironmentManager,
Root.TraceId,
"Remove EndpointSecurity failed. Endpoint={0}, ErrorCode={1}. Continuing to remove other endpoints.",
endpointResource->Name,
error);
lastError.Overwrite(error);
}
}
}
else
{
WriteWarning(
TraceEnvironmentManager,
Root.TraceId,
"Unable to retrieve PrincipalInfo for endpoint. ErrorCode={1}. Continuing to remove other endpoints.",
endpointResource->Name,
error);
}
error = endpointProvider_->RemoveEndpoint(*iter);
if(!error.IsSuccess())
{
WriteWarning(
TraceEnvironmentManager,
Root.TraceId,
"RemoveEndpoint failed. ServicePackageInstanceId={0}, Endpoint={1}, ErrorCode={2}. Continuing to remove other endpoints.",
servicePackageInstanceId,
(*iter)->Name,
error);
lastError.Overwrite(error);
}
}
vector<EndpointCertificateBinding> endpointCertBindings;
auto error = GetEndpointBindingPolicies(packageEnvironmentContext->Endpoints, packageDescription.DigestedResources.DigestedCertificates, endpointCertBindings);
if (!error.IsSuccess())
{
WriteWarning(
TraceEnvironmentManager,
Root.TraceId,
"Error getting EndpointBindingPolicies {0}",
error);
lastError.Overwrite(error);
}
if (!endpointCertBindings.empty() || !packageEnvironmentContext->FirewallPorts.empty())
{
error = Hosting.FabricActivatorClientObj->ConfigureEndpointBindingAndFirewallPolicy(
servicePackageInstanceId,
endpointCertBindings,
true,
true,
packageEnvironmentContext->FirewallPorts);
if (!error.IsSuccess())
{
WriteWarning(
TraceEnvironmentManager,
Root.TraceId,
"{0}: Error in ConfigureEndpointBindingAndFirewallPolicy during Abort. Error {1}",
servicePackageInstanceId,
error);
}
}
}
if (packageEnvironmentContext->SetupContainerGroup)
{
Hosting.FabricActivatorClientObj->AbortProcess(packageEnvironmentContext->ServicePackageInstanceId.ToString());
}
if (packageEnvironmentContext->HasIpsAssigned)
{
auto error = Hosting.FabricActivatorClientObj->CleanupAssignedIPs(packageEnvironmentContext->ServicePackageInstanceId.ToString());
if (!error.IsSuccess())
{
WriteWarning(
TraceEnvironmentManager,
Root.TraceId,
"{0}: Error in CleanupAssignedIps {1}",
servicePackageInstanceId,
error);
lastError.Overwrite(error);
}
}
else
{
WriteNoise(TraceEnvironmentManager,
Root.TraceId,
"No IPs assigned for {0}",
servicePackageInstanceId);
}
if (packageEnvironmentContext->HasOverlayNetworkResourcesAssigned)
{
std::map<std::wstring, std::vector<std::wstring>> codePackageNetworkNames;
packageEnvironmentContext->GetCodePackageOverlayNetworkNames(codePackageNetworkNames);
auto error = Hosting.FabricActivatorClientObj->CleanupAssignedOverlayNetworkResources(
codePackageNetworkNames,
hosting_.NodeName,
hosting_.FabricNodeConfigObj.IPAddressOrFQDN,
packageEnvironmentContext->ServicePackageInstanceId.ToString());
if (!error.IsSuccess())
{
WriteWarning(
TraceEnvironmentManager,
Root.TraceId,
"{0}: Error in CleanupAssignedOverlayNetworkResources {1}",
servicePackageInstanceId,
error);
lastError.Overwrite(error);
}
}
else
{
WriteNoise(TraceEnvironmentManager,
Root.TraceId,
"No overlay network resources assigned for {0}",
servicePackageInstanceId);
}
#if !defined(PLATFORM_UNIX)
ASSERT_IFNOT(etwSessionProvider_, "ETW session provider should exist");
etwSessionProvider_->CleanupServiceEtw(packageEnvironmentContext);
#endif
packageEnvironmentContext->Reset();
return lastError;
}
ErrorCode EnvironmentManager::GetEndpointBindingPolicies(
vector<EndpointResourceSPtr> const & endpointResources,
map<wstring, EndpointCertificateDescription> digestedCertificates,
vector<EndpointCertificateBinding> & bindings)
{
for (auto endpointIter = endpointResources.begin();
endpointIter != endpointResources.end();
++endpointIter)
{
EndpointResourceSPtr endPointResource = *endpointIter;
wstring principalSid;
EnvironmentResource::ResourceAccess resourceAccess;
auto error = endPointResource->GetDefaultSecurityAccess(resourceAccess);
if (error.IsSuccess())
{
principalSid = resourceAccess.PrincipalInfo->SidString;
}
else if (!error.IsError(ErrorCodeValue::NotFound))
{
WriteWarning(
TraceEnvironmentManager,
Root.TraceId,
"Error getting default security access for endpointResource '{0}'.Error={1}",
endPointResource->Name,
error);
return error;
}
if (!endPointResource->EndPointBindingPolicy.CertificateRef.empty())
{
auto it = digestedCertificates.find(endPointResource->EndPointBindingPolicy.CertificateRef);
if (it == digestedCertificates.end())
{
WriteWarning(
TraceEnvironmentManager,
Root.TraceId,
"Error getting the certificate '{0}' for endpoint {1}",
endPointResource->EndPointBindingPolicy.CertificateRef,
endPointResource->Name);
return ErrorCodeValue::NotFound;
}
EndpointCertificateBinding binding(
endPointResource->Port,
endPointResource->EndpointDescriptionObj.ExplicitPortSpecified,
principalSid,
it->second.X509FindValue,
it->second.X509StoreName,
it->second.X509FindType);
bindings.push_back(binding);
}
}
return ErrorCodeValue::Success;
}
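// Illustrative example (hypothetical values): GetServicePackageIdentifier(L"App0:ServicePackageA", 130835) returns L"App0:ServicePackageA:130835".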
std::wstring EnvironmentManager::GetServicePackageIdentifier(std::wstring const & servicePackageId, int64 instanceId)
{
return wformatString("{0}:{1}", servicePackageId, instanceId);
}
| 52,340 |
524 | from collections import namedtuple
RGB = namedtuple("RGB", "red, green, blue")
COLORS = {
"red": RGB(255, 0, 0),
"orange-deep": RGB(255, 40, 0),
"orange": RGB(255, 120, 0),
"yellow": RGB(255, 200, 0),
"yellow-acid": RGB(160, 255, 0),
"green": RGB(0, 255, 0),
"green-forest": RGB(34, 139, 34),
"green-spring": RGB(0, 255, 127),
"green-teal": RGB(0, 128, 128),
"green-turquoise": RGB(0, 199, 140),
"green-coral": RGB(0, 255, 50),
"cyan": RGB(0, 255, 255),
"blue": RGB(0, 0, 255),
"blue-light": RGB(65, 105, 225),
"blue-navy": RGB(0, 0, 128),
"blue-aqua": RGB(0, 255, 255),
"purple": RGB(128, 0, 128),
"pink": RGB(255, 0, 178),
"magenta": RGB(255, 0, 255),
"black": RGB(0, 0, 0),
"white": RGB(255, 255, 255),
"brown": RGB(139, 69, 19),
"gold": RGB(255, 215, 0),
"hotpink": RGB(255, 105, 180),
"lightblue": RGB(173, 216, 230),
"lightgreen": RGB(152, 251, 152),
"lightpink": RGB(255, 182, 193),
"lightyellow": RGB(255, 255, 224),
"maroon": RGB(128, 0, 0),
"mint": RGB(189, 252, 201),
"olive": RGB(85, 107, 47),
"peach": RGB(255, 100, 100),
"plum": RGB(221, 160, 221),
"sepia": RGB(94, 38, 18),
"skyblue": RGB(135, 206, 235),
"steelblue": RGB(70, 130, 180),
"tan": RGB(210, 180, 140),
"violetred": RGB(208, 32, 144),
}
GRADIENTS = {
"Rainbow": {
"colors": [
"red",
"orange",
"yellow",
"green",
"green-turquoise",
"blue",
"purple",
"pink",
]
},
"Dancefloor": {"colors": ["red", "pink", "blue"]},
"Plasma": {"colors": ["blue", "purple", "red", "orange-deep", "yellow"]},
"Ocean": {"colors": ["blue-aqua", "blue"]},
"Viridis": {"colors": ["purple", "blue", "green-teal", "green", "yellow"]},
"Jungle": {"colors": ["green", "green-forest", "orange"]},
"Spring": {"colors": ["pink", "orange-deep", "yellow"]},
"Winter": {"colors": ["green-turquoise", "green-coral"]},
"Frost": {"colors": ["blue", "blue-aqua", "purple", "pink"]},
"Sunset": {"colors": ["blue-navy", "orange", "red"]},
"Borealis": {
"colors": [
"orange-deep",
"purple",
"green-turquoise",
"green",
]
},
"Rust": {"colors": ["orange-deep", "red"]},
"Christmas": {
"colors": [
"red",
"red",
"red",
"red",
"red",
"green",
"green",
"green",
"green",
"green",
],
"method": "repeat",
},
"Winamp": {
"colors": [
"green",
"yellow",
"orange",
"orange-deep",
"red",
]
},
}
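# A minimal usage sketch: resolving a named gradient to its RGB tuples, assuming
# only the COLORS and GRADIENTS dicts defined above. The function name is
# illustrative, not part of the module's public API.
def resolve_gradient(name):
    """Return the list of RGB tuples for the named gradient."""
    return [COLORS[color] for color in GRADIENTS[name]["colors"]]
# For example, resolve_gradient("Ocean") yields [RGB(0, 255, 255), RGB(0, 0, 255)].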
| 1,494 |
504 | <reponame>steakknife/pcgeos
/* crypto/asn1/a_object.c */
/* Copyright (C) 1995-1998 <NAME> (<EMAIL>)
* All rights reserved.
*
* This package is an SSL implementation written
* by <NAME> (<EMAIL>).
* The implementation was written so as to conform with Netscapes SSL.
*
* This library is free for commercial and non-commercial use as long as
* the following conditions are aheared to. The following conditions
* apply to all code found in this distribution, be it the RC4, RSA,
* lhash, DES, etc., code; not just the SSL code. The SSL documentation
* included with this distribution is covered by the same copyright terms
* except that the holder is <NAME> (<EMAIL>).
*
* Copyright remains <NAME>ung's, and as such any Copyright notices in
* the code are not to be removed.
* If this package is used in a product, Eric Young should be given attribution
* as the author of the parts of the library used.
* This can be in the form of a textual message at program startup or
* in documentation (online or textual) provided with the package.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* "This product includes cryptographic software written by
* <NAME> (<EMAIL>)"
* The word 'cryptographic' can be left out if the rouines from the library
* being used are not cryptographic related :-).
* 4. If you include any Windows specific code (or a derivative thereof) from
* the apps directory (application code) you must include an acknowledgement:
* "This product includes software written by <NAME> (<EMAIL>)"
*
* THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* The licence and distribution terms for any publically available version or
* derivative of this code cannot be changed. i.e. this code cannot simply be
* copied and put under another distribution licence
* [including the GNU Public Licence.]
*/
#ifdef __GEOS__
#include <Ansi/stdio.h>
#else
#include <stdio.h>
#endif
#include "cryptlib.h"
#include "buffer.h"
#include "asn1.h"
#include "objects.h"
/* ASN1err(ASN1_F_ASN1_OBJECT_NEW,ASN1_R_EXPECTING_AN_OBJECT);
* ASN1err(ASN1_F_D2I_ASN1_OBJECT,ASN1_R_BAD_OBJECT_HEADER);
* ASN1err(ASN1_F_I2T_ASN1_OBJECT,ASN1_R_BAD_OBJECT_HEADER);
*/
int i2d_ASN1_OBJECT(a, pp)
ASN1_OBJECT *a;
unsigned char **pp;
{
unsigned char *p;
if ((a == NULL) || (a->data == NULL)) return(0);
if (pp == NULL)
return(ASN1_object_size(0,a->length,V_ASN1_OBJECT));
p= *pp;
ASN1_put_object(&p,0,a->length,V_ASN1_OBJECT,V_ASN1_UNIVERSAL);
memcpy(p,a->data,a->length);
p+=a->length;
*pp=p;
return(a->length);
}
int a2d_ASN1_OBJECT(out,olen,buf,num)
unsigned char *out;
int olen;
char *buf;
int num;
{
int i,first,len=0,c;
char tmp[24],*p;
unsigned long l;
if (num == 0)
return(0);
else if (num == -1)
num=strlen(buf);
p=buf;
c= *(p++);
num--;
if ((c >= '0') && (c <= '2'))
{
first=(c-'0')*40;
}
else
{
ASN1err(ASN1_F_A2D_ASN1_OBJECT,ASN1_R_FIRST_NUM_TOO_LARGE);
goto err;
}
if (num <= 0)
{
ASN1err(ASN1_F_A2D_ASN1_OBJECT,ASN1_R_MISSING_SECOND_NUMBER);
goto err;
}
c= *(p++);
num--;
for (;;)
{
if (num <= 0) break;
if ((c != '.') && (c != ' '))
{
ASN1err(ASN1_F_A2D_ASN1_OBJECT,ASN1_R_INVALID_SEPARATOR);
goto err;
}
l=0;
for (;;)
{
if (num <= 0) break;
num--;
c= *(p++);
if ((c == ' ') || (c == '.'))
break;
if ((c < '0') || (c > '9'))
{
ASN1err(ASN1_F_A2D_ASN1_OBJECT,ASN1_R_INVALID_DIGIT);
goto err;
}
l=l*10L+(long)(c-'0');
}
if (len == 0)
{
if ((first < 2) && (l >= 40))
{
ASN1err(ASN1_F_A2D_ASN1_OBJECT,ASN1_R_SECOND_NUMBER_TOO_LARGE);
goto err;
}
l+=(long)first;
}
i=0;
for (;;)
{
tmp[i++]=(unsigned char)l&0x7f;
l>>=7L;
if (l == 0L) break;
}
if (out != NULL)
{
if (len+i > olen)
{
ASN1err(ASN1_F_A2D_ASN1_OBJECT,ASN1_R_BUFFER_TOO_SMALL);
goto err;
}
while (--i > 0)
out[len++]=tmp[i]|0x80;
out[len++]=tmp[0];
}
else
len+=i;
}
return(len);
err:
return(0);
}
int i2t_ASN1_OBJECT(buf,buf_len,a)
char *buf;
int buf_len;
ASN1_OBJECT *a;
{
int i,idx=0,n=0,len,nid;
unsigned long l;
unsigned char *p;
char *s;
char tbuf[32];
if (buf_len <= 0) return(0);
if ((a == NULL) || (a->data == NULL))
{
buf[0]='\0';
return(0);
}
nid=OBJ_obj2nid(a);
if (nid == NID_undef)
{
len=a->length;
p=a->data;
idx=0;
l=0;
while (idx < a->length)
{
l|=(p[idx]&0x7f);
if (!(p[idx] & 0x80)) break;
l<<=7L;
idx++;
}
idx++;
i=(int)(l/40);
if (i > 2) i=2;
l-=(long)(i*40);
sprintf(tbuf,"%d.%ld",i,l);
i=strlen(tbuf);
strncpy(buf,tbuf,buf_len);
buf_len-=i;
buf+=i;
n+=i;
l=0;
for (; idx<len; idx++)
{
l|=p[idx]&0x7f;
if (!(p[idx] & 0x80))
{
sprintf(tbuf,".%ld",l);
i=strlen(tbuf);
if (buf_len > 0)
strncpy(buf,tbuf,buf_len);
buf_len-=i;
buf+=i;
n+=i;
l=0;
}
l<<=7L;
}
}
else
{
s=(char *)OBJ_nid2ln(nid);
if (s == NULL)
s=(char *)OBJ_nid2sn(nid);
strncpy(buf,s,buf_len);
n=strlen(s);
}
buf[buf_len-1]='\0';
return(n);
}
int i2a_ASN1_OBJECT(bp,a)
BIO *bp;
ASN1_OBJECT *a;
{
char buf[80];
int i;
if ((a == NULL) || (a->data == NULL))
return(BIO_write(bp,"NULL",4));
i=i2t_ASN1_OBJECT(buf,80,a);
if (i > 80) i=80;
BIO_write(bp,buf,i);
return(i);
}
ASN1_OBJECT *d2i_ASN1_OBJECT(a, pp, length)
ASN1_OBJECT **a;
unsigned char **pp;
long length;
{
ASN1_OBJECT *ret=NULL;
unsigned char *p;
long len;
int tag,xclass;
int inf,i;
/* only the ASN1_OBJECTs from the 'table' will have values
* for ->sn or ->ln */
if ((a == NULL) || ((*a) == NULL) ||
!((*a)->flags & ASN1_OBJECT_FLAG_DYNAMIC))
{
if ((ret=ASN1_OBJECT_new()) == NULL) return(NULL);
}
else ret=(*a);
p= *pp;
inf=ASN1_get_object(&p,&len,&tag,&xclass,length);
if (inf & 0x80)
{
i=ASN1_R_BAD_OBJECT_HEADER;
goto err;
}
if (tag != V_ASN1_OBJECT)
{
i=ASN1_R_EXPECTING_AN_OBJECT;
goto err;
}
if ((ret->data == NULL) || (ret->length < len))
{
if (ret->data != NULL) Free((char *)ret->data);
ret->data=(unsigned char *)Malloc((int)len);
ret->flags|=ASN1_OBJECT_FLAG_DYNAMIC_DATA;
if (ret->data == NULL)
{ i=ERR_R_MALLOC_FAILURE; goto err; }
}
memcpy(ret->data,p,(int)len);
ret->length=(int)len;
ret->sn=NULL;
ret->ln=NULL;
/* ret->flags=ASN1_OBJECT_FLAG_DYNAMIC; we know it is dynamic */
p+=len;
if (a != NULL) (*a)=ret;
*pp=p;
return(ret);
err:
ASN1err(ASN1_F_D2I_ASN1_OBJECT,i);
if ((ret != NULL) && ((a == NULL) || (*a != ret)))
ASN1_OBJECT_free(ret);
return(NULL);
}
ASN1_OBJECT *ASN1_OBJECT_new()
{
ASN1_OBJECT *ret;
ret=(ASN1_OBJECT *)Malloc(sizeof(ASN1_OBJECT));
if (ret == NULL)
{
ASN1err(ASN1_F_ASN1_OBJECT_NEW,ERR_R_MALLOC_FAILURE);
return(NULL);
}
ret->length=0;
ret->data=NULL;
ret->nid=0;
ret->sn=NULL;
ret->ln=NULL;
ret->flags=ASN1_OBJECT_FLAG_DYNAMIC;
return(ret);
}
void ASN1_OBJECT_free(a)
ASN1_OBJECT *a;
{
if (a == NULL) return;
if (a->flags & ASN1_OBJECT_FLAG_DYNAMIC_STRINGS)
{
if (a->sn != NULL) Free(a->sn);
if (a->ln != NULL) Free(a->ln);
a->sn=a->ln=NULL;
}
if (a->flags & ASN1_OBJECT_FLAG_DYNAMIC_DATA)
{
if (a->data != NULL) Free(a->data);
a->data=NULL;
a->length=0;
}
if (a->flags & ASN1_OBJECT_FLAG_DYNAMIC)
Free((char *)a);
}
ASN1_OBJECT *ASN1_OBJECT_create(nid,data,len,sn,ln)
int nid;
unsigned char *data;
int len;
char *sn,*ln;
{
ASN1_OBJECT o;
o.sn=sn;
o.ln=ln;
o.data=data;
o.nid=nid;
o.length=len;
o.flags=ASN1_OBJECT_FLAG_DYNAMIC|
ASN1_OBJECT_FLAG_DYNAMIC_STRINGS|ASN1_OBJECT_FLAG_DYNAMIC_DATA;
return(OBJ_dup(&o));
}
| 4,603 |
491 | <gh_stars>100-1000
import time
import os
from tape.models.file_utils import url_to_filename, get_cache, get_etag
from tape import ProteinBertModel
from tape import TAPETokenizer
from tape.models.modeling_bert import BERT_PRETRAINED_MODEL_ARCHIVE_MAP
import torch
def test_forcedownload():
model = ProteinBertModel.from_pretrained('bert-base')
url = BERT_PRETRAINED_MODEL_ARCHIVE_MAP['bert-base']
filename = url_to_filename(url, get_etag(url))
wholepath = get_cache()/filename
oldtime = time.ctime(os.path.getmtime(wholepath))
model = ProteinBertModel.from_pretrained('bert-base', force_download=True)
newtime = time.ctime(os.path.getmtime(wholepath))
assert(newtime != oldtime)
# Deploy model
# iupac is the vocab for TAPE models, use unirep for the UniRep model
tokenizer = TAPETokenizer(vocab='iupac')
# Pfam Family: Hexapep, Clan: CL0536
sequence = 'GCTVEDRCLIGMGAILLNGCVIGSGSLVAAGALITQ'
token_ids = torch.tensor([tokenizer.encode(sequence)])
model(token_ids)
| 393 |
568 | <gh_stars>100-1000
package com.hfad.bitsandpizzas;
import android.os.Bundle;
import android.app.Fragment;
import android.view.LayoutInflater;
import android.view.View;
import android.view.ViewGroup;
public class TopFragment extends Fragment {
@Override
public View onCreateView(LayoutInflater inflater, ViewGroup container,
Bundle savedInstanceState) {
return inflater.inflate(R.layout.fragment_top, container, false);
}
}
| 180 |
2,963 | //
// FunctionParser.hpp
// EmojicodeCompiler
//
// Created by <NAME> on 28/07/2017.
// Copyright © 2017 <NAME>. All rights reserved.
//
#ifndef FunctionParser_hpp
#define FunctionParser_hpp
#include <utility>
#include "AST/ASTStatements.hpp"
#include "AbstractParser.hpp"
#include "Lex/TokenStream.hpp"
#include "Parsing/OperatorHelper.hpp"
namespace EmojicodeCompiler {
class ASTNode;
class ASTBinaryOperator;
class FunctionParser : protected AbstractParser {
public:
FunctionParser(Package *pkg, TokenStream &stream) : AbstractParser(pkg, stream) {}
std::unique_ptr<ASTBlock> parse();
std::shared_ptr<ASTExpr> parseExpr(int precedence) {
return parseExprTokens(stream_.consumeToken(), precedence);
}
private:
std::unique_ptr<ASTStatement> parseStatement();
ASTBlock parseBlock();
void parseMainArguments(ASTArguments *arguments, const SourcePosition &position);
std::shared_ptr<ASTExpr> parseExprLeft(const Token &token, int precedence);
std::shared_ptr<ASTExpr> parseRight(std::shared_ptr<ASTExpr> left, int precendence);
std::shared_ptr<ASTExpr> parseClosure(const Token &token);
std::unique_ptr<ASTStatement> parseIf(const SourcePosition &position);
std::unique_ptr<ASTStatement> parseErrorHandler(const SourcePosition &position);
std::shared_ptr<ASTExpr> parseExprTokens(const Token &token, int precendence);
std::shared_ptr<ASTExpr> parseExprIdentifier(const Token &token);
std::shared_ptr<ASTExpr> parseInitialization(const SourcePosition &position);
std::shared_ptr<ASTExpr> parseInterpolation(const Token &token);
std::shared_ptr<ASTExpr> parseCondition();
std::shared_ptr<ASTExpr> parseGroup();
std::shared_ptr<ASTExpr> parseTypeAsValue(const Token &token);
std::pair<std::shared_ptr<ASTExpr>, ASTArguments> parseCalleeAndArguments(const SourcePosition &position);
ASTArguments parseArgumentsWithoutCallee(const SourcePosition &position);
std::shared_ptr<ASTTypeExpr> parseTypeExpr(const SourcePosition &p);
template <typename T>
std::shared_ptr<T> parseUnaryPrefix(const Token &token) {
return std::make_shared<T>(parseExpr(kPrefixPrecedence), token.position());
}
std::shared_ptr<ASTExpr> parseListingLiteral(const SourcePosition &position);
int peakOperatorPrecedence();
ASTBlock parseBlockToEnd(const SourcePosition &pos);
/// Tries to recover from a syntax error by consuming all tokens up to a synchronization token. Must only be used
/// at statement level.
void recover();
std::unique_ptr<ASTStatement> handleStatementToken(const Token &token);
std::unique_ptr<ASTStatement> parseExprStatement(const Token &token);
std::unique_ptr<ASTStatement> parseVariableDeclaration(const Token &token);
std::unique_ptr<ASTStatement> parseReturn(const Token &token);
std::unique_ptr<ASTStatement> parseAssignment(std::shared_ptr<ASTExpr> expr);
std::unique_ptr<ASTStatement> parseMethodAssignment(std::shared_ptr<ASTExpr> expr);
};
} // namespace EmojicodeCompiler
#endif /* FunctionParser_hpp */
| 1,021 |
521 | /* ***** BEGIN LICENSE BLOCK *****
* Version: MPL 1.1/GPL 2.0/LGPL 2.1
*
* The contents of this file are subject to the Mozilla Public License Version
* 1.1 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* http://www.mozilla.org/MPL/
*
* Software distributed under the License is distributed on an "AS IS" basis,
* WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
* for the specific language governing rights and limitations under the
* License.
*
* The Original Code is Java XPCOM Bindings.
*
* The Initial Developer of the Original Code is IBM Corporation.
* Portions created by the Initial Developer are Copyright (C) 2006
* IBM Corporation. All Rights Reserved.
*
* Contributor(s):
* <NAME> (<EMAIL>)
*
* Alternatively, the contents of this file may be used under the terms of
* either the GNU General Public License Version 2 or later (the "GPL"), or
* the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
* in which case the provisions of the GPL or the LGPL are applicable instead
* of those above. If you wish to allow use of your version of this file only
* under the terms of either the GPL or the LGPL, and not to allow others to
* use your version of this file under the terms of the MPL, indicate your
* decision by deleting the provisions above and replace them with the notice
* and other provisions required by the GPL or the LGPL. If you do not delete
* the provisions above, a recipient may use your version of this file under
* the terms of any one of the MPL, the GPL or the LGPL.
*
* ***** END LICENSE BLOCK ***** */
package org.mozilla.xpcom;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.lang.reflect.Field;
import java.lang.reflect.Method;
import java.net.MalformedURLException;
import java.net.URL;
import java.net.URLClassLoader;
import java.nio.charset.Charset;
import java.util.ArrayList;
import java.util.Enumeration;
import java.util.Iterator;
import java.util.Properties;
import org.mozilla.interfaces.nsIComponentManager;
import org.mozilla.interfaces.nsIComponentRegistrar;
import org.mozilla.interfaces.nsILocalFile;
import org.mozilla.interfaces.nsIServiceManager;
import org.mozilla.interfaces.nsISupports;
/**
* A singleton class which provides access to the Mozilla browser. Requires
* that XULRunner be installed on the user's system.
* <p>
 * You would use this class to find a XULRunner installation, set up a profile (if
* necessary), and initialize embedding. A typical scenario would look like
* this:
* </p><pre>
* Mozilla mozilla = Mozilla.getInstance();
* GREVersionRange[] range = new GREVersionRange[1];
* range[0] = new GREVersionRange("1.8.0.*", false, "1.8.1.*", true);
* try {
* File grePath = Mozilla.getGREPathWithProperties(range, null);
* mozilla.initialize(grePath);
* profLock = mozilla.lockProfileDirectory(profileDir);
* // LocationProvider is a user class that implements IAppFileLocProvider
* LocationProvider locProvider = new LocationProvider(grePath, profileDir);
* mozilla.initEmbedding(grePath, grePath, locProvider);
* mozilla.notifyProfile();
* } catch (XPCOMInitializationException xie) {
* // handle exception
* } catch (XPCOMException xe) {
* // handle exception
* }
* </pre>
*
* @see http://www.mozilla.org/projects/embedding/GRE.html
*/
public class Mozilla implements IMozilla, IGRE, IXPCOM, IJavaXPCOMUtils,XPCOMError {
private static Mozilla mozillaInstance = new Mozilla();
private static final String JAVAXPCOM_JAR = "vboxjxpcom.jar";
private IMozilla mozilla = null;
private IGRE gre = null;
private IXPCOM xpcom = null;
private IJavaXPCOMUtils jxutils = null;
/**
* @return
*/
public static Mozilla getInstance() {
return mozillaInstance;
}
/**
*
*/
private Mozilla() {
}
/**
* Locates the path of a GRE with the specified properties. This method
* will only return GREs that support Java embedding (looks for the
* presence of "javaxpcom.jar").
* <p>
   * Currently this uses a "first-fit" algorithm; it does not select
* the newest available GRE.
*
* @param aVersions An array of version ranges: if any version range
* matches, the GRE is considered acceptable.
* @param aProperties A list of GRE property/value pairs which must
* all be satisfied. This parameter is ignored on
* Macintosh, because of the manner in which the
* XUL frameworks are installed.
*
* @return A file object of the appropriate path. If
* the "local" GRE is specified (via the USE_LOCAL_GRE
* environment variable, for example), returns
* <code>null</code>.
*
* @throws FileNotFoundException if an appropriate GRE could not be found
*/
public static File getGREPathWithProperties(GREVersionRange[] aVersions,
Properties aProperties) throws FileNotFoundException {
File grePath = null;
// if GRE_HOME is in the environment, use that GRE
String env = System.getProperty("GRE_HOME");
if (env != null) {
try {
grePath = new File(env).getCanonicalFile();
} catch (IOException e) {
throw new FileNotFoundException("cannot access GRE_HOME");
}
if (!grePath.exists()) {
throw new FileNotFoundException("GRE_HOME doesn't exist");
}
return grePath;
}
// the Gecko bits that sit next to the application or in the PATH
env = System.getProperty("USE_LOCAL_GRE");
if (env != null) {
return null;
}
// Search for GRE in platform specific locations. We want a GRE that
// supports Java, so we look for the "javaxpcom" property by default.
if (aProperties == null) {
aProperties = new Properties();
}
aProperties.setProperty("javaxpcom", "1");
String osName = System.getProperty("os.name").toLowerCase();
if (osName.startsWith("mac os x")) {
grePath = getGREPathMacOSX(aVersions);
} else if (osName.startsWith("windows")) {
grePath = getGREPathWindows(aVersions, aProperties);
} else {
// assume everything else is Unix/Linux
grePath = getGREPathUnix(aVersions, aProperties);
}
if (grePath == null) {
throw new FileNotFoundException("GRE not found");
}
return grePath;
}
/**
* @param aVersions
* @return
*/
private static File getGREPathMacOSX(GREVersionRange[] aVersions) {
/*
* Check the application bundle first, for
* <bundle>/Contents/Frameworks/XUL.framework/libxpcom.dylib.
*/
File grePath = findGREBundleFramework();
if (grePath != null) {
return grePath;
}
// Check ~/Library/Frameworks/XUL.framework/Versions/<version>/libxpcom.dylib
String home = System.getProperty("user.home");
if (home != null) {
grePath = findGREFramework(home, aVersions);
if (grePath != null) {
return grePath;
}
}
// Check /Library/Frameworks/XUL.framework/Versions/<version>/libxpcom.dylib
return findGREFramework("", aVersions);
}
/**
* @return
*/
private static File findGREBundleFramework() {
/*
* Use reflection to get Apple's NSBundle class, which can be used
* to get the bundle's "Frameworks" directory.
*/
try {
URL[] urls = new URL[1];
urls[0] = new File("/System/Library/Java/").toURI().toURL();
ClassLoader loader = new URLClassLoader(urls);
Class<?> bundleClass = Class.forName("com.apple.cocoa.foundation.NSBundle",
true, loader);
// Get the bundle for this app. If this is not executing from
// a bundle, this will return null.
Method mainBundleMethod = bundleClass.getMethod("mainBundle", (java.lang.Class[])null);
Object bundle = mainBundleMethod.invoke(null, (java.lang.Object[])null);
if (bundle != null) {
// Get the path to the bundle's "Frameworks" directory
Method fwPathMethod = bundleClass.getMethod("privateFrameworksPath",
(java.lang.Class[])null);
String path = (String) fwPathMethod.invoke(bundle, (java.lang.Object[])null);
// look for libxpcom.dylib
if (path.length() != 0) {
File xulDir = new File(path, "XUL.framework");
if (xulDir.isDirectory()) {
File xpcomLib = new File(xulDir, "libxpcom.dylib");
if (xpcomLib.canRead()) {
File grePath = xpcomLib.getCanonicalFile().getParentFile();
// Since GRE Properties aren't supported on Mac OS X, we check
// for the existence of the "javaxpcom.jar" file in the GRE.
File jar = new File(grePath, JAVAXPCOM_JAR);
if (jar.canRead()) {
// found GRE
return grePath;
}
}
}
}
}
} catch (Exception e) { }
return null;
}
/**
* @param aRootPath
* @param aVersions
* @return
*/
private static File findGREFramework(String aRootPath,
GREVersionRange[] aVersions) {
File frameworkDir = new File(aRootPath +
"/Library/Frameworks/XUL.framework/Versions");
if (!frameworkDir.exists())
return null;
File[] files = frameworkDir.listFiles();
for (int i = 0; i < files.length; i++) {
if (checkVersion(files[i].getName(), aVersions)) {
File xpcomLib = new File(files[i], "libxpcom.dylib");
// Since GRE Properties aren't supported on Mac OS X, we check
// for the existence of the "javaxpcom.jar" file in the GRE.
File jar = new File(files[i], JAVAXPCOM_JAR);
if (xpcomLib.canRead() && jar.canRead()) {
return files[i];
}
}
}
return null;
}
/**
* @param aVersions
* @param aProperties
* @return
*/
private static File getGREPathWindows(GREVersionRange[] aVersions,
Properties aProperties) {
/*
* Note the usage of the "Software\\mozilla.org\\GRE" subkey - this allows
* us to have multiple versions of GREs on the same machine by having
* subkeys such as 1.0, 1.1, 2.0 etc. under it.
*
* Please see http://www.mozilla.org/projects/embedding/GRE.html for
* more info.
*/
final String greKey = "Software\\mozilla.org\\GRE";
// See if there is a GRE registered for the current user.
// If not, look for one on the system.
String key = "HKEY_CURRENT_USER" + "\\" + greKey;
File grePath = getGREPathFromRegKey(key, aVersions, aProperties);
if (grePath == null) {
key = "HKEY_LOCAL_MACHINE" + "\\" + greKey;
grePath = getGREPathFromRegKey(key, aVersions, aProperties);
}
return grePath;
}
/**
* @param aRegKey
* @param aVersions
* @param aProperties
* @return
*/
private static File getGREPathFromRegKey(String aRegKey,
GREVersionRange[] aVersions, Properties aProperties) {
// create a temp file for the registry export
File tempFile;
try {
tempFile = File.createTempFile("jx_registry", null);
} catch (IOException e) {
// failed to create temp file. ABORT
return null;
}
Process proc;
try {
proc = Runtime.getRuntime().exec("regedit /e " + "\"" + tempFile.getPath()
+ "\" \"" + aRegKey + "\"");
proc.waitFor();
} catch (Exception e) {
// Failed to run regedit.exe. Length of temp file is zero, and that's
// handled next.
}
// If there is a key by that name in the registry, then the file length
// will not be zero.
File grePath = null;
if (tempFile.length() != 0) {
grePath = getGREPathFromRegistryFile(tempFile.getPath(),
aRegKey, aVersions, aProperties);
}
tempFile.delete();
return grePath;
}
/**
* @param aFileName
* @param aCharset
* @param aKeyName
* @param aVersions
* @param aProperties
* @return
*/
private static File getGREPathFromRegistryFile(String aFileName,
String aKeyName, GREVersionRange[] aVersions,
Properties aProperties) {
INIParser parser;
try {
parser = new INIParser(aFileName, Charset.forName("UTF-16"));
} catch (Exception e) {
// Problem reading from file. Bail out.
return null;
}
Iterator sectionsIter = parser.getSections();
while (sectionsIter.hasNext()) {
// get 'section' name, which will be a registry key name
String section = (String) sectionsIter.next();
// Skip over GRE key ("<root>\Software\mozilla.org\GRE")
int gre_len = aKeyName.length();
if (section.length() <= gre_len) {
continue;
}
// Get the GRE subkey; that is, everything after
// "<root>\Software\mozilla.org\GRE\"
String subkeyName = section.substring(gre_len + 1);
// We are only interested in _immediate_ subkeys. We want
// "<root>\Software\mozilla.org\GRE\<version>" but not
// "<root>\Software\mozilla.org\GRE\<version>\<moretext>".
if (subkeyName.indexOf('\\') != -1) {
continue;
}
// See if this registry key has a "Version" value, and if so, compare
// it to our desired versions.
String version = parser.getString(section, "\"Version\"");
if (version == null) {
continue;
}
// remove quotes around string
version = version.substring(1, version.length() - 1);
if (!checkVersion(version, aVersions)) {
continue;
}
      // All properties must match, keeping in mind that the property/value
// pairs returned by regedit.exe have quotes around them.
if (aProperties != null) {
boolean ok = true;
Enumeration e = aProperties.propertyNames();
while (ok && e.hasMoreElements()) {
String prop = (String) e.nextElement();
String greValue = parser.getString(section, "\"" + prop + "\"");
if (greValue == null) {
// No such property is set for this GRE. Go on to next GRE.
ok = false;
} else {
// See if the value of the property for the GRE matches
// the given value.
String value = aProperties.getProperty(prop);
if (!greValue.equals("\"" + value + "\"")) {
ok = false;
}
}
}
if (!ok) {
continue;
}
}
String pathStr = parser.getString(section, "\"GreHome\"");
if (pathStr != null) {
// remove quotes around string
pathStr = pathStr.substring(1, pathStr.length() - 1);
File grePath = new File(pathStr);
if (grePath.exists()) {
File xpcomLib = new File(grePath, "xpcom.dll");
if (xpcomLib.canRead()) {
// found a good GRE
return grePath;
}
}
}
}
return null;
}
/**
* @param aVersions
* @param aProperties
* @return
*/
private static File getGREPathUnix(GREVersionRange[] aVersions,
Properties aProperties) {
File grePath = null;
String env = System.getProperty("MOZ_GRE_CONF");
if (env != null) {
grePath = getPathFromConfigFile(env, aVersions, aProperties);
if (grePath != null) {
return grePath;
}
}
final String greUserConfFile = ".gre.config";
final String greUserConfDir = ".gre.d";
final String greConfPath = "/etc/gre.conf";
final String greConfDir = "/etc/gre.d";
env = System.getProperty("user.home");
if (env != null) {
// Look in ~/.gre.config
grePath = getPathFromConfigFile(env + File.separator + greUserConfFile,
aVersions, aProperties);
if (grePath != null) {
return grePath;
}
// Look in ~/.gre.d/*.conf
grePath = getPathFromConfigDir(env + File.separator + greUserConfDir,
aVersions, aProperties);
if (grePath != null) {
return grePath;
}
}
// Look for a global /etc/gre.conf file
grePath = getPathFromConfigFile(greConfPath, aVersions, aProperties);
if (grePath != null) {
return grePath;
}
// Look for a group of config files in /etc/gre.d/
grePath = getPathFromConfigDir(greConfDir, aVersions, aProperties);
return grePath;
}
/**
* @param aFileName
* @param aVersions
* @param aProperties
* @return
*/
private static File getPathFromConfigFile(String aFileName,
GREVersionRange[] aVersions, Properties aProperties) {
INIParser parser;
try {
parser = new INIParser(aFileName);
} catch (Exception e) {
// Problem reading from file. Bail out.
return null;
}
Iterator sectionsIter = parser.getSections();
while (sectionsIter.hasNext()) {
// get 'section' name, which will be a version string
String section = (String) sectionsIter.next();
// if this isn't one of the versions we are looking for, move
// on to next section
if (!checkVersion(section, aVersions)) {
continue;
}
// all properties must match
if (aProperties != null) {
boolean ok = true;
Enumeration e = aProperties.propertyNames();
while (ok && e.hasMoreElements()) {
String prop = (String) e.nextElement();
String greValue = parser.getString(section, prop);
if (greValue == null) {
// No such property is set for this GRE. Go on to next GRE.
ok = false;
} else {
// See if the value of the property for the GRE matches
// the given value.
if (!greValue.equals(aProperties.getProperty(prop))) {
ok = false;
}
}
}
if (!ok) {
continue;
}
}
String pathStr = parser.getString(section, "GRE_PATH");
if (pathStr != null) {
File grePath = new File(pathStr);
if (grePath.exists()) {
File xpcomLib = new File(grePath, "libxpcom.so");
if (xpcomLib.canRead()) {
// found a good GRE
return grePath;
}
}
}
}
return null;
}
/**
* @param aDirName
* @param aVersions
* @param aProperties
* @return
*/
private static File getPathFromConfigDir(String aDirName,
GREVersionRange[] aVersions, Properties aProperties) {
/*
* Open the directory provided and try to read any files in that
* directory that end with .conf. We look for an entry that might
* point to the GRE that we're interested in.
*/
File dir = new File(aDirName);
if (!dir.isDirectory()) {
return null;
}
File grePath = null;
File[] files = dir.listFiles();
for (int i = 0; i < files.length && grePath == null; i++) {
// only look for files that end in '.conf'
if (!files[i].getName().endsWith(".conf")) {
continue;
}
grePath = getPathFromConfigFile(files[i].getPath(), aVersions,
aProperties);
}
return grePath;
}
/**
* @param aVersionToCheck
* @param aVersions
* @return
*/
private static boolean checkVersion(String aVersionToCheck,
GREVersionRange[] aVersions) {
for (int i = 0; i < aVersions.length; i++) {
if (aVersions[i].check(aVersionToCheck)) {
return true;
}
}
return false;
}
/**
* Initialize the Mozilla object with the given XULRunner path. All
   * subsequent Mozilla method invocations will be done against the given XULRunner
* version.
*
* @param aLibXULDirectory path of XULRunner build to use
*
* @throws XPCOMInitializationException if failure occurred during
* initialization
*/
public void initialize(File aLibXULDirectory)
throws XPCOMInitializationException {
File jar = new File(aLibXULDirectory, JAVAXPCOM_JAR);
if (!jar.exists()) {
jar = new File(this.getClass().getProtectionDomain().getCodeSource().getLocation().getPath());
if (!jar.exists())
throw new XPCOMInitializationException("Could not find " + JAVAXPCOM_JAR +
" in " + aLibXULDirectory);
}
URL[] urls = new URL[1];
try {
urls[0] = jar.toURI().toURL();
} catch (MalformedURLException e) {
throw new XPCOMInitializationException(e);
}
ClassLoader loader = new URLClassLoader(urls,
this.getClass().getClassLoader());
try {
Class mozillaClass = Class.forName("org.mozilla.xpcom.internal.MozillaImpl",
true, loader);
mozilla = (IMozilla) mozillaClass.newInstance();
Class greClass = Class.forName("org.mozilla.xpcom.internal.GREImpl",
true, loader);
gre = (IGRE) greClass.newInstance();
Class xpcomClass = Class.forName("org.mozilla.xpcom.internal.XPCOMImpl",
true, loader);
xpcom = (IXPCOM) xpcomClass.newInstance();
Class javaXPCOMClass =
Class.forName("org.mozilla.xpcom.internal.JavaXPCOMMethods",
true, loader);
jxutils = (IJavaXPCOMUtils) javaXPCOMClass.newInstance();
} catch (Exception e) {
throw new XPCOMInitializationException("Could not load " +
"org.mozilla.xpcom.internal.* classes", e);
}
mozilla.initialize(aLibXULDirectory);
}
/**
* Initializes libXUL for embedding purposes.
* <p>
* NOTE: This function must be called from the "main" thread.
* <p>
* NOTE: At the present time, this function may only be called once in
* a given process. Use <code>termEmbedding</code> to clean up and free
* resources allocated by <code>initEmbedding</code>.
*
* @param aLibXULDirectory The directory in which the libXUL shared library
* was found.
* @param aAppDirectory The directory in which the application components
* and resources can be found. This will map to
* the "resource:app" directory service key.
* @param aAppDirProvider A directory provider for the application. This
* provider will be aggregated by a libXUL provider
* which will provide the base required GRE keys.
*
* @throws XPCOMException if a failure occurred during initialization
* @throws XPCOMInitializationException if Mozilla was not properly
* initialized
*/
public void initEmbedding(File aLibXULDirectory, File aAppDirectory,
IAppFileLocProvider aAppDirProvider) throws XPCOMException {
try {
gre.initEmbedding(aLibXULDirectory, aAppDirectory, aAppDirProvider);
} catch (NullPointerException e) {
throw new XPCOMInitializationException("Must call " +
"Mozilla.getInstance().initialize() before using this method", e);
}
}
/**
* Terminates libXUL embedding.
* <p>
* NOTE: Release any references to XPCOM objects that you may be holding
* before calling this function.
*
* @throws XPCOMInitializationException if Mozilla was not properly
* initialized
*/
public void termEmbedding() {
try {
gre.termEmbedding();
} catch (NullPointerException e) {
throw new XPCOMInitializationException("Must call " +
"Mozilla.getInstance().initialize() before using this method", e);
} finally {
mozilla = null;
gre = null;
xpcom = null;
}
}
/**
* Lock a profile directory using platform-specific semantics.
*
* @param aDirectory The profile directory to lock.
*
* @return A lock object. The directory will remain locked until the lock is
* released by invoking the <code>release</code> method, or by the
* termination of the JVM, whichever comes first.
*
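   * <p>A minimal usage sketch (the profile directory path here is hypothetical;
   * <code>release</code> is the lock-release method described above):
   * <pre>
   * ProfileLock lock = mozilla.lockProfileDirectory(new File("/home/user/myprofile"));
   * try {
   *   // use the locked profile directory
   * } finally {
   *   lock.release();
   * }
   * </pre>
   *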
* @throws XPCOMException if profile is already locked (with
* <code>errorcode</code> == <code>NS_ERROR_FILE_ACCESS_DENIED</code>);
* or if a failure occurred
* @throws XPCOMInitializationException if Mozilla was not properly
* initialized
*/
public ProfileLock lockProfileDirectory(File aDirectory)
throws XPCOMException {
try {
return gre.lockProfileDirectory(aDirectory);
} catch (NullPointerException e) {
throw new XPCOMInitializationException("Must call " +
"Mozilla.getInstance().initialize() before using this method", e);
}
}
/**
* Fire notifications to inform the toolkit about a new profile. This
* method should be called after <code>initEmbedding</code> if the
* embedder wishes to run with a profile.
* <p>
* Normally the embedder should call <code>lockProfileDirectory</code>
* to lock the directory before calling this method.
* <p>
* NOTE: There are two possibilities for selecting a profile:
* <ul>
* <li>
* Select the profile before calling <code>initEmbedding</code>.
* The aAppDirProvider object passed to <code>initEmbedding</code>
* should provide the NS_APP_USER_PROFILE_50_DIR key, and
* may also provide the following keys:
* <ul>
* <li>NS_APP_USER_PROFILE_LOCAL_50_DIR
* <li>NS_APP_PROFILE_DIR_STARTUP
* <li>NS_APP_PROFILE_LOCAL_DIR_STARTUP
* </ul>
* In this scenario <code>notifyProfile</code> should be called
* immediately after <code>initEmbedding</code>. Component
* registration information will be stored in the profile and
* JS components may be stored in the fastload cache.
* </li>
* <li>
* Select a profile some time after calling <code>initEmbedding</code>.
* In this case the embedder must install a directory service
* provider which provides NS_APP_USER_PROFILE_50_DIR and optionally
* NS_APP_USER_PROFILE_LOCAL_50_DIR. Component registration information
* will be stored in the application directory and JS components will not
* fastload.
* </li>
* </ul>
*
* @throws XPCOMInitializationException if Mozilla was not properly
* initialized
*/
public void notifyProfile() {
try {
gre.notifyProfile();
} catch (NullPointerException e) {
throw new XPCOMInitializationException("Must call " +
"Mozilla.getInstance().initialize() before using this method", e);
}
}
/**
* Initializes XPCOM. You must call this method before proceeding
* to use XPCOM.
*
* @param aMozBinDirectory The directory containing the component
* registry and runtime libraries;
* or use <code>null</code> to use the working
* directory.
*
* @param aAppFileLocProvider The object to be used by Gecko that specifies
* to Gecko where to find profiles, the component
* registry preferences and so on; or use
* <code>null</code> for the default behaviour.
*
* @return the service manager
*
* @throws XPCOMException <ul>
   * <li> NS_ERROR_NOT_INITIALIZED - if static globals were not initialized,
   * which can happen if XPCOM is reloaded, but did not completely
   * shut down. </li>
* <li> Other error codes indicate a failure during initialisation. </li>
* </ul>
* @throws XPCOMInitializationException if Mozilla was not properly
* initialized
*/
public nsIServiceManager initXPCOM(File aMozBinDirectory,
IAppFileLocProvider aAppFileLocProvider) throws XPCOMException {
try {
return xpcom.initXPCOM(aMozBinDirectory, aAppFileLocProvider);
} catch (NullPointerException e) {
throw new XPCOMInitializationException("Must call " +
"Mozilla.getInstance().initialize() before using this method", e);
}
}
/**
   * Shuts down XPCOM. You must call this method after you are finished
   * using XPCOM.
*
* @param aServMgr The service manager which was returned by initXPCOM.
* This will release servMgr.
*
* @throws XPCOMException if a failure occurred during termination
* @throws XPCOMInitializationException if Mozilla was not properly
* initialized
*/
public void shutdownXPCOM(nsIServiceManager aServMgr) throws XPCOMException {
try {
xpcom.shutdownXPCOM(aServMgr);
} catch (NullPointerException e) {
throw new XPCOMInitializationException("Must call " +
"Mozilla.getInstance().initialize() before using this method", e);
} finally {
mozilla = null;
gre = null;
xpcom = null;
}
}
/**
   * Public method to access the service manager.
*
* @return the service manager
*
* @throws XPCOMException if a failure occurred
* @throws XPCOMInitializationException if Mozilla was not properly
* initialized
*/
public nsIServiceManager getServiceManager() throws XPCOMException {
try {
return xpcom.getServiceManager();
} catch (NullPointerException e) {
throw new XPCOMInitializationException("Must call " +
"Mozilla.getInstance().initialize() before using this method", e);
}
}
/**
   * Public method to access the component manager.
*
* @return the component manager
*
* @throws XPCOMException if a failure occurred
* @throws XPCOMInitializationException if Mozilla was not properly
* initialized
*/
public nsIComponentManager getComponentManager() throws XPCOMException {
try {
return xpcom.getComponentManager();
} catch (NullPointerException e) {
throw new XPCOMInitializationException("Must call " +
"Mozilla.getInstance().initialize() before using this method", e);
}
}
/**
   * Public method to access the component registration manager.
*
* @return the component registration manager
*
* @throws XPCOMException if a failure occurred
* @throws XPCOMInitializationException if Mozilla was not properly
* initialized
*/
public nsIComponentRegistrar getComponentRegistrar() throws XPCOMException {
try {
return xpcom.getComponentRegistrar();
} catch (NullPointerException e) {
throw new XPCOMInitializationException("Must call " +
"Mozilla.getInstance().initialize() before using this method", e);
}
}
// #ifdef VBOX
public int waitForEvents(long tmo) throws XPCOMException {
try {
return xpcom.waitForEvents(tmo);
} catch (NullPointerException e) {
throw new XPCOMInitializationException("Must call " +
"Mozilla.getInstance().initialize() before using this method", e);
}
}
// #endif // VBOX
/**
   * Public method to create an instance of an nsILocalFile.
*
* @param aPath A string which specifies a full file path to a
* location. Relative paths will be treated as an
* error (NS_ERROR_FILE_UNRECOGNIZED_PATH).
* @param aFollowLinks This attribute will determine if the nsLocalFile will
   * auto-resolve symbolic links. By default, this value
   * will be false on all non-Unix systems. On Unix, this
* attribute is effectively a noop.
*
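   * <p>For example (hypothetical absolute path; relative paths raise
   * NS_ERROR_FILE_UNRECOGNIZED_PATH as noted below):
   * <pre>
   * nsILocalFile file = mozilla.newLocalFile("/tmp/example.txt", false);
   * </pre>
   *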
* @return an instance of an nsILocalFile that points to given path
*
* @throws XPCOMException <ul>
* <li> NS_ERROR_FILE_UNRECOGNIZED_PATH - raised for unrecognized paths
* or relative paths (must supply full file path) </li>
* </ul>
* @throws XPCOMInitializationException if Mozilla was not properly
* initialized
*/
public nsILocalFile newLocalFile(String aPath, boolean aFollowLinks)
throws XPCOMException {
try {
return xpcom.newLocalFile(aPath, aFollowLinks);
} catch (NullPointerException e) {
throw new XPCOMInitializationException("Must call " +
"Mozilla.getInstance().initialize() before using this method", e);
}
}
/**
* If you create a class that implements nsISupports, you will need to provide
* an implementation of the <code>queryInterface</code> method. This helper
* function provides a simple implementation. Therefore, if your class does
* not need to do anything special with <code>queryInterface</code>, your
* implementation would look like:
* <pre>
* public nsISupports queryInterface(String aIID) {
* return XPCOM.queryInterface(this, aIID);
* }
* </pre>
*
* @param aObject object to query
* @param aIID requested interface IID
*
* @return <code>aObject</code> if the given object supports that
* interface;
* <code>null</code> otherwise.
*/
public static nsISupports queryInterface(nsISupports aObject, String aIID) {
ArrayList<Class> classes = new ArrayList<Class>();
classes.add(aObject.getClass());
while (!classes.isEmpty()) {
Class clazz = classes.remove(0);
// Skip over any class/interface in the "java.*" and "javax.*" domains.
String className = clazz.getName();
if (className.startsWith("java.") || className.startsWith("javax.")) {
continue;
}
// If given IID matches that of the current interface, then we
// know that aObject implements the interface specified by the given IID.
if (clazz.isInterface() && className.startsWith("org.mozilla")) {
String iid = Mozilla.getInterfaceIID(clazz);
if (iid != null && aIID.equals(iid)) {
return aObject;
}
}
// clazz didn't match, so add the interfaces it implements
Class[] interfaces = clazz.getInterfaces();
for (int i = 0; i < interfaces.length; i++ ) {
classes.add(interfaces[i]);
}
// Also add its superclass
Class superclass = clazz.getSuperclass();
if (superclass != null) {
classes.add(superclass);
}
}
return null;
}
/**
* Gets the interface IID for a particular Java interface. This is similar
* to NS_GET_IID in the C++ Mozilla files.
*
* @param aInterface interface which has defined an IID
*
* @return IID for given interface
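   *
   * <p>For example (assuming the generated <code>nsISupports</code> interface declares
   * the usual <code>NS_ISUPPORTS_IID</code> constant):
   * <pre>
   * String iid = Mozilla.getInterfaceIID(nsISupports.class);
   * // iid now holds the value of nsISupports.NS_ISUPPORTS_IID
   * </pre>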
*/
public static String getInterfaceIID(Class aInterface) {
// Get short class name (i.e. "bar", not "org.blah.foo.bar")
StringBuffer iidName = new StringBuffer();
String fullClassName = aInterface.getName();
int index = fullClassName.lastIndexOf(".");
String className = index > 0 ? fullClassName.substring(index + 1)
: fullClassName;
// Create iid field name
if (className.startsWith("ns")) {
iidName.append("NS_");
iidName.append(className.substring(2).toUpperCase());
} else {
iidName.append(className.toUpperCase());
}
iidName.append("_IID");
String iid;
try {
Field iidField = aInterface.getDeclaredField(iidName.toString());
iid = (String) iidField.get(null);
} catch (NoSuchFieldException e) {
// Class may implement non-Mozilla interfaces, which would not have an
      // IID field. In that case, just use null.
iid = null;
} catch (IllegalAccessException e) {
// Not allowed to access that field for some reason. Write out an
// error message, but don't fail.
System.err.println("ERROR: Could not get field " + iidName.toString());
iid = null;
}
return iid;
}
public long getNativeHandleFromAWT(Object widget) {
try {
return mozilla.getNativeHandleFromAWT(widget);
} catch (NullPointerException e) {
throw new XPCOMInitializationException("Must call " +
"Mozilla.getInstance().initialize() before using this method", e);
}
}
public long wrapJavaObject(Object aJavaObject, String aIID) {
try {
return jxutils.wrapJavaObject(aJavaObject, aIID);
} catch (NullPointerException e) {
throw new XPCOMInitializationException("Must call " +
"Mozilla.getInstance().initialize() before using this method", e);
}
}
public Object wrapXPCOMObject(long aXPCOMObject, String aIID) {
try {
return jxutils.wrapXPCOMObject(aXPCOMObject, aIID);
} catch (NullPointerException e) {
throw new XPCOMInitializationException("Must call " +
"Mozilla.getInstance().initialize() before using this method", e);
}
}
}
| 14,451 |
365 | <gh_stars>100-1000
package io.eventuate.examples.tram.sagas.ordersandcustomers.apigateway.proxies;
public class UnknownProxyException extends RuntimeException{
public UnknownProxyException(String message) {
super(message);
}
}
| 74 |
1,172 | <reponame>michaelfig/caja<gh_stars>1000+
// Copyright (C) 2009 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.caja.util;
import com.google.common.collect.Maps;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.Iterator;
import java.util.Map;
import junit.framework.TestCase;
@SuppressWarnings("static-method")
public class CollectionsTest extends TestCase {
public final void testListMultimaps() {
Multimap<String, String> m = Multimaps.newListHashMultimap();
assertTrue(m.isEmpty());
assertEquals("[]", m.keySet().toString());
assertEquals("[]", m.get("foo").toString());
m.put("foo", "bar");
assertEquals("[foo]", m.keySet().toString());
assertEquals("[bar]", m.get("foo").toString());
assertFalse(m.isEmpty());
m.put("foo", "bar");
assertEquals("[foo]", m.keySet().toString());
assertEquals("[bar, bar]", m.get("foo").toString());
assertFalse(m.isEmpty());
m.remove("foo", "bar");
assertEquals("[foo]", m.keySet().toString());
assertEquals("[bar]", m.get("foo").toString());
assertFalse(m.isEmpty());
m.remove("foo", "bar");
assertEquals("[]", m.keySet().toString());
assertEquals("[]", m.get("foo").toString());
assertTrue(m.isEmpty());
m.putAll("baz", Arrays.asList("boo", "far", "boo", "bob"));
assertEquals("[baz]", m.keySet().toString());
assertEquals("[boo, far, boo, bob]", m.get("baz").toString());
assertFalse(m.isEmpty());
m.removeAll("baz", Arrays.asList("bar", "boo", "far"));
assertEquals("[baz]", m.keySet().toString());
assertEquals("[bob]", m.get("baz").toString());
assertFalse(m.isEmpty());
m.removeAll("baz", Arrays.asList("bar", "bob"));
assertEquals("[]", m.keySet().toString());
assertEquals("[]", m.get("baz").toString());
assertTrue(m.isEmpty());
}
public final void testSetMultimaps() {
Multimap<String, String> m = Multimaps.newSetHashMultimap();
assertTrue(m.isEmpty());
assertEquals("[]", m.keySet().toString());
assertEquals("[]", m.get("foo").toString());
m.put("foo", "bar");
assertEquals("[foo]", m.keySet().toString());
assertEquals("[bar]", m.get("foo").toString());
assertFalse(m.isEmpty());
m.put("foo", "bar");
assertEquals("[foo]", m.keySet().toString());
assertEquals("[bar]", m.get("foo").toString());
assertFalse(m.isEmpty());
m.remove("foo", "bar");
assertEquals("[]", m.keySet().toString());
assertEquals("[]", m.get("foo").toString());
assertTrue(m.isEmpty());
m.putAll("baz", Arrays.asList("boo", "far", "boo", "bob"));
assertEquals("[baz]", m.keySet().toString());
assertEquals("[boo, far, bob]", m.get("baz").toString());
assertFalse(m.isEmpty());
m.removeAll("baz", Arrays.asList("bar", "boo", "far"));
assertEquals("[baz]", m.keySet().toString());
assertEquals("[bob]", m.get("baz").toString());
assertFalse(m.isEmpty());
m.removeAll("baz", Arrays.asList("bar", "bob"));
assertEquals("[]", m.keySet().toString());
assertEquals("[]", m.get("baz").toString());
assertTrue(m.isEmpty());
}
public final void testPetulantCollection() {
Multimap<String, String> m
= new Multimaps.MultimapImpl<String, String, Collection<String>>(
new Multimaps.Maker<Map<String, Collection<String>>>() {
public Map<String, Collection<String>> newInstance() {
return Maps.newHashMap();
}
},
new Multimaps.Maker<Collection<String>>() {
public Collection<String> newInstance() {
return new PetulantCollection();
}
});
assertTrue(m.isEmpty());
assertEquals("[]", m.keySet().toString());
assertEquals("[]", m.get("foo").toString());
m.put("foo", "bar");
assertEquals("[]", m.keySet().toString());
assertEquals("[]", m.get("foo").toString());
assertTrue(m.isEmpty());
m.put("foo", "bar");
assertEquals("[]", m.keySet().toString());
assertEquals("[]", m.get("foo").toString());
assertTrue(m.isEmpty());
m.remove("foo", "bar");
assertEquals("[]", m.keySet().toString());
assertEquals("[]", m.get("foo").toString());
assertTrue(m.isEmpty());
m.remove("foo", "bar");
assertEquals("[]", m.keySet().toString());
assertEquals("[]", m.get("foo").toString());
assertTrue(m.isEmpty());
m.putAll("baz", Arrays.asList("boo", "far"));
assertEquals("[]", m.keySet().toString());
assertEquals("[]", m.get("foo").toString());
assertTrue(m.isEmpty());
m.removeAll("baz", Arrays.asList("bar", "boo", "far"));
assertEquals("[]", m.keySet().toString());
assertEquals("[]", m.get("foo").toString());
assertTrue(m.isEmpty());
}
public final void testClone() {
Multimap<String, String> m = Multimaps.newListLinkedHashMultimap();
m.put("a", "A");
m.put("b", "B");
m.put("b", "BEE");
m.put("d", "D");
Multimap<String, String> m2 = m.clone();
m2.put("a", "AYE");
m2.put("c", "C");
m2.remove("b", "BEE");
assertEquals("[A]", m.get("a").toString());
assertEquals("[B, BEE]", m.get("b").toString());
assertEquals("[]", m.get("c").toString());
assertEquals("[D]", m.get("d").toString());
assertEquals("[A, AYE]", m2.get("a").toString());
assertEquals("[B]", m2.get("b").toString());
assertEquals("[C]", m2.get("c").toString());
assertEquals("[D]", m2.get("d").toString());
}
private static class PetulantCollection implements Collection<String> {
public boolean add(String e) { return false; }
public boolean addAll(Collection<? extends String> c) { return false; }
public void clear() { /* noop */ }
public boolean contains(Object o) { return false; }
public boolean containsAll(Collection<?> c) { return false; }
public boolean isEmpty() { return true; }
public Iterator<String> iterator() {
return Collections.<String>emptyList().iterator();
}
public boolean remove(Object o) { return false; }
public boolean removeAll(Collection<?> c) { return false; }
public boolean retainAll(Collection<?> c) { return false; }
public int size() { return 0; }
public Object[] toArray() { return new Object[0]; }
public <T> T[] toArray(T[] a) { return a; }
}
}
| 2,716 |
903 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.lucene.facet.range;
import java.util.Arrays;
import java.util.Comparator;
/**
* Counter for numeric ranges. Works for both single- and multi-valued cases (assuming you use it
* correctly).
*
* <p>Usage notes: When counting a document field that only has a single value, callers should call
* addSingleValued() with the value. Whenever a document field has multiple values, callers should
* call startMultiValuedDoc() at the beginning of processing the document, followed by
* addMultiValued() with each value before finally calling endMultiValuedDoc() at the end of
* processing the document. The call to endMultiValuedDoc() will respond with a boolean indicating
* whether-or-not the specific document matched against at least one of the ranges being counted.
* Finally, after processing all documents, the caller should call finish(). This final call will
 * ensure the contents of the user-provided {@code countBuffer} contain accurate counts (each index
* corresponding to the provided {@code LongRange} in {@code ranges}). The final call to finish()
* will also report how many additional documents did not match against any ranges. The combination
* of the endMultiValuedDoc() boolean responses and the number reported by finish() communicates the
* total number of missing documents. Note that the call to finish() will not report any documents
* already reported missing by endMultiValuedDoc().
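 *
 * <p>A minimal multi-valued usage sketch (the {@code ranges} and {@code allDocValues} variables
 * are hypothetical placeholders for the caller's requested ranges and per-document values, which
 * are assumed sorted within each doc; the protocol itself follows the usage notes above):
 *
 * <pre>
 * int[] countBuffer = new int[ranges.length];
 * LongRangeCounter counter = LongRangeCounter.create(ranges, countBuffer);
 * int missingDocs = 0;
 * for (long[] docValues : allDocValues) {
 *   counter.startMultiValuedDoc();
 *   for (long v : docValues) {
 *     counter.addMultiValued(v);
 *   }
 *   if (counter.endMultiValuedDoc() == false) {
 *     missingDocs++;
 *   }
 * }
 * missingDocs += counter.finish();
 * // countBuffer[i] now holds the hit count for ranges[i]
 * </pre>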
*/
abstract class LongRangeCounter {
/** accumulated counts for all of the ranges */
private final int[] countBuffer;
/**
* for multi-value docs, we keep track of the last elementary interval we've counted so we can use
* that as a lower-bound when counting subsequent values. this takes advantage of the fact that
* values within a given doc are sorted.
*/
protected int multiValuedDocLastSeenElementaryInterval;
static LongRangeCounter create(LongRange[] ranges, int[] countBuffer) {
if (hasOverlappingRanges(ranges)) {
return new OverlappingLongRangeCounter(ranges, countBuffer);
} else {
return new ExclusiveLongRangeCounter(ranges, countBuffer);
}
}
protected LongRangeCounter(int[] countBuffer) {
// We'll populate the user-provided count buffer with range counts:
this.countBuffer = countBuffer;
}
/** Start processing a new doc. It's unnecessary to call this for single-value cases. */
void startMultiValuedDoc() {
multiValuedDocLastSeenElementaryInterval = -1;
}
/**
* Finish processing a new doc. Returns whether-or-not the document contributed a count to at
* least one range. It's unnecessary to call this for single-value cases.
*/
abstract boolean endMultiValuedDoc();
/** Count a single valued doc */
void addSingleValued(long v) {
// NOTE: this works too, but it's ~6% slower on a simple
// test with a high-freq TermQuery w/ range faceting on
// wikimediumall:
/*
int index = Arrays.binarySearch(boundaries, v);
if (index < 0) {
index = -index-1;
}
leafCounts[index]++;
*/
// Binary search to find matched elementary range; we
// are guaranteed to find a match because the last
// boundary is Long.MAX_VALUE:
long[] boundaries = boundaries();
int lo = 0;
int hi = boundaries.length - 1;
while (true) {
int mid = (lo + hi) >>> 1;
if (v <= boundaries[mid]) {
if (mid == 0) {
processSingleValuedHit(mid);
return;
} else {
hi = mid - 1;
}
} else if (v > boundaries[mid + 1]) {
lo = mid + 1;
} else {
processSingleValuedHit(mid + 1);
return;
}
}
}
/** Count a multi-valued doc value */
void addMultiValued(long v) {
if (rangeCount() == 0) {
return; // don't bother if there aren't any requested ranges
}
long[] boundaries = boundaries();
// First check if we've "advanced" beyond the last elementary interval we counted for this doc.
// If we haven't, there's no sense doing anything else:
if (multiValuedDocLastSeenElementaryInterval != -1
&& v <= boundaries[multiValuedDocLastSeenElementaryInterval]) {
return;
}
// Also check if we've already counted the last elementary interval. If so, there's nothing
// else to count for this doc:
final int nextCandidateElementaryInterval = multiValuedDocLastSeenElementaryInterval + 1;
if (nextCandidateElementaryInterval == boundaries.length) {
return;
}
// Binary search in the range of the next candidate interval up to the last interval:
int lo = nextCandidateElementaryInterval;
int hi = boundaries.length - 1;
while (true) {
int mid = (lo + hi) >>> 1;
if (v <= boundaries[mid]) {
if (mid == nextCandidateElementaryInterval) {
processMultiValuedHit(mid);
multiValuedDocLastSeenElementaryInterval = mid;
return;
} else {
hi = mid - 1;
}
} else if (v > boundaries[mid + 1]) {
lo = mid + 1;
} else {
int idx = mid + 1;
processMultiValuedHit(idx);
multiValuedDocLastSeenElementaryInterval = idx;
return;
}
}
}
/**
* Finish processing all documents. This will return the number of docs that didn't contribute to
* any ranges (that weren't already reported when calling endMultiValuedDoc()).
*/
abstract int finish();
/** Provide boundary information for elementary intervals (max inclusive value per interval) */
protected abstract long[] boundaries();
/** Process a single-value "hit" against an elementary interval. */
protected abstract void processSingleValuedHit(int elementaryIntervalNum);
/** Process a multi-value "hit" against an elementary interval. */
protected abstract void processMultiValuedHit(int elementaryIntervalNum);
/** Increment the specified range by one. */
protected final void increment(int rangeNum) {
countBuffer[rangeNum]++;
}
/** Increment the specified range by the specified count. */
protected final void increment(int rangeNum, int count) {
countBuffer[rangeNum] += count;
}
/** Number of ranges requested by the caller. */
protected final int rangeCount() {
return countBuffer.length;
}
/** Determine whether-or-not any requested ranges overlap */
private static boolean hasOverlappingRanges(LongRange[] ranges) {
if (ranges.length == 0) {
return false;
}
// Copy before sorting so we don't mess with the caller's original ranges:
LongRange[] sortedRanges = new LongRange[ranges.length];
System.arraycopy(ranges, 0, sortedRanges, 0, ranges.length);
Arrays.sort(sortedRanges, Comparator.comparingLong(r -> r.min));
long previousMax = sortedRanges[0].max;
for (int i = 1; i < sortedRanges.length; i++) {
// Ranges overlap if the next min is <= the previous max (note that LongRange models
// closed ranges, so equal limit points are considered overlapping):
if (sortedRanges[i].min <= previousMax) {
return true;
}
previousMax = sortedRanges[i].max;
}
return false;
}
protected static final class InclusiveRange {
final long start;
final long end;
InclusiveRange(long start, long end) {
assert end >= start;
this.start = start;
this.end = end;
}
@Override
public String toString() {
return start + " to " + end;
}
}
}
| 2,590 |
435 | <gh_stars>100-1000
{
"copyright_text": "Standard YouTube License",
"description": "http://pycon.lt/2017.html",
"duration": 544,
"language": "eng",
"recorded": "2017-05-13T14:15:00+03:00",
"related_urls": [
{
"label": "schedule",
"url": "http://pycon.lt/2017.html"
}
],
"speakers": [
"<NAME>\u0161ka"
],
"tags": [],
"thumbnail_url": "https://i.ytimg.com/vi/sed4LdI8qC4/maxresdefault.jpg",
"title": "Run Your Pyramid Application on Production Server",
"videos": [
{
"type": "youtube",
"url": "https://www.youtube.com/watch?v=sed4LdI8qC4"
}
]
}
| 280 |
1,338 | <reponame>Yn0ga/haiku
/*
* Copyright 2000, <NAME>. All rights reserved.
* Distributed under the terms of the MIT License.
*/
//Useful until Be gets around to making these sorts of things
//globals akin to be_plain_font, etc.
#ifndef _COLORS_H_
#define _COLORS_H_
#include <GraphicsDefs.h>
// ui_color(B_PANEL_BACKGROUND_COLOR)
// ui_color(B_MENU_BACKGROUND_COLOR)
// ui_color(B_WINDOW_TAB_COLOR)
// ui_color(B_KEYBOARD_NAVIGATION_COLOR)
// ui_color(B_DESKTOP_COLOR)
// tint_color(ui_color(B_PANEL_BACKGROUND_COLOR), B_DARKEN_1_TINT)
//Other colors
const rgb_color kBlack = {0, 0, 0, 255};
const rgb_color kWhite = {255,255,255, 255};
const rgb_color kRed = {255,0, 0, 255};
const rgb_color kGreen = {0, 203,0, 255};
const rgb_color kLightGreen = {90, 240,90, 255};
const rgb_color kBlue = {49, 61, 225, 255};
const rgb_color kLightBlue = {64, 162,255, 255};
const rgb_color kPurple = {144,64, 221, 255};
const rgb_color kLightPurple = {166,74, 255, 255};
const rgb_color kLavender = {193,122,255, 255};
const rgb_color kYellow = {255,203,0, 255};
const rgb_color kOrange = {255,163,0, 255};
const rgb_color kFlesh = {255,231,186, 255};
const rgb_color kTan = {208,182,121, 255};
const rgb_color kBrown = {154,110,45, 255};
const rgb_color kLightMetallicBlue = {143,166,240, 255};
const rgb_color kMedMetallicBlue = {75, 96, 154, 255};
const rgb_color kDarkMetallicBlue = {78, 89, 126, 255};
const rgb_color kGebHighlight = {152, 152, 203, 255};
const rgb_color kBordeaux = {80, 0, 0, 255};
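// Example usage (illustrative sketch only, inside a hypothetical BView subclass's
// Draw() hook; SetHighColor(), FillRect() and Bounds() are standard BView members):
//   SetHighColor(kMedMetallicBlue);
//   FillRect(Bounds());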
#endif // _COLORS_H_
| 726 |
453 | <filename>kernel/kernel/multitasking/tasks/record.h<gh_stars>100-1000
#ifndef SCHED_RECORD_H
#define SCHED_RECORD_H
#include "task.h"
#include <stdint.h>
#define MAX_TASK_HISTORY 256
#define MAX_PROC_NAME 64
typedef struct task_history {
char history[MAX_TASK_HISTORY][MAX_PROC_NAME];
int vals[MAX_TASK_HISTORY];
int count;
uint32_t time;
} task_history_t;
void sched_record_usage(task_t* current_task, uint32_t runtime);
void sched_log_history();
task_history_t* sched_get_task_history();
#endif
| 207 |
345 | <gh_stars>100-1000
import unittest
import programytest.externals as Externals
from programy.bot import Bot
from programy.config.bot.bot import BotConfiguration
from programy.nlp.translate.extension import TranslateExtension
from programytest.client import TestClient
class TranslateExtensionTests(unittest.TestCase):
def setUp(self):
self._client = TestClient()
config = BotConfiguration()
config._from_translator._classname = "programy.nlp.translate.textblob_translator.TextBlobTranslator"
self.client_context = self._client.create_client_context("testuser")
self.client_context._bot = Bot(config=config, client=self._client)
self.client_context._bot.initiate_translator()
def test_invalid_command(self):
extension = TranslateExtension()
self.assertIsNotNone(extension)
result = extension.execute(self.client_context, "XXX")
self.assertIsNotNone(result)
self.assertEqual("TRANSLATE INVALID COMMAND", result)
result = extension.execute(self.client_context, "TRANSLATE")
self.assertIsNotNone(result)
self.assertEqual("TRANSLATE INVALID COMMAND", result)
result = extension.execute(self.client_context, "TRANSLATE FROM")
self.assertIsNotNone(result)
self.assertEqual("TRANSLATE INVALID COMMAND", result)
result = extension.execute(self.client_context, "TRANSLATE FROM EN")
self.assertIsNotNone(result)
self.assertEqual("TRANSLATE INVALID COMMAND", result)
result = extension.execute(self.client_context, "TRANSLATE FROM EN TO")
self.assertIsNotNone(result)
self.assertEqual("TRANSLATE INVALID COMMAND", result)
result = extension.execute(self.client_context, "TRANSLATE FROM EN TO FR")
self.assertIsNotNone(result)
self.assertEqual("TRANSLATE INVALID COMMAND", result)
result = extension.execute(self.client_context, "OTHER FROM EN TO FR HELLO I LOVE YOU")
self.assertIsNotNone(result)
self.assertEqual("TRANSLATE INVALID COMMAND", result)
result = extension.execute(self.client_context, "TRANSLATE OTHER EN TO FR HELLO I LOVE YOU")
self.assertIsNotNone(result)
self.assertEqual("TRANSLATE INVALID COMMAND", result)
result = extension.execute(self.client_context, "TRANSLATE FROM EN OTHER FR HELLO I LOVE YOU")
self.assertIsNotNone(result)
self.assertEqual("TRANSLATE INVALID COMMAND", result)
def test_enabled(self):
extension = TranslateExtension()
self.assertIsNotNone(extension)
result = extension.execute(self.client_context, "TRANSLATE ENABLED")
self.assertEquals("TRANSLATE ENABLED", result)
self.client_context.bot._from_translator = None
result = extension.execute(self.client_context, "TRANSLATE ENABLED")
self.assertEquals("TRANSLATE DISABLED", result)
@unittest.skipIf(Externals.google_translate is False or Externals.all_externals is False, Externals.google_translate_disabled)
def test_translate(self):
extension = TranslateExtension()
self.assertIsNotNone(extension)
result = extension.execute(self.client_context, "TRANSLATE ENABLED")
self.assertIsNotNone(result)
self.assertEqual("TRANSLATE ENABLED", result)
result = extension.execute(self.client_context, "TRANSLATE FROM EN TO FR HELLO I LOVE YOU")
self.assertIsNotNone(result)
self.assertEqual("TRANSLATED SALUT JE T'AIME", result)
def test_translate_disabled(self):
extension = TranslateExtension()
self.assertIsNotNone(extension)
self.client_context.bot._from_translator = None
result = extension.execute(self.client_context, "TRANSLATE FROM EN TO FR HELLO I LOVE YOU")
self.assertIsNotNone(result)
self.assertEqual("TRANSLATE DISABLED", result)
| 1,514 |
350 | <reponame>qweraqq/apm-agent-python<filename>tests/client/span_compression_tests.py
# BSD 3-Clause License
#
# Copyright (c) 2021, Elasticsearch BV
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import pytest
import elasticapm
from elasticapm.conf.constants import SPAN, TRANSACTION
@pytest.mark.parametrize(
"elasticapm_client",
[
{
"span_compression_enabled": True,
"span_compression_same_kind_max_duration": "5ms",
"span_compression_exact_match_max_duration": "5ms",
}
],
indirect=True,
)
def test_exact_match(elasticapm_client):
transaction = elasticapm_client.begin_transaction("test")
with elasticapm.capture_span(
"test",
span_type="a",
span_subtype="b",
span_action="c",
leaf=True,
duration=2,
extra={"destination": {"service": {"resource": "x"}}},
) as span1:
assert span1.is_compression_eligible()
with elasticapm.capture_span(
"test",
span_type="a",
span_subtype="b",
span_action="c",
leaf=True,
duration=3,
extra={"destination": {"service": {"resource": "x"}}},
) as span2:
assert span2.is_compression_eligible()
assert span1.is_exact_match(span2)
elasticapm_client.end_transaction("test")
spans = elasticapm_client.events[SPAN]
assert len(spans) == 1
span = spans[0]
assert "composite" in span
assert span["composite"]["count"] == 2
assert span["composite"]["sum"] == 5000
assert span["composite"]["compression_strategy"] == "exact_match"
@pytest.mark.parametrize(
"elasticapm_client",
[
{
"span_compression_enabled": True,
"span_compression_same_kind_max_duration": "5ms",
"span_compression_exact_match_max_duration": "5ms",
}
],
indirect=True,
)
def test_same_kind(elasticapm_client):
transaction = elasticapm_client.begin_transaction("test")
with elasticapm.capture_span(
"test1",
span_type="a",
span_subtype="b",
span_action="c",
leaf=True,
duration=2,
extra={"destination": {"service": {"resource": "x"}}},
) as span1:
assert span1.is_compression_eligible()
with elasticapm.capture_span(
"test2",
span_type="a",
span_subtype="b",
span_action="c",
leaf=True,
duration=3,
extra={"destination": {"service": {"resource": "x"}}},
) as span2:
assert span2.is_compression_eligible()
assert not span1.is_exact_match(span2)
assert span1.is_same_kind(span2)
elasticapm_client.end_transaction("test")
spans = elasticapm_client.events[SPAN]
assert len(spans) == 1
span = spans[0]
assert span["name"] == "Calls to x"
assert "composite" in span
assert span["composite"]["count"] == 2
assert span["composite"]["sum"] == 5000
assert span["composite"]["compression_strategy"] == "same_kind"
@pytest.mark.parametrize(
"elasticapm_client",
[
{
"span_compression_enabled": True,
"span_compression_same_kind_max_duration": "5ms",
"span_compression_exact_match_max_duration": "5ms",
}
],
indirect=True,
)
def test_exact_match_after_same_kind(elasticapm_client):
# if a span that is an exact match is attempted to be compressed with a same_kind composite, it stays same_kind
transaction = elasticapm_client.begin_transaction("test")
with elasticapm.capture_span(
"test1",
span_type="a",
span_subtype="b",
span_action="c",
leaf=True,
duration=2,
extra={"destination": {"service": {"resource": "x"}}},
) as span1:
assert span1.is_compression_eligible()
with elasticapm.capture_span(
"test2",
span_type="a",
span_subtype="b",
span_action="c",
leaf=True,
duration=3,
extra={"destination": {"service": {"resource": "x"}}},
) as span2:
assert span2.is_compression_eligible()
assert not span1.is_exact_match(span2)
assert span1.is_same_kind(span2)
with elasticapm.capture_span(
"test1",
span_type="a",
span_subtype="b",
span_action="c",
leaf=True,
duration=2,
extra={"destination": {"service": {"resource": "x"}}},
) as span3:
assert span3.is_compression_eligible()
elasticapm_client.end_transaction("test")
spans = elasticapm_client.events[SPAN]
assert len(spans) == 1
span = spans[0]
assert span["composite"]["compression_strategy"] == "same_kind"
assert span["composite"]["count"] == 3
@pytest.mark.parametrize(
"elasticapm_client",
[
{
"span_compression_enabled": True,
"span_compression_same_kind_max_duration": "5ms",
"span_compression_exact_match_max_duration": "5ms",
}
],
indirect=True,
)
def test_nested_spans(elasticapm_client):
transaction = elasticapm_client.begin_transaction("test")
with elasticapm.capture_span("test", "x.y.z") as span1:
with elasticapm.capture_span(
"test1",
span_type="a",
span_subtype="b",
span_action="c",
leaf=True,
duration=2,
extra={"destination": {"service": {"resource": "x"}}},
) as span2:
pass
with elasticapm.capture_span(
"test2",
span_type="a",
span_subtype="b",
span_action="c",
leaf=True,
duration=2,
extra={"destination": {"service": {"resource": "x"}}},
) as span3:
pass
assert span1.compression_buffer is span2
assert span2.composite
# assert transaction.compression_buffer is span1
# assert not span1.compression_buffer
elasticapm_client.end_transaction("test")
spans = elasticapm_client.events[SPAN]
assert len(spans) == 2
@pytest.mark.parametrize(
"elasticapm_client",
[
{
"span_compression_enabled": True,
"span_compression_same_kind_max_duration": "5ms",
"span_compression_exact_match_max_duration": "5ms",
}
],
indirect=True,
)
def test_buffer_is_reported_if_next_child_ineligible(elasticapm_client):
transaction = elasticapm_client.begin_transaction("test")
with elasticapm.capture_span("test", "x.y.z") as span1:
with elasticapm.capture_span(
"test",
"x.y.z",
leaf=True,
duration=2,
extra={"destination": {"service": {"resource": "x"}}},
) as span2:
pass
assert span1.compression_buffer is span2
with elasticapm.capture_span("test", "x.y.z") as span3:
pass
assert span1.compression_buffer is None
elasticapm_client.end_transaction("test")
spans = elasticapm_client.events[SPAN]
assert len(spans) == 3
@pytest.mark.parametrize(
"elasticapm_client",
[
{
"span_compression_enabled": True,
"span_compression_same_kind_max_duration": "5ms",
"span_compression_exact_match_max_duration": "5ms",
}
],
indirect=True,
)
def test_compressed_spans_not_counted(elasticapm_client):
elasticapm_client.begin_transaction("test")
with elasticapm.capture_span(
"test1",
span_type="a",
span_subtype="b",
span_action="c",
leaf=True,
duration=2,
extra={"destination": {"service": {"resource": "x"}}},
) as span1:
pass
with elasticapm.capture_span(
"test2",
span_type="a",
span_subtype="b",
span_action="c",
leaf=True,
duration=3,
extra={"destination": {"service": {"resource": "x"}}},
) as span2:
pass
elasticapm_client.end_transaction("test")
transaction = elasticapm_client.events[TRANSACTION][0]
spans = elasticapm_client.events[SPAN]
assert len(spans) == transaction["span_count"]["started"] == 1
assert transaction["span_count"]["dropped"] == 0
@pytest.mark.parametrize(
"elasticapm_client",
[
{
"span_compression_enabled": False,
"span_compression_same_kind_max_duration": "5ms",
"span_compression_exact_match_max_duration": "5ms",
}
],
indirect=True,
)
def test_span_compression_disabled(elasticapm_client):
transaction = elasticapm_client.begin_transaction("test")
with elasticapm.capture_span(
"test",
span_type="a",
span_subtype="b",
span_action="c",
leaf=True,
duration=2,
extra={"destination": {"service": {"resource": "x"}}},
) as span1:
assert not span1.is_compression_eligible()
with elasticapm.capture_span(
"test",
span_type="a",
span_subtype="b",
span_action="c",
leaf=True,
duration=3,
extra={"destination": {"service": {"resource": "x"}}},
) as span2:
assert not span2.is_compression_eligible()
elasticapm_client.end_transaction("test")
spans = elasticapm_client.events[SPAN]
assert len(spans) == 2
span = spans[0]
assert "composite" not in span
| 4,844 |
829 | namespace global_configuration {
const int INT_BIT = 64;
} | 18 |
1,006 | /****************************************************************************
* arch/hc/src/m9s12/m9s12_gpioirq.c
*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership. The
* ASF licenses this file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance with the
* License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
****************************************************************************/
/****************************************************************************
* Included Files
****************************************************************************/
#include <nuttx/config.h>
#include <stdint.h>
#include <errno.h>
#include <nuttx/arch.h>
#include <nuttx/irq.h>
#include "up_arch.h"
#include "m9s12.h"
#include "m9s12_pim.h"
#include "m9s12_mebi.h"
/****************************************************************************
* Pre-processor Definitions
****************************************************************************/
/****************************************************************************
* Public Data
****************************************************************************/
/****************************************************************************
* Private Data
****************************************************************************/
/****************************************************************************
* Private Functions
****************************************************************************/
/****************************************************************************
 * Name: hcs12_mapirq
*
* Description:
* Map an IRQ number to a port address and a bit number.
*
****************************************************************************/
#ifdef CONFIG_HCS12_GPIOIRQ
static int hcs12_mapirq(int irq, uint16_t *regaddr, uint8_t *pin)
{
if (irq >= HCC12_IRQ_PGFIRST)
{
/* Port G: Pins 0-7 */
#ifdef CONFIG_HCS12_PORTG_INTS
if (irq < HCC12_IRQ_PHFIRST)
{
*regaddr = HCS12_PIM_PORTG_IE;
*pin = irq - HCC12_IRQ_PGFIRST;
return OK;
}
#endif
/* Port H: Pins 0-6 */
#ifdef CONFIG_HCS12_PORTH_INTS
if (irq < HCC12_IRQ_PJFIRST)
{
*regaddr = HCS12_PIM_PORTH_IE;
*pin = irq - HCC12_IRQ_PHFIRST;
return OK;
}
#endif
/* Port J: Pins 0-3 and 6-7 */
#ifdef CONFIG_HCS12_PORTJ_INTS
if (irq < HCC12_IRQ_PJFIRST)
{
uint8_t pjpin = irq - HCC12_IRQ_PJFIRST;
if (irq >= HCS12_IRQ_PJ6)
{
pjpin += 2;
}
*regaddr = HCS12_PIM_PORTJ_IE;
*pin = pjpin;
return OK;
}
#endif
}
return -EINVAL;
}
#endif /* CONFIG_HCS12_GPIOIRQ */
/****************************************************************************
 * Name: hcs12_interrupt
 *
 * Description:
 *   Receive and dispatch second-level GPIO interrupts for ports G, H, and J
*
****************************************************************************/
#ifdef CONFIG_HCS12_GPIOIRQ
static int hcs12_interrupt(uint16_t base,
int irq0, uint8_t valid, void *context)
{
uint8_t pending;
uint8_t bit;
int irq;
/* Get the set of enabled (unmasked) interrupts pending on this port */
  pending = getreg8(base + HCS12_PIM_IF_OFFSET) &
            getreg8(base + HCS12_PIM_IE_OFFSET);
/* Then check each bit in the set of interrupts */
for (bit = 1, irq = irq0; pending != 0; bit <<= 1)
{
      /* We may need to skip over some bits in the interrupt register
       * (without incrementing the irq value).
       */
if ((valid & bit) != 0)
{
/* This is a real interrupt bit -- Check if an unmasked interrupt
* is pending.
*/
if ((pending & bit) != 0)
{
/* Yes.. clear the pending interrupt by writing '1' to the
* flags registers.
*/
putreg8(bit, base + HCS12_PIM_IF_OFFSET);
/* Re-deliver the IRQ
* (recurses! We got here from irq_dispatch!)
*/
irq_dispatch(irq, context);
/* Remove this from the set of pending interrupts */
pending &= ~bit;
}
/* Bump up the IRQ number for the next pass through the loop */
irq++;
}
}
return OK;
}
#ifdef CONFIG_HCS12_PORTG_INTS
static int hcs12_pginterrupt(int irq, void *context, FAR void *arg)
{
return hcs12_interrupt(HCS12_PIM_PORTG_BASE, HCS12_IRQ_PG0,
HCS12_IRQ_PGSET, context);
}
#endif
#ifdef CONFIG_HCS12_PORTH_INTS
static int hcs12_phinterrupt(int irq, void *context, FAR void *arg)
{
return hcs12_interrupt(HCS12_PIM_PORTH_BASE, HCS12_IRQ_PH0,
HCS12_IRQ_PHSET, context);
}
#endif
#ifdef CONFIG_HCS12_PORTJ_INTS
static int hcs12_pjinterrupt(int irq, void *context, FAR void *arg)
{
return hcs12_interrupt(HCS12_PIM_PORTJ_BASE, HCS12_IRQ_PJ0,
HCS12_IRQ_PJSET, context);
}
#endif
#endif /* CONFIG_HCS12_GPIOIRQ */
/****************************************************************************
* Public Functions
****************************************************************************/
/****************************************************************************
* Name: hcs12_gpioirqinitialize
*
* Description:
* Initialize logic to support a second level of interrupt decoding for
* GPIO pins.
*
****************************************************************************/
void hcs12_gpioirqinitialize(void)
{
/* Disable all GPIO IRQs -- Ports G, H, and J */
putreg8(0, HCS12_PIM_PORTG_IE);
putreg8(0, HCS12_PIM_PORTH_IE);
putreg8(0, HCS12_PIM_PORTJ_IE);
/* Attach GPIO IRQ interrupt handlers */
#ifdef CONFIG_HCS12_GPIOIRQ
# ifdef CONFIG_HCS12_PORTG_INTS
irq_attach(HCS12_IRQ_VPORTG, hcs12_pginterrupt, NULL);
# endif
# ifdef CONFIG_HCS12_PORTH_INTS
irq_attach(HCS12_IRQ_VPORTH, hcs12_phinterrupt, NULL);
# endif
# ifdef CONFIG_HCS12_PORTJ_INTS
irq_attach(HCS12_IRQ_VPORTJ, hcs12_pjinterrupt, NULL);
# endif
#endif /* CONFIG_HCS12_GPIOIRQ */
}
/****************************************************************************
* Name: hcs12_gpioirqenable
*
* Description:
* Enable the interrupt for specified GPIO IRQ
*
****************************************************************************/
#ifdef CONFIG_HCS12_GPIOIRQ
void hcs12_gpioirqenable(int irq)
{
uint16_t regaddr;
uint8_t pin;
if (hcs12_mapirq(irq, ®addr, &pin) == OK)
{
irqstate_t flags = enter_critical_section();
uint8_t regval = getreg8(regaddr);
regval |= (1 << pin);
putreg8(regval, regaddr);
leave_critical_section(flags);
}
}
#endif /* CONFIG_HCS12_GPIOIRQ */
/****************************************************************************
* Name: hcs12_gpioirqdisable
*
* Description:
* Disable the interrupt for specified GPIO IRQ
*
****************************************************************************/
#ifdef CONFIG_HCS12_GPIOIRQ
void hcs12_gpioirqdisable(int irq)
{
uint16_t regaddr;
uint8_t pin;
if (hcs12_mapirq(irq, ®addr, &pin) == OK)
{
irqstate_t flags = enter_critical_section();
uint8_t regval = getreg8(regaddr);
regval &= ~(1 << pin);
putreg8(regval, regaddr);
leave_critical_section(flags);
}
}
#endif /* CONFIG_HCS12_GPIOIRQ */
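# --- Editor's note: illustrative Python sketch, not part of the NuttX file above ---
# hcs12_interrupt() scans the port's pending-flag register bit by bit, dispatches
# every asserted, valid bit as its own second-level IRQ, and acknowledges it by
# writing that bit back to the flag register (write-one-to-clear). The same loop
# in Python, with hypothetical callbacks standing in for getreg8/putreg8/irq_dispatch:
def dispatch_port_interrupts(read_reg, write_reg, dispatch, irq_base, valid_mask):
    pending = read_reg("IF") & read_reg("IE")    # unmasked bits that are pending
    bit, irq = 1, irq_base
    while pending:
        if valid_mask & bit:                     # skip holes in the port (e.g. PJ4/PJ5)
            if pending & bit:
                write_reg("IF", bit)             # ack: write 1 to clear the flag
                dispatch(irq)
                pending &= ~bit
            irq += 1                             # IRQ numbers only count valid pins
        bit <<= 1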
| 3,040 |
3,212 | <filename>nifi-commons/nifi-record-path/src/main/java/org/apache/nifi/record/path/util/FieldValueLogicalPathBuilder.java
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.nifi.record.path.util;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Objects;
import org.apache.nifi.record.path.ArrayIndexFieldValue;
import org.apache.nifi.record.path.FieldValue;
import org.apache.nifi.record.path.MapEntryFieldValue;
public class FieldValueLogicalPathBuilder {
private static final CharSequence DEFAULT_DELIMITER = "/";
private static final CharSequence DEFAULT_KEY_INDEX_WRAPPER_LEFT = "[";
private static final CharSequence DEFAULT_KEY_INDEX_WRAPPER_RIGHT = "]";
private final CharSequence pathDelimiter;
private final CharSequence keyLeft;
private final CharSequence keyRight;
private final CharSequence indexLeft;
private final CharSequence indexRight;
public static class Builder {
private CharSequence pathDelimiter = DEFAULT_DELIMITER;
private CharSequence keyLeft = DEFAULT_KEY_INDEX_WRAPPER_LEFT;
private CharSequence keyRight = DEFAULT_KEY_INDEX_WRAPPER_RIGHT;
private CharSequence indexLeft = DEFAULT_KEY_INDEX_WRAPPER_LEFT;
private CharSequence indexRight = DEFAULT_KEY_INDEX_WRAPPER_RIGHT;
public Builder() {
}
public Builder withPathDelimiter(CharSequence delimiter) {
Objects.requireNonNull(delimiter, "delimiter cannot be null");
this.pathDelimiter = delimiter;
return this;
}
public Builder withMapKeyWrapperLeft(CharSequence left) {
Objects.requireNonNull(left, "left cannot be null");
this.keyLeft = left;
return this;
}
public Builder withMapKeyWrapperRight(CharSequence right) {
Objects.requireNonNull(right, "right cannot be null");
this.keyRight = right;
return this;
}
public Builder withArrayIndexWrapperLeft(CharSequence left) {
Objects.requireNonNull(left, "left cannot be null");
this.indexLeft = left;
return this;
}
public Builder withArrayIndexWrapperRight(CharSequence right) {
Objects.requireNonNull(right, "right cannot be null");
this.indexRight = right;
return this;
}
public FieldValueLogicalPathBuilder build() {
return new FieldValueLogicalPathBuilder(pathDelimiter, keyLeft, keyRight, indexLeft,
indexRight);
}
}
private FieldValueLogicalPathBuilder(CharSequence pathDelimiter,
CharSequence leftMapKeyWrapper, CharSequence rightMapKeyMapper, CharSequence leftArrayIndexWrapper,
CharSequence rightArrayIndexWrapper) {
this.keyLeft = leftMapKeyWrapper;
this.keyRight = rightMapKeyMapper;
this.indexLeft = leftArrayIndexWrapper;
this.indexRight = rightArrayIndexWrapper;
this.pathDelimiter = pathDelimiter;
}
/**
* Builds a logical path String using the configured wrappers for array or map values for a given
* {@code FieldValue}
*
* @param fieldValue the Field Value
* @return a String with a path
*/
public String buildLogicalPath(FieldValue fieldValue) {
Objects.requireNonNull(fieldValue, "fieldValue cannot be null");
final List<CharSequence> paths = new ArrayList<>();
FieldValueWalker.walk(fieldValue, (thisFieldValue) -> {
int index = -1;
      if (thisFieldValue instanceof ArrayIndexFieldValue) {
        index = ((ArrayIndexFieldValue) thisFieldValue).getArrayIndex();
        // Array indices are wrapped with the configured array-index wrappers.
        paths.add(indexRight);
        paths.add(String.valueOf(index));
        paths.add(indexLeft);
      } else if (thisFieldValue instanceof MapEntryFieldValue) {
        // Map keys are wrapped with the configured map-key wrappers.
        paths.add(keyRight);
        paths.add(((MapEntryFieldValue) thisFieldValue).getMapKey());
        paths.add(keyLeft);
} else {
thisFieldValue.getParent().ifPresent((parentFieldValue) -> {
paths.add(thisFieldValue.getField().getFieldName());
paths.add(pathDelimiter);
});
}
});
Collections.reverse(paths);
return String.join("",paths);
}
}
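# --- Editor's note: illustrative Python sketch, not part of the NiFi class above ---
# buildLogicalPath() walks from the innermost FieldValue up through its parents,
# emitting "<delimiter><fieldName>" for a named child, a wrapped index for an
# array element, or a wrapped key for a map entry, and finally reverses the
# pieces -- yielding paths such as /person/addresses[0]/street. A rough Python
# equivalent (the node attributes below are illustrative, not the NiFi API):
def build_logical_path(field, delim="/", key=("[", "]"), idx=("[", "]")):
    parts = []
    node = field
    while node is not None:
        if getattr(node, "array_index", None) is not None:
            parts.append(f"{idx[0]}{node.array_index}{idx[1]}")
        elif getattr(node, "map_key", None) is not None:
            parts.append(f"{key[0]}{node.map_key}{key[1]}")
        elif node.parent is not None:
            parts.append(f"{delim}{node.name}")   # the root itself contributes nothing
        node = node.parent
    return "".join(reversed(parts))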
| 2,005 |
453 | #ifndef FS_NODE_H
#define FS_NODE_H
#include <std/array_m.h>
typedef enum fs_node_type {
FS_NODE_TYPE_BASE = 0,
FS_NODE_TYPE_ROOT = 1,
FS_NODE_TYPE_INITRD = 2
} fs_node_type_t;
typedef struct fs_base_node {
// Common fields
fs_node_type_t type;
char name[64];
struct fs_base_node* parent;
bool is_directory;
array_m* children;
} fs_base_node_t;
#endif | 189 |
586 | # -*- coding: utf-8 -*-
import pytest
from validators import ValidationFailure
from validators.i18n.es import es_cif, es_doi, es_nie, es_nif
@pytest.mark.parametrize(('value',), [
('B25162520',),
('U4839822F',),
('B96817697',),
('P7067074J',),
('Q7899705C',),
('C75098681',),
('G76061860',),
('C71345375',),
('G20558169',),
('U5021960I',),
])
def test_returns_true_on_valid_cif(value):
assert es_cif(value)
@pytest.mark.parametrize(('value',), [
('12345',),
('ABCDEFGHI',),
('Z5021960I',),
])
def test_returns_false_on_invalid_cif(value):
result = es_cif(value)
assert isinstance(result, ValidationFailure)
@pytest.mark.parametrize(('value',), [
('X0095892M',),
('X8868108K',),
('X2911154K',),
('Y2584969J',),
('X7536157T',),
('Y5840388N',),
('Z2915723H',),
('Y4002236C',),
('X7750702R',),
('Y0408759V',),
])
def test_returns_true_on_valid_nie(value):
assert es_nie(value)
@pytest.mark.parametrize(('value',), [
('K0000023T',),
('L0000024R',),
('M0000025W',),
('00000026A',),
('00000027G',),
('00000028M',),
('00000029Y',),
('00000030F',),
('00000031P',),
('00000032D',),
('00000033X',),
('00000034B',),
('00000035N',),
('00000036J',),
('00000037Z',),
('00000038S',),
('00000039Q',),
('00000040V',),
('00000041H',),
('00000042L',),
('00000043C',),
('00000044K',),
('00000045E',),
])
def test_returns_true_on_valid_nif(value):
assert es_nif(value)
@pytest.mark.parametrize(('value',), [
('12345',),
('X0000000T',),
('00000000T',),
('00000001R',),
])
def test_returns_false_on_invalid_nif(value):
result = es_nif(value)
assert isinstance(result, ValidationFailure)
@pytest.mark.parametrize(('value',), [
# CIFs
('B25162520',),
('U4839822F',),
('B96817697',),
# NIEs
('X0095892M',),
('X8868108K',),
('X2911154K',),
# NIFs
('26643189N',),
('07060225F',),
('49166693F',),
])
def test_returns_true_on_valid_doi(value):
assert es_doi(value)
| 1,051 |
320 | <reponame>strangestroad/interview-techdev-guide
import java.util.*;
import java.lang.*;
class AllPairShortestPath {
private final static int INF = 99999, V = 4;
private void floydWarshall(int[][] graph) {
int[][] dist = new int[V][V];
int i, j, k;
for (i = 0; i < V; i++)
for (j = 0; j < V; j++)
dist[i][j] = graph[i][j];
for (k = 0; k < V; k++) {
// Pick all vertices as source one by one
for (i = 0; i < V; i++) {
for (j = 0; j < V; j++) {
if (dist[i][k] + dist[k][j] < dist[i][j])
dist[i][j] = dist[i][k] + dist[k][j];
}
}
}
// Print the shortest distance matrix
printSolution(dist);
}
private void printSolution(int[][] dist) {
System.out.println("The following matrix shows the shortest " +
"distances between every pair of vertices");
for (int i = 0; i < V; ++i) {
for (int j = 0; j < V; ++j) {
if (dist[i][j] == INF)
System.out.print("INF ");
else
System.out.print(dist[i][j] + " ");
}
System.out.println();
}
}
public static void main(String[] args) {
int[][] graph = {{0, 5, INF, 10},
{INF, 0, 3, INF},
{INF, INF, 0, 1},
{INF, INF, INF, 0}
};
AllPairShortestPath a = new AllPairShortestPath();
a.floydWarshall(graph);
}
} | 884 |
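# --- Editor's note: illustrative Python sketch, not part of the Java file above ---
# floydWarshall() relaxes every pair (i, j) through each intermediate vertex k:
# dist[i][j] = min(dist[i][j], dist[i][k] + dist[k][j]). The same algorithm in
# Python, run on the 4-vertex example from main():
INF = 99999

def floyd_warshall(graph):
    n = len(graph)
    dist = [row[:] for row in graph]          # copy so the input stays intact
    for k in range(n):                        # grow the set of allowed intermediates
        for i in range(n):
            for j in range(n):
                if dist[i][k] + dist[k][j] < dist[i][j]:
                    dist[i][j] = dist[i][k] + dist[k][j]
    return dist

# floyd_warshall([[0, 5, INF, 10], [INF, 0, 3, INF], [INF, INF, 0, 1], [INF, INF, INF, 0]])
# shortens dist[0][3] from 10 to 9 via 0 -> 1 -> 2 -> 3.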
930 | <reponame>avinashkranjan/PraticalPythonProjects<gh_stars>100-1000
# LinkedIn My_Connections Scraper
# Written by XZANATOL
from selenium.webdriver.common.action_chains import ActionChains
from optparse import OptionParser
from selenium import webdriver
import pandas as pd
import time
import sys
import re
pattern_name = "\\n(.+)\\n" # Used to extract names
pattern_headline = 'occupation\\n(.+)\\n' # Used to extract headlines
# Help menu
usage = """
<Script> [Options]
[Options]
-h, --help Show this help message and exit.
-e, --email Enter login email
-p, --password Enter login password
-s, --skills Flag to scrap each profile, and look at its skill set
Operation Modes:
> Basic mode
   This will scrape the full LinkedIn connections list with their corresponding Name, Headline, and Profile link.
 > Skills scraper mode (-s/--skills)
   (Time-consuming mode)
   This will do the same job as basic mode, but will also visit each profile and extract its skill set.
"""
# Load args
parser = OptionParser()
parser.add_option("-e", "--email", dest="email", help="Enter login email")
parser.add_option("-p", "--password", dest="password",
help="Enter login password")
parser.add_option("-s", "--skills", action="store_true", dest="skills",
help="Flag to scrap each profile, and look at its skill set")
def login(email, password):
"""LinkedIn automated login function"""
# Get LinkedIn login page
driver = webdriver.Chrome("chromedriver.exe")
driver.get("https://www.linkedin.com")
# Locate Username field and fill it
session_key = driver.find_element_by_name("session_key")
session_key.send_keys(email)
# Locate Password field and fill it
session_password = driver.find_element_by_name("session_password")
session_password.send_keys(password)
# Locate Submit button and click it
submit = driver.find_element_by_class_name("sign-in-form__submit-button")
submit.click()
# Check credentials output
if driver.title != "LinkedIn":
print("Provided E-mail/Password is wrong!")
driver.quit()
sys.exit()
# Return session
return driver
def scrap_basic(driver):
"""Returns 3 lists of Names, Headlines, and Profile Links"""
driver.get("https://www.linkedin.com/mynetwork/invite-connect/connections/")
# Bypassing Ajax Call through scrolling the page up and down multiple times
# Base case is when the height of the scroll bar is constant after 2 complete scrolls
time_to_wait = 3 # Best interval for a 512KB/Sec download speed - Change it according to your internet speed
last_height = driver.execute_script("return document.body.scrollHeight")
while True:
# Scroll down to bottom
driver.execute_script(
"window.scrollTo(0, document.body.scrollHeight);")
# This loop is for bypassing a small bug upon scrolling that causes the Ajax call to be cancelled
for i in range(2):
time.sleep(time_to_wait)
driver.execute_script("window.scrollTo(0, 0);") # Scroll up to top
time.sleep(time_to_wait)
# Scroll down to bottom
driver.execute_script(
"window.scrollTo(0, document.body.scrollHeight);")
new_height = driver.execute_script(
"return document.body.scrollHeight") # Update scroll bar height
if new_height == last_height:
break
last_height = new_height
# Extract card without links
extracted_scrap = driver.find_elements_by_class_name(
"mn-connection-card__details")
extracted_scrap = [_.text for _ in extracted_scrap]
# Append data to a seperate list
names = []
headlines = []
for card in extracted_scrap:
# Try statements just in case of headline/name type errors
try:
names.append(re.search(pattern_name, card)[0])
except:
names.append(" ")
try:
headlines.append(re.search(pattern_headline, card)[0])
except:
headlines.append(" ")
# Extract links
extracted_scrap = driver.find_elements_by_tag_name('a')
links = []
for i in extracted_scrap:
link = i.get_attribute("href")
if "https://www.linkedin.com/in" in link and not link in links:
links.append(link)
# Return outputs
return driver, names, headlines, links
def scrap_skills(driver, links):
skill_set = []
length = len(links)
for i in range(length):
link = links[i] # Get profile link
driver.get(link)
# Bypassing Ajax Call through scrolling through profile multiple sections
time_to_wait = 3
last_height = driver.execute_script(
"return document.body.scrollHeight")
while True:
# Scroll down to bottom
driver.execute_script(
"window.scrollTo(0, document.body.scrollHeight);")
# This loop is for bypassing a small bug upon scrolling that causes the Ajax call to be cancelled
for i in range(2):
time.sleep(time_to_wait)
driver.execute_script(
"window.scrollTo(0, document.body.scrollHeight/4);")
driver.execute_script(
"window.scrollTo(0, document.body.scrollHeight/3);")
driver.execute_script(
"window.scrollTo(0, document.body.scrollHeight/2);")
driver.execute_script(
"window.scrollTo(0, document.body.scrollHeight*3/4);")
time.sleep(time_to_wait)
# Scroll down to bottom
driver.execute_script(
"window.scrollTo(0, document.body.scrollHeight);")
new_height = driver.execute_script(
"return document.body.scrollHeight") # Update scroll bar height
if new_height == last_height:
break
last_height = new_height
# Locate button
buttons = driver.find_elements_by_tag_name('button')
length = len(buttons)
for button_num in range(length):
i = buttons[button_num].get_attribute("data-control-name")
if i == "skill_details":
button = buttons[button_num]
break
# Scroll then click the button
actions = ActionChains(driver)
actions.move_to_element(button).click().perform()
# Finally extract the skills
skills = driver.find_elements_by_xpath(
"//*[starts-with(@class,'pv-skill-category-entity__name-text')]")
skill_set_list = []
for skill in skills:
skill_set_list.append(skill.text)
# Append each skill set to its corresponding name
# Appending all to one string
skill_set.append(" -- ".join(skill_set_list))
# Return session & skills
return driver, skill_set
def save_to_csv(names, headlines, links, skills):
# If skills argument was false
if skills is None:
skills = [None]*len(names)
# Make a dataframe and append data to it
df = pd.DataFrame()
for i in range(len(names)):
df = df.append({"Name": names[i], "Headline": headlines[i],
"Link": links[i], "Skills": skills[i]}, ignore_index=True)
# Save to CSV
df.to_csv("scrap.csv", index=False, columns=[
"Name", "Headline", "Link", "Skills"])
# Start checkpoint
if __name__ == "__main__":
(options, args) = parser.parse_args()
# Inputs
email = options.email
    password = options.password
skills = options.skills
driver = login(email, password) # Login Phase
print("Successfull Login!")
print("Commencing 'My-Connections' list scrap...")
driver, names, headlines, links = scrap_basic(driver) # Basic Scrap Phase
print("Finished basic scrap, scrapped {}".format(len(names)))
if skills:
print("Commencing 'Skills' scrap...")
driver, skill_set = scrap_skills(driver, links) # Skills Scrap Phase
print("Finished Skills scrap.")
print("Saving to CSV file...")
save_to_csv(names, headlines, links, skill_set) # Save to CSV
else:
save_to_csv(names, headlines, links, None) # Save to CSV
print("Scrapping session has ended.")
# End Session
driver.quit()
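# --- Editor's note: illustrative sketch, not part of the scraper above ---
# Both scrap_basic() and scrap_skills() defeat LinkedIn's lazy loading the same
# way: scroll to the bottom, wait for the AJAX call, and stop once
# document.body.scrollHeight stops growing. The pattern in isolation (any
# Selenium WebDriver works; the pause length is an assumption tuned to
# connection speed, as noted in the code above):
import time

def scroll_until_stable(driver, pause=3.0):
    last_height = driver.execute_script("return document.body.scrollHeight")
    while True:
        driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
        time.sleep(pause)                      # give the AJAX call time to finish
        new_height = driver.execute_script("return document.body.scrollHeight")
        if new_height == last_height:
            return
        last_height = new_height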
| 3,367 |
945 | <gh_stars>100-1000
// This is core/vnl/algo/tests/test_functions.cxx
#include <iostream>
#include <iomanip>
#include <cmath>
#include <ctime> // std::time() is used below to seed the congruential RNG
#include "testlib/testlib_test.h"
//:
// \file
//
// Test function results for chi-squared cumulative density function.
// The values are those calculated from this function on solaris, and
// agree (to 3sf) with those from a mathematical table.
//
// Each row is for a different dof, from 1 through 96 in increments of
// 5 (20 rows).
//
// Each column is for a different chi-squared, from 0 through 180 in
// increments of 20 (10 columns).
//
// For readability reasons, the rows are split in 2x 5 rows, with an extra
// indentation for the second half of each row.
#include <vnl/algo/vnl_chi_squared.h>
double cdf_baseline[] = { 0.0,
0.9999922488859869,
0.9999999997449120,
0.9999999999999905,
1.0,
1.0,
1.0,
1.0,
1.0,
1.0,
0.0,
0.9972306042844884,
0.9999995444850495,
0.9999999999549898,
0.9999999999999964,
1.0,
1.0,
1.0,
1.0,
1.0,
0.0,
0.9546593186683575,
0.9999642248743472,
0.9999999907278384,
0.9999999999985243,
0.9999999999999998,
1.0,
1.0,
1.0,
1.0,
0.0,
0.7797793533983011,
0.9992214099174926,
0.9999994766265833,
0.9999999998335990,
0.9999999999999654,
1.0,
1.0,
1.0,
1.0,
0.0,
0.4787387426184652,
0.9925632227010702,
0.9999872283117348,
0.9999999919251898,
0.9999999999971140,
0.9999999999999993,
1.0,
1.0,
1.0,
0.0,
0.2084435236051257,
0.9609880071450072,
0.9998323023504486,
0.9999997903577246,
0.9999999998716507,
0.9999999999999505,
1.0,
1.0,
1.0,
0.0,
0.0641963723612628,
0.8709596508694845,
0.9986502140214748,
0.9999966874714471,
0.9999999965356298,
0.9999999999979206,
0.9999999999999991,
1.0,
1.0,
0.0,
0.0142776135970496,
0.7029716020753263,
0.9927297837948102,
0.9999652953758086,
0.9999999382046935,
0.9999999999423534,
0.9999999999999658,
1.0,
1.0,
0.0,
0.0023551011658856,
0.4850483796984979,
0.9720590475678458,
0.9997436372936986,
0.9999992259610407,
0.9999999988798406,
0.9999999999990349,
0.9999999999999994,
1.0,
0.0,
0.0002957368080061,
0.2793886568739744,
0.9194309789050203,
0.9986010626587202,
0.9999928735024521,
0.9999999840351245,
0.9999999999800138,
0.9999999999999838,
1.0,
0.0,
0.0000289361257476,
0.1331225579748673,
0.8182400707526587,
0.9941486753062340,
0.9999500186862800,
0.9999998271108139,
0.9999999996858998,
0.9999999999996467,
0.9999999999999998,
0.0,
0.0000022535340508,
0.0524807132282665,
0.6671309159544766,
0.9806610209037141,
0.9997251552759223,
0.9999985364764852,
0.9999999961469799,
0.9999999999940054,
0.9999999999999938,
0.0,
0.0000001352148233,
0.0172069984403675,
0.4878147556882595,
0.9481646213453065,
0.9987862811130113,
0.9999900872908165,
0.9999999622464486,
0.9999999999188310,
0.9999999999998879,
0.0,
0.0000000073722077,
0.0047274255385307,
0.3154587502878544,
0.8846964150660444,
0.9956070677677689,
0.9999452153818948,
0.9999996987525844,
0.999999999105923,
0.9999999999983556,
0.0,
0.0,
0.0010974342908522,
0.1790828046260967,
0.7825800096649158,
0.9867347062121498,
0.9997487615314837,
0.9999980101056154,
0.9999999918570519,
0.9999999999800666,
0.0,
0.0000000000116404,
0.0002170786956947,
0.0890129923177431,
0.6453492639226499,
0.9660451059031501,
0.9990298866149123,
0.9999889634183597,
0.9999999378147725,
0.9999999997975395,
0.0,
0.0,
0.0000368857904673,
0.0387583535447800,
0.4894571445703249,
0.9252463664134718,
0.9968048573119753,
0.9999479616312844,
0.9999995969280049,
0.9999999982561550,
0.0,
0.0000000000000097,
0.0000054252284960,
0.0148195154453572,
0.3381826989258588,
0.8564977678812210,
0.9909185509763581,
0.9997891030141784,
0.9999977585673097,
0.9999999871266845,
0.0,
0.0,
0.0000006956282422,
0.0049930250351178,
0.2116235676678424,
0.7565873548899793,
0.9774854404134016,
0.9992580809522048,
0.9999892041010620,
0.9999999177818010,
0.0,
0.0000000000000002,
0.0000000782693028,
0.0014883025723802,
0.1195827092276646,
0.6303318279959211,
0.9508233262755118,
0.9977140253664275,
0.9999545743054137,
0.9999995418724694 };
int
test_functions()
{
int idx = 0;
for (unsigned int n = 1; n < 100; n += 5)
{
for (unsigned int chisq_int = 0; chisq_int < 200; chisq_int += 20)
{
const auto chisq = static_cast<double>(chisq_int);
const double cdf = vnl_chi_squared_cumulative(chisq, n);
const double err = std::fabs(cdf - cdf_baseline[idx++]);
std::cout << "vnl_chi_squared_cumulative(" << chisq << ',' << n << ')';
TEST_NEAR(" CDF", err, 0.0, 2e-15);
if (err >= 2e-15)
std::cout << "Error = " << std::setprecision(16) << err << std::endl;
}
}
std::cout << "cdf(7.88,1) = " << vnl_chi_squared_cumulative(7.88, 1) << " should be about 0.995\n"
<< "cdf(14.8,12) = " << vnl_chi_squared_cumulative(14.8, 12) << " should be about 0.75\n"
<< "cdf(10.1,19) = " << vnl_chi_squared_cumulative(10.1, 19) << " should be about 0.05\n"
<< "cdf(39.3,40) = " << vnl_chi_squared_cumulative(39.3, 40) << " should be about 0.50\n"
<< "cdf(109.1,100) = " << vnl_chi_squared_cumulative(109.1, 100) << " should be about 0.75\n";
// rand() is not always a good random number generator,
// so use the following congruential random number generator - PVr
static auto sample_seed = (unsigned long)std::time(nullptr);
double hist1[20];
for (double & i : hist1)
{
sample_seed = (sample_seed * 16807) % 2147483647L;
double u = double(sample_seed) / 0x7fffffff; // 0x7fffffff == 2147483711L
i = 10.0 + 20.0 * (u - 0.5); // uniform in the interval 0 - 20
}
double chisq = 0;
for (int i = 0; i < 20; i++)
{
std::cout << i << ' ' << hist1[i] << std::endl;
double delta = hist1[i] - 10.0;
chisq += delta * delta / (hist1[i] + 10.0);
}
std::cout << "cdf(" << chisq << ",20) = " << vnl_chi_squared_cumulative(chisq, 20)
<< " so P(same dist) = " << (1.0 - vnl_chi_squared_cumulative(chisq, 20)) << std::endl;
return 0;
}
TESTMAIN(test_functions);
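# --- Editor's note: illustrative Python check, not part of the VXL test above ---
# The baseline table is laid out row-major: row r holds dof = 1 + 5*r
# (r = 0..19) and column c holds chi-squared = 20*c (c = 0..9), so entry
# [r][c] is the CDF at that (chi-squared, dof) pair. If SciPy is available,
# the same numbers can be spot-checked independently:
from scipy.stats import chi2

for dof in range(1, 100, 5):                              # 1, 6, 11, ..., 96 (20 rows)
    row = [chi2.cdf(x, dof) for x in range(0, 200, 20)]   # 0, 20, ..., 180 (10 columns)
    print(dof, ["%.10f" % v for v in row])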
| 7,898 |
1,799 | // Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "lite/backends/x86/math/power.h"
#include <immintrin.h>
#include <cmath>
#include "lite/backends/x86/math/avx/avx_mathfuns.h"
namespace paddle {
namespace lite {
namespace x86 {
namespace math {
template <>
void power<float>(const float* din,
float* dout,
const int num,
float scale_,
float shift_,
float factor_) {
int cnt = num >> 4;
int remain = num % 16;
bool _do_power = true;
bool _do_scale = true;
bool _do_shift = true;
int rem_cnt = remain >> 2;
int rem_rem = remain & 3;
if (fabsf(factor_ - 1.f) < 1e-6f) {
_do_power = false;
}
if (fabsf(scale_ - 1.f) < 1e-6f) {
_do_scale = false;
}
if (fabsf(shift_ - 0.f) < 1e-6f) {
_do_shift = false;
}
#ifdef __AVX__
__m256 vscale_256 = _mm256_set1_ps(scale_);
__m256 vshift_256 = _mm256_set1_ps(shift_);
__m256 vfactor_256 = _mm256_set1_ps(factor_);
#endif
__m128 vscale = _mm_set1_ps(scale_);
__m128 vshift = _mm_set1_ps(shift_);
float* ptr_out = dout;
const float* ptr_in = din;
if (_do_power) {
for (int i = 0; i < cnt; i++) {
#ifdef __AVX__
__m256 vin0 = _mm256_loadu_ps(ptr_in);
__m256 vin1 = _mm256_loadu_ps(ptr_in + 8);
ptr_in += 16;
__m256 vsum0 = _mm256_mul_ps(vin0, vscale_256);
__m256 vsum1 = _mm256_mul_ps(vin1, vscale_256);
__m256 vres0 = _mm256_add_ps(vsum0, vshift_256);
__m256 vres1 = _mm256_add_ps(vsum1, vshift_256);
vres0 = pow256_ps(vres0, vfactor_256);
vres1 = pow256_ps(vres1, vfactor_256);
_mm256_storeu_ps(ptr_out, vres0);
_mm256_storeu_ps(ptr_out + 8, vres1);
#else
__m128 vin0 = _mm_loadu_ps(ptr_in);
__m128 vin1 = _mm_loadu_ps(ptr_in + 4);
__m128 vin2 = _mm_loadu_ps(ptr_in + 8);
__m128 vin3 = _mm_loadu_ps(ptr_in + 12);
__m128 vsum0 = _mm_mul_ps(vin0, vscale);
__m128 vsum1 = _mm_mul_ps(vin1, vscale);
__m128 vsum2 = _mm_mul_ps(vin2, vscale);
__m128 vsum3 = _mm_mul_ps(vin3, vscale);
__m128 vres0 = _mm_add_ps(vsum0, vshift);
__m128 vres1 = _mm_add_ps(vsum1, vshift);
__m128 vres2 = _mm_add_ps(vsum2, vshift);
__m128 vres3 = _mm_add_ps(vsum3, vshift);
ptr_in += 16;
for (int j = 0; j < 4; j++) {
ptr_out[j] = std::pow((reinterpret_cast<float*>(&vres0))[j], factor_);
ptr_out[j + 4] =
std::pow((reinterpret_cast<float*>(&vres1))[j], factor_);
ptr_out[j + 8] =
std::pow((reinterpret_cast<float*>(&vres2))[j], factor_);
ptr_out[j + 12] =
std::pow((reinterpret_cast<float*>(&vres3))[j], factor_);
}
#endif
ptr_out += 16;
}
for (int i = 0; i < rem_cnt; i++) {
__m128 vin0 = _mm_loadu_ps(ptr_in);
ptr_in += 4;
__m128 vsum0 = _mm_mul_ps(vin0, vscale);
__m128 vres0 = _mm_add_ps(vsum0, vshift);
for (int j = 0; j < 4; j++) {
ptr_out[j] = std::pow((reinterpret_cast<float*>(&vres0))[j], factor_);
}
ptr_out += 4;
}
for (int i = 0; i < rem_rem; i++) {
ptr_out[0] = std::pow((ptr_in[0] * scale_ + shift_), factor_);
ptr_in++;
ptr_out++;
}
} else {
for (int i = 0; i < cnt; i++) {
#ifdef __AVX__
__m256 vin0 = _mm256_loadu_ps(ptr_in);
__m256 vin1 = _mm256_loadu_ps(ptr_in + 8);
ptr_in += 16;
__m256 vsum0 = _mm256_mul_ps(vin0, vscale_256);
__m256 vsum1 = _mm256_mul_ps(vin1, vscale_256);
__m256 vres0 = _mm256_add_ps(vsum0, vshift_256);
__m256 vres1 = _mm256_add_ps(vsum1, vshift_256);
_mm256_storeu_ps(ptr_out, vres0);
_mm256_storeu_ps(ptr_out + 8, vres1);
ptr_out += 16;
#else
__m128 vin0 = _mm_loadu_ps(ptr_in);
__m128 vin1 = _mm_loadu_ps(ptr_in + 4);
__m128 vin2 = _mm_loadu_ps(ptr_in + 8);
__m128 vin3 = _mm_loadu_ps(ptr_in + 12);
__m128 vsum0 = _mm_mul_ps(vin0, vscale);
__m128 vsum1 = _mm_mul_ps(vin1, vscale);
__m128 vsum2 = _mm_mul_ps(vin2, vscale);
__m128 vsum3 = _mm_mul_ps(vin3, vscale);
__m128 vres0 = _mm_add_ps(vsum0, vshift);
__m128 vres1 = _mm_add_ps(vsum1, vshift);
__m128 vres2 = _mm_add_ps(vsum2, vshift);
__m128 vres3 = _mm_add_ps(vsum3, vshift);
ptr_in += 16;
_mm_storeu_ps(ptr_out, vres0);
_mm_storeu_ps(ptr_out + 4, vres1);
_mm_storeu_ps(ptr_out + 8, vres2);
_mm_storeu_ps(ptr_out + 12, vres3);
ptr_out += 16;
#endif
}
for (int i = 0; i < rem_cnt; i++) {
__m128 vin0 = _mm_loadu_ps(ptr_in);
ptr_in += 4;
__m128 vsum0 = _mm_mul_ps(vin0, vscale);
__m128 vres0 = _mm_add_ps(vsum0, vshift);
_mm_storeu_ps(ptr_out, vres0);
ptr_out += 4;
}
for (int i = 0; i < rem_rem; i++) {
ptr_out[0] = ptr_in[0] * scale_ + shift_;
ptr_in++;
ptr_out++;
}
}
}
} /* namespace math */
} /* namespace x86 */
} /* namespace lite */
} /* namespace paddle */
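# --- Editor's note: illustrative Python sketch, not part of the Paddle-Lite file above ---
# Stripped of the SSE/AVX plumbing, the kernel computes
#   out[i] = (in[i] * scale + shift) ** factor
# and skips the expensive pow() on the fast path where factor == 1. A NumPy
# reference with the same semantics (useful for checking the vectorized code,
# not a drop-in replacement):
import numpy as np

def power_ref(x, scale=1.0, shift=0.0, factor=1.0):
    y = x * scale + shift
    return y if abs(factor - 1.0) < 1e-6 else np.power(y, factor)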
| 2,849 |
399 | <reponame>douggie/cointrader<filename>src/test/java/org/cryptocoinpartners/module/FeeUtilTest.java
package org.cryptocoinpartners.module;
import static org.junit.Assert.assertEquals;
import java.math.BigDecimal;
import org.cryptocoinpartners.enumeration.ExecutionInstruction;
import org.cryptocoinpartners.enumeration.FeeMethod;
import org.cryptocoinpartners.enumeration.PositionEffect;
import org.cryptocoinpartners.schema.Asset;
import org.cryptocoinpartners.schema.Currency;
import org.cryptocoinpartners.schema.DecimalAmount;
import org.cryptocoinpartners.schema.Exchange;
import org.cryptocoinpartners.schema.Fill;
import org.cryptocoinpartners.schema.Listing;
import org.cryptocoinpartners.schema.Market;
import org.cryptocoinpartners.schema.Portfolio;
import org.cryptocoinpartners.schema.Prompt;
import org.cryptocoinpartners.schema.SpecificOrder;
import org.cryptocoinpartners.schema.Transaction;
import org.joda.time.Instant;
import org.junit.Test;
public class FeeUtilTest {
// Replay replay = new Replay(false);
// Context context = Context.create(new EventTimeManager());
/*
* protected Injector injector = Guice.createInjector(new AbstractModule() {
* @Override protected void configure() { bind(MockOrderService.class); } }); // @Before // public void setup() { // injector.injectMembers(this);
* // }
* @Inject BaseOrderService orderSerivce;
*/
@Test
public final void test() {
Exchange exchange = new Exchange("OKCOIN_THISWEEK");
exchange.setFeeMethod(FeeMethod.PercentagePerUnit);
Asset usd = new Currency(false, "USD", 0.01);
Asset eos = new Currency(false, "EOS", 0.00000001);
Asset btc = new Currency(false, "BTC", 0.00000001);
Market eosMarket = createMarket("OKCOIN_THISWEEK", eos, usd, new Prompt("THIS_WEEK", 1, 0.01, eos, 1, 0.001, 20, FeeMethod.PercentagePerUnit, 0.0002,
0.0003, FeeMethod.PercentagePerUnit, FeeMethod.PercentagePerUnit), 0.001, 1);
Market btcMarket = createMarket("OKCOIN_THISWEEK", btc, usd, new Prompt("THIS_WEEK", 1, 0.01, btc, 1, 0.01, 20, FeeMethod.PercentagePerUnit, 0.0002,
0.0003, FeeMethod.PercentagePerUnit, FeeMethod.PercentagePerUnit), 0.01, 1);
Market eosCashMarket = createMarket("BITFINEX", eos, usd, 0.01, 0.00000001, 0.002, 0.003, FeeMethod.PercentagePerUnit, 0.03);
eosCashMarket.getExchange().setMargin(3);
Market btcCashMarket = createMarket("BITFINEX", btc, usd, 0.01, 0.00000001, 0.002, 0.003, FeeMethod.PercentagePerUnit, 0.03);
btcCashMarket.getExchange().setMargin(3);
Market eosbtcCashMarket = createMarket("BITFINEX", eos, btc, 0.00000001, 0.00000001, 0.002, 0.003, FeeMethod.PercentagePerUnit, 0.03);
eosbtcCashMarket.getExchange().setMargin(3);
// Market market = Market.forSymbol(marketSymbol);
SpecificOrder eosTestOrder = (new SpecificOrder(new Instant(System.currentTimeMillis() - 2000), new Portfolio(), eosMarket, BigDecimal.valueOf(67),
"test order 1"));
eosTestOrder.withPositionEffect(PositionEffect.OPEN).withExecutionInstruction(ExecutionInstruction.MAKER).withLimitPrice(BigDecimal.valueOf(5.310));
Fill eosTestFill = new Fill(eosTestOrder, new Instant(System.currentTimeMillis() - 2000), new Instant(System.currentTimeMillis() - 2000), eosMarket,
eosTestOrder.getLimitPriceCount(), eosTestOrder.getOpenVolumeCount(), "test");
BigDecimal eosTragetCommsbd = BigDecimal.valueOf(0.0252354);
DecimalAmount eosTragetComms = new DecimalAmount(eosTragetCommsbd).negate();
BigDecimal eosTragetMarginbd = BigDecimal.valueOf(6.308851);
DecimalAmount eosTragetMargin = new DecimalAmount(eosTragetMarginbd).negate();
Transaction eostransaction = new Transaction(eosTestFill, eosTestFill.getTime());
SpecificOrder btcTestOrder = (new SpecificOrder(new Instant(System.currentTimeMillis() - 2000), new Portfolio(), btcMarket, BigDecimal.valueOf(13),
"test order 1"));
btcTestOrder.withPositionEffect(PositionEffect.OPEN).withExecutionInstruction(ExecutionInstruction.MAKER).withLimitPrice(BigDecimal.valueOf(6320.05));
Fill btcTestFill = new Fill(btcTestOrder, new Instant(System.currentTimeMillis() - 2000), new Instant(System.currentTimeMillis() - 2000), btcMarket,
btcTestOrder.getLimitPriceCount(), btcTestOrder.getOpenVolumeCount(), "test");
BigDecimal btcTragetCommsbd = BigDecimal.valueOf(0.00004114);
DecimalAmount btcTragetComms = new DecimalAmount(btcTragetCommsbd).negate();
BigDecimal btcTragetMarginbd = BigDecimal.valueOf(0.01028473);
DecimalAmount btcTragetMargin = new DecimalAmount(btcTragetMarginbd).negate();
Transaction btctransaction = new Transaction(btcTestFill, btcTestFill.getTime());
SpecificOrder btcCashTestOrder = (new SpecificOrder(new Instant(System.currentTimeMillis() - 2000), new Portfolio(), btcCashMarket,
BigDecimal.valueOf(13), "test order 1"));
btcCashTestOrder.withPositionEffect(PositionEffect.OPEN).withExecutionInstruction(ExecutionInstruction.MAKER)
.withLimitPrice(BigDecimal.valueOf(6320.05));
Fill btcCashTestFill = new Fill(btcCashTestOrder, new Instant(System.currentTimeMillis() - 2000), new Instant(System.currentTimeMillis() - 2000),
btcCashMarket, btcCashTestOrder.getLimitPriceCount(), btcCashTestOrder.getOpenVolumeCount(), "test");
BigDecimal btcCashTragetCommsbd = BigDecimal.valueOf(164.33);
DecimalAmount btcCashTragetComms = new DecimalAmount(btcCashTragetCommsbd).negate();
BigDecimal btcCashTragetMarginbd = BigDecimal.valueOf(27386.88);
DecimalAmount btcCashTragetMargin = new DecimalAmount(btcCashTragetMarginbd).negate();
Transaction btctCashransaction = new Transaction(btcCashTestFill, btcCashTestFill.getTime());
SpecificOrder eosbtcCashTestOrder = (new SpecificOrder(new Instant(System.currentTimeMillis() - 2000), new Portfolio(), eosbtcCashMarket,
BigDecimal.valueOf(13), "test order 1"));
eosbtcCashTestOrder.withPositionEffect(PositionEffect.OPEN).withExecutionInstruction(ExecutionInstruction.MAKER)
.withLimitPrice(BigDecimal.valueOf(0.000925));
Fill eosbtcCashTestFill = new Fill(eosbtcCashTestOrder, new Instant(System.currentTimeMillis() - 2000), new Instant(System.currentTimeMillis() - 2000),
eosbtcCashMarket, eosbtcCashTestOrder.getLimitPriceCount(), eosbtcCashTestOrder.getOpenVolumeCount(), "test");
BigDecimal eosbtcCashTragetCommsbd = BigDecimal.valueOf(0.00002405);
DecimalAmount eosbtcCashTragetComms = new DecimalAmount(eosbtcCashTragetCommsbd).negate();
BigDecimal eosbtcCashTragetMarginbd = BigDecimal.valueOf(0.00400834);
DecimalAmount eosbtcCashTragetMargin = new DecimalAmount(eosbtcCashTragetMarginbd).negate();
Transaction eosbtctCashransaction = new Transaction(eosbtcCashTestFill, eosbtcCashTestFill.getTime());
assertEquals(eostransaction.getCommission(), eosTragetComms);
assertEquals(eostransaction.getCommissionDecimal(), eosTragetCommsbd.negate());
assertEquals(eostransaction.getMargin(), eosTragetMargin);
assertEquals(eostransaction.getMarginDecimal(), eosTragetMarginbd.negate());
assertEquals(eostransaction.getCommissionCurrency(), eos);
assertEquals(btctransaction.getCommission(), btcTragetComms);
assertEquals(btctransaction.getCommissionDecimal(), btcTragetCommsbd.negate());
assertEquals(btctransaction.getMargin(), btcTragetMargin);
assertEquals(btctransaction.getMarginDecimal(), btcTragetMarginbd.negate());
assertEquals(btctransaction.getCommissionCurrency(), btc);
assertEquals(btctCashransaction.getCommission(), btcCashTragetComms);
assertEquals(btctCashransaction.getCommissionDecimal(), btcCashTragetCommsbd.negate());
assertEquals(btctCashransaction.getMargin(), btcCashTragetMargin);
assertEquals(btctCashransaction.getMarginDecimal(), btcCashTragetMarginbd.negate());
assertEquals(btctCashransaction.getCommissionCurrency(), usd);
assertEquals(eosbtctCashransaction.getCommission(), eosbtcCashTragetComms);
assertEquals(eosbtctCashransaction.getCommissionDecimal(), eosbtcCashTragetCommsbd.negate());
assertEquals(eosbtctCashransaction.getMargin(), eosbtcCashTragetMargin);
assertEquals(eosbtctCashransaction.getMarginDecimal(), eosbtcCashTragetMarginbd.negate());
assertEquals(eosbtctCashransaction.getCommissionCurrency(), btc);
}
private Market createMarket(String exchangestr, Asset base, Asset quote, Prompt prompt, double priceBasis, double volumeBasis) {
Exchange exchange = new Exchange(exchangestr);
Listing listing = new Listing(base, quote, prompt);
Market market = new Market(exchange, listing, priceBasis, volumeBasis);
return market;
}
private Market createMarket(String exchangestr, Asset base, Asset quote, double priceBasis, double volumeBasis, double makerFeeRate, double takerFeeRate,
FeeMethod feeMethod, double marginFeeRate) {
Exchange exchange = new Exchange(exchangestr, 3, makerFeeRate, takerFeeRate, priceBasis, volumeBasis, feeMethod, marginFeeRate, feeMethod, volumeBasis,
false);
Listing listing = new Listing(base, quote);
Market market = new Market(exchange, listing, priceBasis, volumeBasis);
return market;
}
}
| 3,153 |
511 | <filename>framework/src/bluetooth/adaptation/include/bt-adaptation-device.h
/*
* Copyright (c) 2011 Samsung Electronics Co., Ltd All Rights Reserved
*
* Licensed under the Apache License, Version 2.0 (the License);
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an AS IS BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef __BT_ADAPTATION_DEVICE_H__
#define __BT_ADAPTATION_DEVICE_H__
#include <bluetooth/bluetooth_type.h>
#include <bluetooth/bluetooth_type_internal.h>
#include "bluetooth_private.h"
#ifdef __cplusplus
extern "C"
{
#endif /* __cplusplus */
#define BLUETOOTH_ADDRESS_LENGTH 6 /**< This specifies bluetooth device address length */
#define BLUETOOTH_ADVERTISING_DATA_LENGTH_MAX 31 /**< This specifies maximum AD data length */
#define BLUETOOTH_DEVICE_NAME_LENGTH_MAX 248 /**< This specifies maximum device name length */
#define BLUETOOTH_MANUFACTURER_DATA_LENGTH_MAX 240 /**< This specifies maximum manufacturer data length */
#define BLUETOOTH_MAX_SERVICES_FOR_DEVICE 40 /**< This specifies maximum number of services
a device can support */
#define BLUETOOTH_UUID_STRING_MAX 50
/**
* Connected state types
*/
typedef enum {
BLUETOOTH_CONNECTED_LINK_NONE = 0x00,
BLUETOOTH_CONNECTED_LINK_BREDR = 0x01,
BLUETOOTH_CONNECTED_LINK_LE = 0x02,
BLUETOOTH_CONNECTED_LINK_BREDR_LE = 0x03,
} bluetooth_connected_link_t;
/**
* Major device class (part of Class of Device)
*/
typedef enum {
BLUETOOTH_DEVICE_MAJOR_CLASS_MISC = 0x00, /**< Miscellaneous major device class*/
BLUETOOTH_DEVICE_MAJOR_CLASS_COMPUTER = 0x01, /**< Computer major device class*/
BLUETOOTH_DEVICE_MAJOR_CLASS_PHONE = 0x02, /**< Phone major device class*/
BLUETOOTH_DEVICE_MAJOR_CLASS_LAN_ACCESS_POINT = 0x03, /**< LAN major device class*/
BLUETOOTH_DEVICE_MAJOR_CLASS_AUDIO = 0x04, /**< AUDIO major device class*/
BLUETOOTH_DEVICE_MAJOR_CLASS_PERIPHERAL = 0x05, /**< Peripheral major device class*/
BLUETOOTH_DEVICE_MAJOR_CLASS_IMAGING = 0x06, /**< Imaging major device class*/
BLUETOOTH_DEVICE_MAJOR_CLASS_WEARABLE = 0x07, /**< Wearable device class*/
BLUETOOTH_DEVICE_MAJOR_CLASS_TOY = 0x08, /**< Toy device class*/
BLUETOOTH_DEVICE_MAJOR_CLASS_HEALTH = 0x09, /**< Health device class*/
BLUETOOTH_DEVICE_MAJOR_CLASS_UNCLASSIFIED = 0x1F /**< Unknown major device class*/
} bluetooth_device_major_class_t;
typedef enum {
BLUETOOTH_DEVICE_MINOR_CLASS_UNCLASSIFIED = 0x00, /**< unclassified minor class */
/* About Computer Major class */
BLUETOOTH_DEVICE_MINOR_CLASS_DESKTOP_WORKSTATION = 0x04, /**< desktop workstation
minor class */
BLUETOOTH_DEVICE_MINOR_CLASS_SERVER_CLASS_COMPUTER = 0x08, /**< server minor class */
BLUETOOTH_DEVICE_MINOR_CLASS_LAPTOP = 0x0C, /**< laptop minor class */
BLUETOOTH_DEVICE_MINOR_CLASS_HANDHELD_PC_OR_PDA = 0x10, /**< PDA minor class */
BLUETOOTH_DEVICE_MINOR_CLASS_PALM_SIZED_PC_OR_PDA = 0x14, /**< PALM minor class */
BLUETOOTH_DEVICE_MINOR_CLASS_WEARABLE_COMPUTER = 0x18, /**< Wearable PC minor class */
/* About Phone Major class */
BLUETOOTH_DEVICE_MINOR_CLASS_CELLULAR = 0x04, /**< Cellular minor class */
BLUETOOTH_DEVICE_MINOR_CLASS_CORDLESS = 0x08, /**< cordless minor class */
BLUETOOTH_DEVICE_MINOR_CLASS_SMART_PHONE = 0x0C, /**< smart phone minor class */
BLUETOOTH_DEVICE_MINOR_CLASS_WIRED_MODEM_OR_VOICE_GATEWAY = 0x10,
/**< voice gateway minor class */
BLUETOOTH_DEVICE_MINOR_CLASS_COMMON_ISDN_ACCESS = 0x14, /**< ISDN minor class */
/* About LAN/Network Access Point Major class */
BLUETOOTH_DEVICE_MINOR_CLASS_FULLY_AVAILABLE = 0x04, /**< Fully available minor class */
BLUETOOTH_DEVICE_MINOR_CLASS_1_TO_17_PERCENT_UTILIZED = 0x20, /**< 1-17% utilized minor class */
BLUETOOTH_DEVICE_MINOR_CLASS_17_TO_33_PERCENT_UTILIZED = 0x40, /**< 17-33% utilized minor class */
BLUETOOTH_DEVICE_MINOR_CLASS_33_TO_50_PERCENT_UTILIZED = 0x60, /**< 33-50% utilized minor class */
BLUETOOTH_DEVICE_MINOR_CLASS_50_to_67_PERCENT_UTILIZED = 0x80, /**< 50-67% utilized minor class */
BLUETOOTH_DEVICE_MINOR_CLASS_67_TO_83_PERCENT_UTILIZED = 0xA0, /**< 67-83% utilized minor class */
BLUETOOTH_DEVICE_MINOR_CLASS_83_TO_99_PERCENT_UTILIZED = 0xC0, /**< 83-99% utilized minor class */
BLUETOOTH_DEVICE_MINOR_CLASS_NO_SERVICE_AVAILABLE = 0xE0, /**< No service available minor class */
/* About Audio/Video Major class */
BLUETOOTH_DEVICE_MINOR_CLASS_HEADSET_PROFILE = 0x04, /**< Headset minor class */
BLUETOOTH_DEVICE_MINOR_CLASS_HANDSFREE = 0x08, /**< Handsfree minor class*/
BLUETOOTH_DEVICE_MINOR_CLASS_MICROPHONE = 0x10, /**< Microphone minor class */
BLUETOOTH_DEVICE_MINOR_CLASS_LOUD_SPEAKER = 0x14, /**< Loud Speaker minor class */
BLUETOOTH_DEVICE_MINOR_CLASS_HEADPHONES = 0x18, /**< Headphones minor class */
BLUETOOTH_DEVICE_MINOR_CLASS_PORTABLE_AUDIO = 0x1C, /**< Portable Audio minor class */
BLUETOOTH_DEVICE_MINOR_CLASS_CAR_AUDIO = 0x20, /**< Car Audio minor class */
BLUETOOTH_DEVICE_MINOR_CLASS_SET_TOP_BOX = 0x24, /**< Set top box minor class */
BLUETOOTH_DEVICE_MINOR_CLASS_HIFI_AUDIO_DEVICE = 0x28, /**< Hifi minor class */
BLUETOOTH_DEVICE_MINOR_CLASS_VCR = 0x2C, /**< VCR minor class */
BLUETOOTH_DEVICE_MINOR_CLASS_VIDEO_CAMERA = 0x30, /**< Video Camera minor class */
BLUETOOTH_DEVICE_MINOR_CLASS_CAM_CORDER = 0x34, /**< CAM Corder minor class */
BLUETOOTH_DEVICE_MINOR_CLASS_VIDEO_MONITOR = 0x38, /**<Video Monitor minor class */
BLUETOOTH_DEVICE_MINOR_CLASS_VIDEO_DISPLAY_AND_LOUD_SPEAKER = 0x3C,
/**< Video Display and Loud
Speaker minor class */
BLUETOOTH_DEVICE_MINOR_CLASS_VIDEO_CONFERENCING = 0x40, /**< Video Conferencing minor
class */
BLUETOOTH_DEVICE_MINOR_CLASS_GAMING_OR_TOY = 0x48, /**< Gaming or toy minor class */
/* About Peripheral Major class */
BLUETOOTH_DEVICE_MINOR_CLASS_KEY_BOARD = 0x40, /**< Key board minor class */
BLUETOOTH_DEVICE_MINOR_CLASS_POINTING_DEVICE = 0x80, /**< Pointing Device minor class */
BLUETOOTH_DEVICE_MINOR_CLASS_COMBO_KEYBOARD_OR_POINTING_DEVICE = 0xC0,
/**< Combo Keyboard or pointing
device minorclass */
BLUETOOTH_DEVICE_MINOR_CLASS_JOYSTICK = 0x04, /**< JoyStick minor class */
BLUETOOTH_DEVICE_MINOR_CLASS_GAME_PAD = 0x08, /**< Game Pad minor class */
BLUETOOTH_DEVICE_MINOR_CLASS_REMOTE_CONTROL = 0x0C, /**< Remote Control minor class */
BLUETOOTH_DEVICE_MINOR_CLASS_SENSING_DEVICE = 0x10, /**< Sensing Device minor class */
BLUETOOTH_DEVICE_MINOR_CLASS_DIGITIZER_TABLET = 0x14, /**< Digitizer minor class */
BLUETOOTH_DEVICE_MINOR_CLASS_CARD_READER = 0x18, /**< Card Reader minor class */
BLUETOOTH_DEVICE_MINOR_CLASS_DIGITAL_PEN = 0x1C, /**< Digital pen minor class */
BLUETOOTH_DEVICE_MINOR_CLASS_HANDHELD_SCANNER = 0x20, /**< Handheld scanner for bar-codes, RFID minor class */
BLUETOOTH_DEVICE_MINOR_CLASS_HANDHELD_GESTURAL_INPUT_DEVICE = 0x24, /**< Handheld gestural input device minor class */
/* About Imaging Major class */
BLUETOOTH_DEVICE_MINOR_CLASS_DISPLAY = 0x10, /**< Display minor class */
BLUETOOTH_DEVICE_MINOR_CLASS_CAMERA = 0x20, /**< Camera minor class */
BLUETOOTH_DEVICE_MINOR_CLASS_SCANNER = 0x40, /**< Scanner minor class */
BLUETOOTH_DEVICE_MINOR_CLASS_PRINTER = 0x80, /**< Printer minor class */
/* About Wearable Major class */
BLUETOOTH_DEVICE_MINOR_CLASS_WRIST_WATCH = 0x04, /**< Wrist watch minor class */
BLUETOOTH_DEVICE_MINOR_CLASS_PAGER = 0x08, /**< Pager minor class */
BLUETOOTH_DEVICE_MINOR_CLASS_JACKET = 0x0C, /**< Jacket minor class */
BLUETOOTH_DEVICE_MINOR_CLASS_HELMET = 0x10, /**< Helmet minor class */
BLUETOOTH_DEVICE_MINOR_CLASS_GLASSES = 0x14, /**< Glasses minor class */
/* About Toy Major class */
BLUETOOTH_DEVICE_MINOR_CLASS_ROBOT = 0x04, /**< Robot minor class */
BLUETOOTH_DEVICE_MINOR_CLASS_VEHICLE = 0x08, /**< Vehicle minor class */
BLUETOOTH_DEVICE_MINOR_CLASS_DOLL_OR_ACTION = 0x0C, /**< Doll or Action minor class */
BLUETOOTH_DEVICE_MINOR_CLASS_CONTROLLER = 0x10, /**< Controller minor class */
BLUETOOTH_DEVICE_MINOR_CLASS_GAME = 0x14, /**< Game minor class */
/* About Health Major class */
BLUETOOTH_DEVICE_MINOR_CLASS_BLOOD_PRESSURE_MONITOR = 0x04, /**< Blood Pressure minor class */
BLUETOOTH_DEVICE_MINOR_CLASS_THERMOMETER = 0x08, /**< Thermometer minor class */
BLUETOOTH_DEVICE_MINOR_CLASS_WEIGHING_SCALE = 0x0C, /**< Weighing Scale minor class */
BLUETOOTH_DEVICE_MINOR_CLASS_GLUCOSE_METER = 0x10, /**< Glucose minor class */
BLUETOOTH_DEVICE_MINOR_CLASS_PULSE_OXIMETER = 0x14, /**< Pulse Oximeter minor class */
BLUETOOTH_DEVICE_MINOR_CLASS_HEART_OR_PULSE_RATE_MONITOR = 0x18,/**< Heart or pulse rate monitor minor class */
BLUETOOTH_DEVICE_MINOR_CLASS_MEDICAL_DATA_DISPLAY = 0x1C, /**< Medical minor class */
BLUETOOTH_DEVICE_MINOR_CLASS_STEP_COUNTER = 0x20, /**< Step Counter minor class */
BLUETOOTH_DEVICE_MINOR_CLASS_BODY_COMPOSITION_ANALYZER = 0x24, /**< Body composition analyzer minor class */
BLUETOOTH_DEVICE_MINOR_CLASS_PEAK_FLOW_MONITOR = 0x28, /**< Peak flow monitor minor class */
BLUETOOTH_DEVICE_MINOR_CLASS_MEDICATION_MONITOR = 0x2C, /**< Medication monitor minor class */
BLUETOOTH_DEVICE_MINOR_CLASS_KNEE_PROSTHESIS = 0x30, /**< Knee prosthesis minor class */
BLUETOOTH_DEVICE_MINOR_CLASS_ANKLE_PROSTHESIS = 0x34, /**< Ankle prosthesis minor class */
} bluetooth_device_minor_class_t;
/**
* Service class part of class of device returned from device discovery
*/
typedef enum {
BLUETOOTH_DEVICE_SERVICE_CLASS_LIMITED_DISCOVERABLE_MODE = 0x002000,
BLUETOOTH_DEVICE_SERVICE_CLASS_POSITIONING = 0x010000, /**< */
BLUETOOTH_DEVICE_SERVICE_CLASS_NETWORKING = 0x020000, /**< */
BLUETOOTH_DEVICE_SERVICE_CLASS_RENDERING = 0x040000, /**< */
BLUETOOTH_DEVICE_SERVICE_CLASS_CAPTURING = 0x080000, /**< */
BLUETOOTH_DEVICE_SERVICE_CLASS_OBJECT_TRANSFER = 0x100000, /**< */
BLUETOOTH_DEVICE_SERVICE_CLASS_AUDIO = 0x200000, /**< */
BLUETOOTH_DEVICE_SERVICE_CLASS_TELEPHONY = 0x400000, /**< */
BLUETOOTH_DEVICE_SERVICE_CLASS_INFORMATION = 0x800000, /**< */
} bluetooth_device_service_class_t;
/**
* structure to hold the device information
*/
typedef struct {
bluetooth_device_major_class_t major_class; /**< major device class */
bluetooth_device_minor_class_t minor_class; /**< minor device class */
bluetooth_device_service_class_t service_class;
/**< service device class */
} bluetooth_device_class_t;
/**
* This is Bluetooth device address type, fixed to 6 bytes ##:##:##:##:##:##
*/
typedef struct {
unsigned char addr[BLUETOOTH_ADDRESS_LENGTH];
} bluetooth_device_address_t;
/**
* This is Bluetooth device name type, maximum size of Bluetooth device name is 248 bytes
*/
typedef struct {
char name[BLUETOOTH_DEVICE_NAME_LENGTH_MAX + 1];
} bluetooth_device_name_t;
/**
* This is Bluetooth manufacturer specific data, maximum size of data is 240 bytes
*/
typedef struct {
	int data_len; /**< manufacturer specific data length */
char data[BLUETOOTH_MANUFACTURER_DATA_LENGTH_MAX];
} bluetooth_manufacturer_data_t;
/**
* structure to hold the device information
*/
typedef struct {
bluetooth_device_address_t device_address; /**< device address */
bluetooth_device_name_t device_name; /**< device name */
bluetooth_device_class_t device_class; /**< device class */
char uuids[BLUETOOTH_MAX_SERVICES_FOR_DEVICE][BLUETOOTH_UUID_STRING_MAX];
unsigned int service_list_array[BLUETOOTH_MAX_SERVICES_FOR_DEVICE]; /**< Use enum values in bt_service_uuid_list_t */
int service_index;
	int rssi; /**< received signal strength */
bool paired; /**< paired flag */
bluetooth_connected_link_t connected; /**< connected link type */
bool trust; /**< trust flag */
	bool is_alias_set; /**< is device alias set flag */
	bluetooth_manufacturer_data_t manufacturer_data; /**< manufacturer specific data */
} bluetooth_device_info_t;
/**
* Advertising data
*/
typedef struct {
uint8_t data[BLUETOOTH_ADVERTISING_DATA_LENGTH_MAX];
} bluetooth_advertising_data_t;
/**
* structure to hold the LE device information
*/
typedef struct {
	int data_len; /**< advertising data length */
	bluetooth_advertising_data_t data; /**< advertising data */
} bluetooth_le_advertising_data_t;
typedef struct {
bluetooth_device_address_t device_address; /**< device address */
int addr_type; /**< address type*/
	int rssi; /**< received signal strength */
bluetooth_le_advertising_data_t adv_ind_data;
bluetooth_le_advertising_data_t scan_resp_data;
} bluetooth_le_device_info_t;
int bt_adapt_bond_device(const char *device_address);
int bt_adapt_unbond_device(const char *device_address);
int bt_adapt_get_bonded_device(const char *device_address,
bt_device_info_s *device_info);
int bt_adapt_is_device_connected(const char *device_address,
bt_profile_e bt_profile,
bool *is_connected);
int bt_adapt_connect_le(const char *device_address);
int bt_adapt_disconnect_le(const char *device_address);
int bt_adapt_le_conn_update(const char *device_address,
const bt_le_conn_update_s *parameters);
int bt_adapt_update_le_connection_mode(const char *device_address,
bt_device_le_connection_mode_e mode);
int bt_adapt_request_att_mtu(const char *device_address, unsigned int mtu);
int bt_adapt_get_att_mtu(const char *device_address, unsigned int *mtu);
#ifdef __cplusplus
}
#endif /* __cplusplus */
#endif /* __BT_ADAPTATION_DEVICE_H__ */
| 5,478 |
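The header above only declares the device adaptation API; no caller is shown in this entry. The fragment below is a hypothetical caller-side sketch, not part of the Tizen sources: it assumes the usual Tizen convention that these functions return 0 (BT_ERROR_NONE) on success, and that bt_profile_e from bluetooth_type.h provides a value such as BT_PROFILE_A2DP. The address string follows the "##:##:##:##:##:##" format noted on bluetooth_device_address_t; the helper name is made up for illustration.

#include <stdio.h>
#include <stdbool.h>
#include "bt-adaptation-device.h"

/* Hypothetical helper: request bonding with a remote device unless it is
 * already connected over the given profile. Error handling is minimal. */
static void example_bond_if_needed(const char *remote_addr)
{
	bool connected = false;

	/* Ask the adaptation layer whether the remote is connected over A2DP. */
	if (bt_adapt_is_device_connected(remote_addr, BT_PROFILE_A2DP, &connected) != 0) {
		printf("connection query failed for %s\n", remote_addr);
		return;
	}

	if (connected) {
		printf("%s is already connected, skipping bonding\n", remote_addr);
		return;
	}

	/* Start bonding; the result is reported asynchronously by the stack. */
	if (bt_adapt_bond_device(remote_addr) != 0)
		printf("bond request failed for %s\n", remote_addr);
}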
963 | <reponame>brandstaetter/high-performance-java-persistence
package com.vladmihalcea.book.hpjp.hibernate.identifier;
import com.vladmihalcea.book.hpjp.util.AbstractTest;
import org.hibernate.Session;
import org.hibernate.annotations.GenericGenerator;
import org.junit.Test;
import javax.persistence.*;
import java.sql.Statement;
public class AssignedIdentityGeneratorTest extends AbstractTest {
@Override
protected Class<?>[] entities() {
return new Class<?>[] {
Post.class,
};
}
@Test
public void test() {
LOGGER.debug("test");
doInJPA(entityManager -> {
Session session = entityManager.unwrap(Session.class);
session.doWork(connection -> {
try(Statement statement = connection.createStatement()) {
statement.executeUpdate("ALTER TABLE post ALTER COLUMN id bigint generated by default as identity (start with 1)");
}
});
});
doInJPA(entityManager -> {
entityManager.persist(new Post());
entityManager.persist(new Post(-1L));
entityManager.persist(new Post());
entityManager.persist(new Post(-2L));
});
}
@Entity(name = "Post")
@Table(name = "post")
public static class Post implements Identifiable<Long> {
@Id
@GenericGenerator(
name = "assigned-identity",
strategy = "com.vladmihalcea.book.hpjp.hibernate.identifier.AssignedIdentityGenerator"
)
@GeneratedValue(generator = "assigned-identity", strategy = GenerationType.IDENTITY)
private Long id;
@Version
private Integer version;
public Post() {
}
public Post(Long id) {
this.id = id;
}
@Override
public Long getId() {
return id;
}
}
}
| 851 |
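The test above wires the Post entity to a custom generator via the fully qualified name com.vladmihalcea.book.hpjp.hibernate.identifier.AssignedIdentityGenerator, but the generator class itself is not part of this excerpt. The class below is a minimal sketch of how such a generator can be written against the Hibernate 5.x API: if the entity already carries an identifier (exposed through the Identifiable contract the test's Post implements), that value is reused; otherwise generation falls back to the standard IDENTITY behaviour inherited from IdentityGenerator. The placement of Identifiable in the same package is an assumption made for illustration.

package com.vladmihalcea.book.hpjp.hibernate.identifier;

import java.io.Serializable;

import org.hibernate.engine.spi.SharedSessionContractImplementor;
import org.hibernate.id.IdentityGenerator;

// Sketch only: reuse an already-assigned id when present, otherwise delegate
// to Hibernate's IdentityGenerator so the database IDENTITY column is used.
public class AssignedIdentityGenerator extends IdentityGenerator {

    @Override
    public Serializable generate(SharedSessionContractImplementor session, Object obj) {
        if (obj instanceof Identifiable) {
            // Identifiable is the interface used by the test's Post entity;
            // it is assumed to expose the current identifier via getId().
            Object id = ((Identifiable<?>) obj).getId();
            if (id != null) {
                return (Serializable) id;
            }
        }
        // No id assigned: let the superclass signal a post-insert identifier,
        // so Hibernate reads the generated value back after the INSERT.
        return super.generate(session, obj);
    }
}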