text
stringlengths 2
99.9k
| meta
dict |
---|---|
/*
* MD5
* (C) 1999-2008 Jack Lloyd
*
* Distributed under the terms of the Botan license
*/
#include <botan/md5.h>
#include <botan/loadstor.h>
#include <botan/rotate.h>
namespace Botan {
namespace {
/*
* MD5 FF Function
*
* Round 1 step (RFC 1321): F(B,C,D) = (B & C) | (~B & D), written in
* the equivalent "mux" form D ^ (B & (C ^ D)), which saves one operation.
*/
inline void FF(u32bit& A, u32bit B, u32bit C, u32bit D, u32bit msg,
               byte S, u32bit magic)
   {
   const u32bit F = D ^ (B & (C ^ D));
   A = rotate_left(A + F + msg + magic, S) + B;
   }
/*
* MD5 GG Function
*
* Round 2 step (RFC 1321): G(B,C,D) = (B & D) | (C & ~D), written in
* the equivalent "mux" form C ^ (D & (B ^ C)).
*/
inline void GG(u32bit& A, u32bit B, u32bit C, u32bit D, u32bit msg,
               byte S, u32bit magic)
   {
   const u32bit G = C ^ (D & (B ^ C));
   A = rotate_left(A + G + msg + magic, S) + B;
   }
/*
* MD5 HH Function
*
* Round 3 step (RFC 1321): H(B,C,D) = B ^ C ^ D.
*/
inline void HH(u32bit& A, u32bit B, u32bit C, u32bit D, u32bit msg,
               byte S, u32bit magic)
   {
   const u32bit H = B ^ C ^ D;
   A = rotate_left(A + H + msg + magic, S) + B;
   }
/*
* MD5 II Function
*
* Round 4 step (RFC 1321): I(B,C,D) = C ^ (B | ~D).
*/
inline void II(u32bit& A, u32bit B, u32bit C, u32bit D, u32bit msg,
               byte S, u32bit magic)
   {
   const u32bit I = C ^ (B | ~D);
   A = rotate_left(A + I + msg + magic, S) + B;
   }
}
/*
* MD5 Compression Function
*
* Processes `blocks` consecutive 64-byte message blocks from `input`,
* folding each into the 128-bit chaining state held in `digest`
* (RFC 1321, section 3.4). The step schedule (word order, shift amounts,
* and additive constants) is fixed by the MD5 specification.
*/
void MD5::compress_n(const byte input[], size_t blocks)
{
u32bit A = digest[0], B = digest[1], C = digest[2], D = digest[3];
for(size_t i = 0; i != blocks; ++i)
{
// Decode the 64-byte block into 16 little-endian 32-bit words M[0..15]
load_le(&M[0], input, M.size());
// Round 1: 16 FF steps
FF(A,B,C,D,M[ 0], 7,0xD76AA478); FF(D,A,B,C,M[ 1],12,0xE8C7B756);
FF(C,D,A,B,M[ 2],17,0x242070DB); FF(B,C,D,A,M[ 3],22,0xC1BDCEEE);
FF(A,B,C,D,M[ 4], 7,0xF57C0FAF); FF(D,A,B,C,M[ 5],12,0x4787C62A);
FF(C,D,A,B,M[ 6],17,0xA8304613); FF(B,C,D,A,M[ 7],22,0xFD469501);
FF(A,B,C,D,M[ 8], 7,0x698098D8); FF(D,A,B,C,M[ 9],12,0x8B44F7AF);
FF(C,D,A,B,M[10],17,0xFFFF5BB1); FF(B,C,D,A,M[11],22,0x895CD7BE);
FF(A,B,C,D,M[12], 7,0x6B901122); FF(D,A,B,C,M[13],12,0xFD987193);
FF(C,D,A,B,M[14],17,0xA679438E); FF(B,C,D,A,M[15],22,0x49B40821);
// Round 2: 16 GG steps
GG(A,B,C,D,M[ 1], 5,0xF61E2562); GG(D,A,B,C,M[ 6], 9,0xC040B340);
GG(C,D,A,B,M[11],14,0x265E5A51); GG(B,C,D,A,M[ 0],20,0xE9B6C7AA);
GG(A,B,C,D,M[ 5], 5,0xD62F105D); GG(D,A,B,C,M[10], 9,0x02441453);
GG(C,D,A,B,M[15],14,0xD8A1E681); GG(B,C,D,A,M[ 4],20,0xE7D3FBC8);
GG(A,B,C,D,M[ 9], 5,0x21E1CDE6); GG(D,A,B,C,M[14], 9,0xC33707D6);
GG(C,D,A,B,M[ 3],14,0xF4D50D87); GG(B,C,D,A,M[ 8],20,0x455A14ED);
GG(A,B,C,D,M[13], 5,0xA9E3E905); GG(D,A,B,C,M[ 2], 9,0xFCEFA3F8);
GG(C,D,A,B,M[ 7],14,0x676F02D9); GG(B,C,D,A,M[12],20,0x8D2A4C8A);
// Round 3: 16 HH steps
HH(A,B,C,D,M[ 5], 4,0xFFFA3942); HH(D,A,B,C,M[ 8],11,0x8771F681);
HH(C,D,A,B,M[11],16,0x6D9D6122); HH(B,C,D,A,M[14],23,0xFDE5380C);
HH(A,B,C,D,M[ 1], 4,0xA4BEEA44); HH(D,A,B,C,M[ 4],11,0x4BDECFA9);
HH(C,D,A,B,M[ 7],16,0xF6BB4B60); HH(B,C,D,A,M[10],23,0xBEBFBC70);
HH(A,B,C,D,M[13], 4,0x289B7EC6); HH(D,A,B,C,M[ 0],11,0xEAA127FA);
HH(C,D,A,B,M[ 3],16,0xD4EF3085); HH(B,C,D,A,M[ 6],23,0x04881D05);
HH(A,B,C,D,M[ 9], 4,0xD9D4D039); HH(D,A,B,C,M[12],11,0xE6DB99E5);
HH(C,D,A,B,M[15],16,0x1FA27CF8); HH(B,C,D,A,M[ 2],23,0xC4AC5665);
// Round 4: 16 II steps
II(A,B,C,D,M[ 0], 6,0xF4292244); II(D,A,B,C,M[ 7],10,0x432AFF97);
II(C,D,A,B,M[14],15,0xAB9423A7); II(B,C,D,A,M[ 5],21,0xFC93A039);
II(A,B,C,D,M[12], 6,0x655B59C3); II(D,A,B,C,M[ 3],10,0x8F0CCC92);
II(C,D,A,B,M[10],15,0xFFEFF47D); II(B,C,D,A,M[ 1],21,0x85845DD1);
II(A,B,C,D,M[ 8], 6,0x6FA87E4F); II(D,A,B,C,M[15],10,0xFE2CE6E0);
II(C,D,A,B,M[ 6],15,0xA3014314); II(B,C,D,A,M[13],21,0x4E0811A1);
II(A,B,C,D,M[ 4], 6,0xF7537E82); II(D,A,B,C,M[11],10,0xBD3AF235);
II(C,D,A,B,M[ 2],15,0x2AD7D2BB); II(B,C,D,A,M[ 9],21,0xEB86D391);
// Add this block's result into the chaining state (Davies-Meyer style)
A = (digest[0] += A);
B = (digest[1] += B);
C = (digest[2] += C);
D = (digest[3] += D);
input += hash_block_size();
}
}
/*
* Copy out the digest
*
* Serializes each 32-bit state word little-endian into the output buffer.
*/
void MD5::copy_out(byte output[])
   {
   const size_t words = output_length() / 4;
   for(size_t w = 0; w != words; ++w)
      store_le(digest[w], output + 4*w);
   }
/*
* Clear memory of sensitive data
*/
void MD5::clear()
{
MDx_HashFunction::clear();
zeroise(M);
digest[0] = 0x67452301;
digest[1] = 0xEFCDAB89;
digest[2] = 0x98BADCFE;
digest[3] = 0x10325476;
}
}
| {
"pile_set_name": "Github"
} |
<HTML>
<HEAD>
<META Name="Generator" Content="Lotus Word Pro"></META>
<TITLE>RFP-0005 Services Framework Remote Administration (1999-09-17)</TITLE>
</HEAD>
<BODY BGCOLOR="#FFFFFF">
<CENTER>
<TABLE CELLSPACING="0" CELLPADDING="0" WIDTH="719">
<TR>
<TD WIDTH="29%" ALIGN="LEFT" VALIGN="MIDDLE"> </TD>
<TD WIDTH="43%" ALIGN="LEFT" VALIGN="MIDDLE"> </TD>
<TD ALIGN="LEFT" VALIGN="MIDDLE"> </TD></TR>
<TR>
<TD ALIGN="LEFT" VALIGN="MIDDLE">Java Expert Group </TD>
<TD ALIGN="LEFT" VALIGN="MIDDLE"> </TD>
<TD ALIGN="LEFT" VALIGN="MIDDLE">Carter/Grimm </TD></TR>
<TR>
<TD ALIGN="LEFT" VALIGN="MIDDLE">Request for Proposal: 0005 </TD>
<TD ALIGN="LEFT" VALIGN="MIDDLE"> </TD>
<TD ALIGN="LEFT" VALIGN="MIDDLE">IBM Corporation </TD></TR>
<TR>
<TD ALIGN="LEFT" VALIGN="MIDDLE">Expires 20 days after: 17-Sep-1999 </TD>
<TD ALIGN="LEFT" VALIGN="MIDDLE"> </TD>
<TD ALIGN="LEFT" VALIGN="MIDDLE">1999-09-17 </TD></TR>
<TR>
<TD ALIGN="LEFT" VALIGN="MIDDLE">rfp-0005.html </TD>
<TD ALIGN="LEFT" VALIGN="MIDDLE"> </TD>
<TD ALIGN="LEFT" VALIGN="MIDDLE"> </TD></TR>
<TR>
<TD ALIGN="LEFT" VALIGN="MIDDLE"> </TD>
<TD ALIGN="LEFT" VALIGN="MIDDLE"> </TD>
<TD ALIGN="LEFT" VALIGN="MIDDLE"> </TD></TR>
</TABLE></CENTER>
<H2 ALIGN="CENTER">Services Framework Remote Administration RFP</H2>
<H3>Status of this Memo </H3>
<P>This document suggests the following extension to the OSGi specification for the Open Services Gateway Initiative, and requests discussion. Distribution of this document is unlimited.
<P>
<H3>Abstract </H3>
<P>A key aspect of the Open Services Gateway Initiative Services Framework[1] is the ability to administrate the framework under the control of a remote managing system. This RFP provides some general goals and guidelines and solicits technical submissions that will lead to defining an administration capability and API for the Services Framework.
<P>
<H3>Acknowledgment </H3>
<P>The author wants to acknowledge the contributions of the members of the Connected Alliance and OSGi.
<P>
<H3>Introduction </H3>
<P>The Remote Administration API enables a managing system to remotely manage and configure the bundles and services on a target device.
<P>
<H3>Motivation and Rationale </H3>
<P>The primary goal of the Remote Administration API is to remotely manage and configure bundles and services on a target device. The Remote Administration API should meet this goal as follows. First, it should provide a consistent application programming interface (API) to use during development of device management applications. Second, it should provide the flexibility for implementors to "plug-in" different device management protocols below the Remote Administration API.
<P>
<P>Another goal of the Remote Administration API is to enable implementors to deploy implementations in a variety of devices; the different hardware characteristics of the devices affect the amount of code that can run on these devices. Therefore, the Remote Administration API should require a minimum of components on the managed device; however, an implementor has the flexibility to utilize additional components in devices that can accommodate these components.
<P>
<H3>Technical Discussion </H3>
<P>Administration of the Services Framework must provide, at a minimum, the following capabilities and features:
<P>
<UL TYPE="disc">
<LI>Remotely administer the Services Framework
<LI>Remotely configure services on a managed device.
<LI>Minimize the software requirements for a managed device.
<ul>
<LI>The framework should only require the Java profile[2] for the Services Framework.
<LI>Enable services to share common configuration properties.
</ul>
<LI>Scalable for mass deployment and configuration.
<ul>
<LI>Services must be remotely configurable without a user interface.
<LI>Services may be remotely configurable with a user interface.
<LI>Enables configuration of services when the Services Framework on the target device is down or unavailable.
<LI>Enables an administrator to configure properties en masse for groups of devices.
<LI>Does not require downloading of communication classes from every managed device.
</ul>
<LI>Upward compatible.
<ul>
<LI>De-couples the configuration state from services.
<LI>Does not require serialization because a serialized object might not be compatible with all current or future JVMs.
</ul>
<LI>Protocol neutral.
</UL>
<P>
<H3>Security Considerations </H3>
<P>As a request for specification proposals, this document has no direct security considerations. However, the resulting Administration capability and API should undergo a security review before approval of the API.
<P>
<H3>References </H3>
<P>[1] Jim Mathis, "Services Framework RFP", OSGi RFP-0001, 1999-09-16, rfp-0001.html
<P>[2] Jim Mathis, "Services Framework J2ME Profile RFP", OSGi RFP-0006, 1999-09-17, rfp-0006.html
<P>[3] Keith Carter/Jon Grimm, "Services Framework Configuration RFP", OSGi RFP-0007, 1999-09-17, rfp-0007.html
<H3>Author's Address </H3>
<H3></H3>
<TABLE CELLSPACING="0" CELLPADDING="5" WIDTH="636">
<TR>
<TD WIDTH="231" ALIGN="LEFT" VALIGN="TOP">First Name:</TD>
<TD WIDTH="179" ALIGN="LEFT" VALIGN="TOP">Keith</TD>
<TD WIDTH="182" ALIGN="LEFT" VALIGN="TOP">Jon</TD></TR>
<TR>
<TD ALIGN="LEFT" VALIGN="TOP">Last Name: </TD>
<TD ALIGN="LEFT" VALIGN="TOP">Carter</TD>
<TD ALIGN="LEFT" VALIGN="TOP">Grimm</TD></TR>
<TR>
<TD ALIGN="LEFT" VALIGN="TOP">Company Name: </TD>
<TD ALIGN="LEFT" VALIGN="TOP">IBM Corporation</TD>
<TD ALIGN="LEFT" VALIGN="TOP">IBM Corporation</TD></TR>
<TR>
<TD ALIGN="LEFT" VALIGN="TOP">Company Division: </TD>
<TD ALIGN="LEFT" VALIGN="TOP">Pervasive Computing</TD>
<TD ALIGN="LEFT" VALIGN="TOP">Pervasive Computing</TD></TR>
<TR>
<TD ALIGN="LEFT" VALIGN="TOP">Street Address: </TD>
<TD ALIGN="LEFT" VALIGN="TOP">11400 Burnet Road</TD>
<TD ALIGN="LEFT" VALIGN="TOP">11400 Burnet Road</TD></TR>
<TR>
<TD ALIGN="LEFT" VALIGN="TOP">City, State, Country, Postal Code: </TD>
<TD ALIGN="LEFT" VALIGN="TOP">Austin, TX USA 78758</TD>
<TD ALIGN="LEFT" VALIGN="TOP">Austin, TX USA 78758</TD></TR>
<TR>
<TD ALIGN="LEFT" VALIGN="TOP">voice: </TD>
<TD ALIGN="LEFT" VALIGN="TOP">+1 512 838 2155</TD>
<TD ALIGN="LEFT" VALIGN="TOP">+1 512 838 9203</TD></TR>
<TR>
<TD ALIGN="LEFT" VALIGN="TOP">fax: </TD>
<TD ALIGN="LEFT" VALIGN="TOP">+1 512 838 0169</TD>
<TD ALIGN="LEFT" VALIGN="TOP">+1 512 838 0169</TD></TR>
<TR>
<TD ALIGN="LEFT" VALIGN="TOP">e-mail:</TD>
<TD ALIGN="LEFT" VALIGN="TOP"><A HREF="mailto:[email protected]" TARGET="_top">[email protected]</A></TD>
<TD ALIGN="LEFT" VALIGN="TOP"><A HREF="mailto:[email protected]" TARGET="_top">[email protected]</A></TD></TR>
</TABLE>
<H3></H3>
<H3>Full Copyright Statement </H3>
<P>Copyright (C) The Open Services Gateway Initiative (1999).
<P>All Rights Reserved.
<P>This document and translations of it may be copied and furnished to others, and derivative works that comment on or otherwise explain it may be prepared, copied, published and distributed, in whole or in part, without restriction of any kind, provided that the above copyright notice and this paragraph are included on all such copies and derivative works. However, this document itself may not be modified in any way, such as by removing the copyright notice or references to the Open Services Gateway Initiative.
<P>The limited permissions granted above are perpetual and will not be revoked by the Open Services Gateway Initiative or its successors or assigns.
<P>This document and the information contained herein is provided on an "AS IS" basis and THE OPEN SERVICES GATEWAY INITIATIVE DISCLAIMS ALL WARRANTIES, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY WARRANTY THAT THE USE OF THE INFORMATION HEREIN WILL NOT INFRINGE ANY RIGHTS OR ANY IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
<HR>
<P>Expires 20 days after: 1999-09-17
</BODY>
</HTML> | {
"pile_set_name": "Github"
} |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.api.java;
import org.apache.flink.annotation.Public;
import org.apache.flink.annotation.PublicEvolving;
import org.apache.flink.api.common.InvalidProgramException;
import org.apache.flink.api.common.JobExecutionResult;
import org.apache.flink.api.common.accumulators.SerializedListAccumulator;
import org.apache.flink.api.common.functions.FilterFunction;
import org.apache.flink.api.common.functions.FlatMapFunction;
import org.apache.flink.api.common.functions.GroupCombineFunction;
import org.apache.flink.api.common.functions.GroupReduceFunction;
import org.apache.flink.api.common.functions.InvalidTypesException;
import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.common.functions.MapPartitionFunction;
import org.apache.flink.api.common.functions.Partitioner;
import org.apache.flink.api.common.functions.ReduceFunction;
import org.apache.flink.api.common.io.FileOutputFormat;
import org.apache.flink.api.common.io.OutputFormat;
import org.apache.flink.api.common.operators.Keys;
import org.apache.flink.api.common.operators.Order;
import org.apache.flink.api.common.operators.base.CrossOperatorBase.CrossHint;
import org.apache.flink.api.common.operators.base.JoinOperatorBase.JoinHint;
import org.apache.flink.api.common.operators.base.PartitionOperatorBase.PartitionMethod;
import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.api.common.typeutils.TypeSerializer;
import org.apache.flink.api.java.aggregation.Aggregations;
import org.apache.flink.api.java.functions.FirstReducer;
import org.apache.flink.api.java.functions.FormattingMapper;
import org.apache.flink.api.java.functions.KeySelector;
import org.apache.flink.api.java.functions.SelectByMaxFunction;
import org.apache.flink.api.java.functions.SelectByMinFunction;
import org.apache.flink.api.java.io.CsvOutputFormat;
import org.apache.flink.api.java.io.PrintingOutputFormat;
import org.apache.flink.api.java.io.TextOutputFormat;
import org.apache.flink.api.java.io.TextOutputFormat.TextFormatter;
import org.apache.flink.api.java.operators.AggregateOperator;
import org.apache.flink.api.java.operators.CoGroupOperator;
import org.apache.flink.api.java.operators.CoGroupOperator.CoGroupOperatorSets;
import org.apache.flink.api.java.operators.CrossOperator;
import org.apache.flink.api.java.operators.CustomUnaryOperation;
import org.apache.flink.api.java.operators.DataSink;
import org.apache.flink.api.java.operators.DeltaIteration;
import org.apache.flink.api.java.operators.DistinctOperator;
import org.apache.flink.api.java.operators.FilterOperator;
import org.apache.flink.api.java.operators.FlatMapOperator;
import org.apache.flink.api.java.operators.GroupCombineOperator;
import org.apache.flink.api.java.operators.GroupReduceOperator;
import org.apache.flink.api.java.operators.IterativeDataSet;
import org.apache.flink.api.java.operators.JoinOperator.JoinOperatorSets;
import org.apache.flink.api.java.operators.MapOperator;
import org.apache.flink.api.java.operators.MapPartitionOperator;
import org.apache.flink.api.java.operators.PartitionOperator;
import org.apache.flink.api.java.operators.ProjectOperator;
import org.apache.flink.api.java.operators.ProjectOperator.Projection;
import org.apache.flink.api.java.operators.ReduceOperator;
import org.apache.flink.api.java.operators.SortPartitionOperator;
import org.apache.flink.api.java.operators.SortedGrouping;
import org.apache.flink.api.java.operators.UnionOperator;
import org.apache.flink.api.java.operators.UnsortedGrouping;
import org.apache.flink.api.java.operators.join.JoinOperatorSetsBase;
import org.apache.flink.api.java.operators.join.JoinType;
import org.apache.flink.api.java.tuple.Tuple;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.api.java.typeutils.InputTypeConfigurable;
import org.apache.flink.api.java.typeutils.MissingTypeInfo;
import org.apache.flink.api.java.typeutils.TupleTypeInfo;
import org.apache.flink.api.java.typeutils.TypeExtractor;
import org.apache.flink.core.fs.FileSystem.WriteMode;
import org.apache.flink.core.fs.Path;
import org.apache.flink.util.AbstractID;
import org.apache.flink.util.Preconditions;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
/**
* A DataSet represents a collection of elements of the same type.
*
* <p>A DataSet can be transformed into another DataSet by applying a transformation as for example
* <ul>
* <li>{@link DataSet#map(org.apache.flink.api.common.functions.MapFunction)},</li>
* <li>{@link DataSet#reduce(org.apache.flink.api.common.functions.ReduceFunction)},</li>
* <li>{@link DataSet#join(DataSet)}, or</li>
* <li>{@link DataSet#coGroup(DataSet)}.</li>
* </ul>
*
* @param <T> The type of the DataSet, i.e., the type of the elements of the DataSet.
*/
@Public
public abstract class DataSet<T> {
protected final ExecutionEnvironment context;
// NOTE: the type must not be accessed directly, but only via getType()
private TypeInformation<T> type;
private boolean typeUsed = false;
/**
 * Creates a new DataSet that is registered with the given execution environment and
 * carries the given element type information.
 *
 * @param context The ExecutionEnvironment in which this DataSet is created; must not be null.
 * @param typeInfo The TypeInformation for the element type of this DataSet; must not be null.
 * @throws NullPointerException Thrown, if either argument is null.
 */
protected DataSet(ExecutionEnvironment context, TypeInformation<T> typeInfo) {
if (context == null) {
throw new NullPointerException("context is null");
}
if (typeInfo == null) {
throw new NullPointerException("typeInfo is null");
}
this.context = context;
this.type = typeInfo;
}
/**
 * Gets the {@link ExecutionEnvironment} that this DataSet was created in and is
 * registered with.
 *
 * @return The ExecutionEnvironment in which this DataSet is registered.
 *
 * @see ExecutionEnvironment
 */
public ExecutionEnvironment getExecutionEnvironment() {
	return context;
}
// --------------------------------------------------------------------------------------------
// Type Information handling
// --------------------------------------------------------------------------------------------
/**
 * Tries to fill in the type information. Type information can be filled in later when the program uses
 * a type hint. This method checks whether the type information has ever been accessed before and does not
 * allow modifications if the type was accessed already. This ensures consistency by making sure different
 * parts of the operation do not assume different type information.
 *
 * @param typeInfo The type information to fill in.
 *
 * @throws IllegalStateException Thrown, if the type information has been accessed before.
 */
protected void fillInType(TypeInformation<T> typeInfo) {
// typeUsed is flipped by getType(); once any consumer has observed the type,
// replacing it could make already-built parts of the plan inconsistent.
if (typeUsed) {
throw new IllegalStateException("TypeInformation cannot be filled in for the type after it has been used. "
+ "Please make sure that the type info hints are the first call after the transformation function, "
+ "before any access to types or semantic properties, etc.");
}
this.type = typeInfo;
}
/**
 * Returns the {@link TypeInformation} for the type of this DataSet.
 *
 * <p>If the type could not be extracted automatically (it is a {@link MissingTypeInfo}
 * placeholder), an exception is raised asking the user to supply a type hint.
 *
 * @return The TypeInformation for the type of this DataSet.
 * @throws InvalidTypesException Thrown, if the type was not determinable due to type erasure
 *         and no type hint was provided.
 *
 * @see TypeInformation
 */
public TypeInformation<T> getType() {
if (type instanceof MissingTypeInfo) {
MissingTypeInfo typeInfo = (MissingTypeInfo) type;
throw new InvalidTypesException("The return type of function '" + typeInfo.getFunctionName()
+ "' could not be determined automatically, due to type erasure. "
+ "You can give type information hints by using the returns(...) method on the result of "
+ "the transformation call, or by letting your function implement the 'ResultTypeQueryable' "
+ "interface.", typeInfo.getTypeException());
}
// Mark the type as observed so that later fillInType(...) calls are rejected.
typeUsed = true;
return this.type;
}
/**
 * Cleans the given function object according to the closure-cleaner settings of this
 * DataSet's {@link ExecutionEnvironment} configuration.
 *
 * <p>If the closure cleaner is enabled, the function is cleaned with the configured
 * cleaner level; otherwise it is only checked for serializability.
 *
 * @param f The function object to clean.
 * @return The (possibly cleaned) function object.
 */
public <F> F clean(F f) {
if (getExecutionEnvironment().getConfig().isClosureCleanerEnabled()) {
ClosureCleaner.clean(f, getExecutionEnvironment().getConfig().getClosureCleanerLevel(), true);
} else {
ClosureCleaner.ensureSerializable(f);
}
return f;
}
// --------------------------------------------------------------------------------------------
// Filter & Transformations
// --------------------------------------------------------------------------------------------
/**
 * Applies a Map transformation on this DataSet.
 *
 * <p>The given {@link org.apache.flink.api.common.functions.MapFunction} is invoked once per
 * element of the DataSet, and each invocation produces exactly one output element.
 *
 * @param mapper The MapFunction that is called for each element of the DataSet.
 * @return A MapOperator that represents the transformed DataSet.
 *
 * @see org.apache.flink.api.common.functions.MapFunction
 * @see org.apache.flink.api.common.functions.RichMapFunction
 * @see MapOperator
 */
public <R> MapOperator<T, R> map(MapFunction<T, R> mapper) {
	if (mapper == null) {
		throw new NullPointerException("Map function must not be null.");
	}
	final String location = Utils.getCallLocationName();
	final TypeInformation<R> returnType = TypeExtractor.getMapReturnTypes(mapper, getType(), location, true);
	return new MapOperator<>(this, returnType, clean(mapper), location);
}
/**
 * Applies a Map-style operation to the entire partition of the data.
 *
 * <p>The given function is invoked once per parallel partition, receiving the whole
 * partition through an Iterator. How many elements each invocation sees is
 * non-deterministic and depends on the parallelism of the operation.
 *
 * <p>Intended for operations that cannot work element-by-element but need no grouping;
 * for per-element transformations, prefer {@code map()} and {@code flatMap()}.
 *
 * @param mapPartition The MapPartitionFunction that is called for the full DataSet.
 * @return A MapPartitionOperator that represents the transformed DataSet.
 *
 * @see MapPartitionFunction
 * @see MapPartitionOperator
 */
public <R> MapPartitionOperator<T, R> mapPartition(MapPartitionFunction<T, R> mapPartition) {
	if (mapPartition == null) {
		throw new NullPointerException("MapPartition function must not be null.");
	}
	final String location = Utils.getCallLocationName();
	final TypeInformation<R> returnType = TypeExtractor.getMapPartitionReturnTypes(mapPartition, getType(), location, true);
	return new MapPartitionOperator<>(this, returnType, clean(mapPartition), location);
}
/**
 * Applies a FlatMap transformation on a {@link DataSet}.
 *
 * <p>The given {@link org.apache.flink.api.common.functions.RichFlatMapFunction} is invoked
 * once per element of the DataSet; each invocation may emit any number of elements,
 * including none.
 *
 * @param flatMapper The FlatMapFunction that is called for each element of the DataSet.
 * @return A FlatMapOperator that represents the transformed DataSet.
 *
 * @see org.apache.flink.api.common.functions.RichFlatMapFunction
 * @see FlatMapOperator
 * @see DataSet
 */
public <R> FlatMapOperator<T, R> flatMap(FlatMapFunction<T, R> flatMapper) {
	if (flatMapper == null) {
		throw new NullPointerException("FlatMap function must not be null.");
	}
	final String location = Utils.getCallLocationName();
	final TypeInformation<R> returnType = TypeExtractor.getFlatMapReturnTypes(flatMapper, getType(), location, true);
	return new FlatMapOperator<>(this, returnType, clean(flatMapper), location);
}
/**
 * Applies a Filter transformation on a {@link DataSet}.
 *
 * <p>The given {@link org.apache.flink.api.common.functions.RichFilterFunction} is evaluated
 * for each element; elements for which it returns true are retained, all others are
 * dropped.
 *
 * @param filter The FilterFunction that is called for each element of the DataSet.
 * @return A FilterOperator that represents the filtered DataSet.
 *
 * @see org.apache.flink.api.common.functions.RichFilterFunction
 * @see FilterOperator
 * @see DataSet
 */
public FilterOperator<T> filter(FilterFunction<T> filter) {
	if (filter == null) {
		throw new NullPointerException("Filter function must not be null.");
	}
	final FilterFunction<T> cleaned = clean(filter);
	return new FilterOperator<>(this, cleaned, Utils.getCallLocationName());
}
// --------------------------------------------------------------------------------------------
// Projections
// --------------------------------------------------------------------------------------------
/**
 * Applies a Project transformation on a {@link Tuple} {@link DataSet}.
 *
 * <p><b>Note: Only Tuple DataSets can be projected using field indexes.</b>
 *
 * <p>Each Tuple of the DataSet is projected onto the given (sub)set of fields; the order
 * of fields in the output tuple follows the order of the given indexes. Further fields can
 * be appended via {@link ProjectOperator#project(int[])}.
 *
 * <b>Note: With the current implementation, the Project transformation looses type information.</b>
 *
 * @param fieldIndexes The field indexes of the input tuple that are retained.
 * @return A ProjectOperator that represents the projected DataSet.
 *
 * @see Tuple
 * @see DataSet
 * @see ProjectOperator
 */
public <OUT extends Tuple> ProjectOperator<?, OUT> project(int... fieldIndexes) {
	final Projection<T> projection = new Projection<>(this, fieldIndexes);
	return projection.projectTupleX();
}
// --------------------------------------------------------------------------------------------
// Non-grouped aggregations
// --------------------------------------------------------------------------------------------
/**
 * Applies an Aggregate transformation on a non-grouped {@link Tuple} {@link DataSet}.
 *
 * <p><b>Note: Only Tuple DataSets can be aggregated.</b>
 * A built-in {@link Aggregations Aggregation} is applied on the given field of the Tuple
 * DataSet. Further aggregations can be chained onto the result via
 * {@link AggregateOperator#and(Aggregations, int)}.
 *
 * @param agg The built-in aggregation function that is computed.
 * @param field The index of the Tuple field on which the aggregation function is applied.
 * @return An AggregateOperator that represents the aggregated DataSet.
 *
 * @see Tuple
 * @see Aggregations
 * @see AggregateOperator
 * @see DataSet
 */
public AggregateOperator<T> aggregate(Aggregations agg, int field) {
	final String location = Utils.getCallLocationName();
	return new AggregateOperator<>(this, agg, field, location);
}
/**
 * Shorthand for {@link #aggregate(Aggregations, int)} with {@link Aggregations#SUM}.
 *
 * @param field The index of the Tuple field on which the aggregation function is applied.
 * @return An AggregateOperator that represents the summed DataSet.
 *
 * @see org.apache.flink.api.java.operators.AggregateOperator
 */
public AggregateOperator<T> sum(int field) {
	return this.aggregate(Aggregations.SUM, field);
}
/**
 * Shorthand for {@link #aggregate(Aggregations, int)} with {@link Aggregations#MAX}.
 *
 * <p><strong>Note:</strong> Not to be confused with {@link #maxBy(int...)}, which selects
 * one element with the maximum value at the specified field positions.
 *
 * @param field The index of the Tuple field on which the aggregation function is applied.
 * @return An AggregateOperator that represents the max'ed DataSet.
 *
 * @see #aggregate(Aggregations, int)
 * @see #maxBy(int...)
 */
public AggregateOperator<T> max(int field) {
	return this.aggregate(Aggregations.MAX, field);
}
/**
 * Shorthand for {@link #aggregate(Aggregations, int)} with {@link Aggregations#MIN}.
 *
 * <p><strong>Note:</strong> Not to be confused with {@link #minBy(int...)}, which selects
 * one element with the minimum value at the specified field positions.
 *
 * @param field The index of the Tuple field on which the aggregation function is applied.
 * @return An AggregateOperator that represents the min'ed DataSet.
 *
 * @see #aggregate(Aggregations, int)
 * @see #minBy(int...)
 */
public AggregateOperator<T> min(int field) {
	return this.aggregate(Aggregations.MIN, field);
}
/**
 * Convenience method to get the count (number of elements) of a DataSet.
 *
 * <p>Note: this triggers program execution via a counting accumulator.
 *
 * @return A long integer that represents the number of elements in the data set.
 */
public long count() throws Exception {
	final String accumulatorId = new AbstractID().toString();
	output(new Utils.CountHelper<T>(accumulatorId)).name("count()");
	final JobExecutionResult result = getExecutionEnvironment().execute();
	return result.<Long> getAccumulatorResult(accumulatorId);
}
/**
 * Convenience method to get the elements of a DataSet as a List.
 *
 * <p>Triggers program execution and ships all elements, serialized, back through an
 * accumulator. As a DataSet can contain a lot of data, use with caution.
 *
 * @return A List containing the elements of the DataSet
 */
public List<T> collect() throws Exception {
	final String accumulatorId = new AbstractID().toString();
	final TypeSerializer<T> serializer = getType().createSerializer(getExecutionEnvironment().getConfig());
	this.output(new Utils.CollectHelper<>(accumulatorId, serializer)).name("collect()");
	final JobExecutionResult result = getExecutionEnvironment().execute();
	final ArrayList<byte[]> serializedElements = result.getAccumulatorResult(accumulatorId);
	// Guard clause: a missing accumulator means the result could not be fetched.
	if (serializedElements == null) {
		throw new RuntimeException("The call to collect() could not retrieve the DataSet.");
	}
	try {
		return SerializedListAccumulator.deserializeList(serializedElements, serializer);
	} catch (ClassNotFoundException e) {
		throw new RuntimeException("Cannot find type class of collected data type.", e);
	} catch (IOException e) {
		throw new RuntimeException("Serialization error while deserializing collected data", e);
	}
}
/**
 * Applies a Reduce transformation on a non-grouped {@link DataSet}.
 *
 * <p>A {@link org.apache.flink.api.common.functions.RichReduceFunction} is applied
 * repeatedly, combining two elements into one of the same type, until a single element
 * remains as the result.
 *
 * @param reducer The ReduceFunction that is applied on the DataSet.
 * @return A ReduceOperator that represents the reduced DataSet.
 *
 * @see org.apache.flink.api.common.functions.RichReduceFunction
 * @see ReduceOperator
 * @see DataSet
 */
public ReduceOperator<T> reduce(ReduceFunction<T> reducer) {
	if (reducer == null) {
		throw new NullPointerException("Reduce function must not be null.");
	}
	final ReduceFunction<T> cleaned = clean(reducer);
	return new ReduceOperator<>(this, cleaned, Utils.getCallLocationName());
}
/**
 * Applies a GroupReduce transformation on a non-grouped {@link DataSet}.
 *
 * <p>The given {@link org.apache.flink.api.common.functions.RichGroupReduceFunction} is
 * called once with the full DataSet; it can iterate over all elements and emit any number
 * of output elements, including none.
 *
 * @param reducer The GroupReduceFunction that is applied on the DataSet.
 * @return A GroupReduceOperator that represents the reduced DataSet.
 *
 * @see org.apache.flink.api.common.functions.RichGroupReduceFunction
 * @see org.apache.flink.api.java.operators.GroupReduceOperator
 * @see DataSet
 */
public <R> GroupReduceOperator<T, R> reduceGroup(GroupReduceFunction<T, R> reducer) {
	if (reducer == null) {
		throw new NullPointerException("GroupReduce function must not be null.");
	}
	final String location = Utils.getCallLocationName();
	final TypeInformation<R> returnType = TypeExtractor.getGroupReduceReturnTypes(reducer, getType(), location, true);
	return new GroupReduceOperator<>(this, returnType, clean(reducer), location);
}
/**
 * Applies a GroupCombineFunction on a non-grouped {@link DataSet}.
 *
 * <p>Unlike a full GroupReduce, the combine function is invoked once per partition and
 * therefore performs no full data exchange. It is suited to pre-aggregating elements into
 * an intermediate format before a subsequent groupReduce shuffles the data across nodes
 * for the final reduction. In contrast to the combine method of a RichGroupReduce function
 * (whose input and output types must match), a GroupCombineFunction may produce an
 * arbitrary output type.
 *
 * @param combiner The GroupCombineFunction that is applied on the DataSet.
 * @return A GroupCombineOperator which represents the combined DataSet.
 */
public <R> GroupCombineOperator<T, R> combineGroup(GroupCombineFunction<T, R> combiner) {
	if (combiner == null) {
		throw new NullPointerException("GroupCombine function must not be null.");
	}
	final String location = Utils.getCallLocationName();
	final TypeInformation<R> returnType = TypeExtractor.getGroupCombineReturnTypes(combiner, getType(), location, true);
	return new GroupCombineOperator<>(this, returnType, clean(combiner), location);
}
/**
 * Selects an element with minimum value.
 *
 * <p>The minimum is computed over the specified fields in lexicographical order.
 *
 * <p><strong>Example 1</strong>: For a data set with elements <code>[0, 1], [1, 0]</code>:
 * <ul>
 *   <li><code>minBy(0)</code> yields <code>[0, 1]</code></li>
 *   <li><code>minBy(1)</code> yields <code>[1, 0]</code></li>
 * </ul>
 *
 * <p><strong>Example 2</strong>: For a data set with elements <code>[0, 0], [0, 1]</code>,
 * <code>minBy(0, 1)</code> yields <code>[0, 0]</code>.
 *
 * <p>If several elements share the minimum value at the specified fields, an arbitrary one of
 * them is picked.
 *
 * <p>Internally, this operation is implemented as a {@link ReduceFunction}.
 *
 * @param fields Field positions to compute the minimum over
 * @return A {@link ReduceOperator} representing the minimum
 */
@SuppressWarnings({ "unchecked", "rawtypes" })
public ReduceOperator<T> minBy(int... fields) {
	final TypeInformation<T> inputType = getType();
	// Field-position selection is only defined for Java tuple types.
	if (!inputType.isTupleType() || !(inputType instanceof TupleTypeInfo)) {
		throw new InvalidProgramException("DataSet#minBy(int...) only works on Tuple types.");
	}
	SelectByMinFunction minSelector = new SelectByMinFunction((TupleTypeInfo) inputType, fields);
	return new ReduceOperator<>(this, minSelector, Utils.getCallLocationName());
}
/**
 * Selects an element with maximum value.
 *
 * <p>The maximum is computed over the specified fields in lexicographical order.
 *
 * <p><strong>Example 1</strong>: For a data set with elements <code>[0, 1], [1, 0]</code>:
 * <ul>
 *   <li><code>maxBy(0)</code> yields <code>[1, 0]</code></li>
 *   <li><code>maxBy(1)</code> yields <code>[0, 1]</code></li>
 * </ul>
 *
 * <p><strong>Example 2</strong>: For a data set with elements <code>[0, 0], [0, 1]</code>,
 * <code>maxBy(0, 1)</code> yields <code>[0, 1]</code>.
 *
 * <p>If several elements share the maximum value at the specified fields, an arbitrary one of
 * them is picked.
 *
 * <p>Internally, this operation is implemented as a {@link ReduceFunction}.
 *
 * @param fields Field positions to compute the maximum over
 * @return A {@link ReduceOperator} representing the maximum
 */
@SuppressWarnings({ "unchecked", "rawtypes" })
public ReduceOperator<T> maxBy(int... fields) {
	final TypeInformation<T> inputType = getType();
	// Field-position selection is only defined for Java tuple types.
	if (!inputType.isTupleType() || !(inputType instanceof TupleTypeInfo)) {
		throw new InvalidProgramException("DataSet#maxBy(int...) only works on Tuple types.");
	}
	SelectByMaxFunction maxSelector = new SelectByMaxFunction((TupleTypeInfo) inputType, fields);
	return new ReduceOperator<>(this, maxSelector, Utils.getCallLocationName());
}
/**
 * Returns a new set containing the first n elements in this {@link DataSet}.
 *
 * @param n The desired number of elements; must be at least 1.
 * @return A ReduceGroupOperator that represents the DataSet containing the elements.
 */
public GroupReduceOperator<T, T> first(int n) {
	// Reject non-positive counts up front; first(0) or less is always a programming error.
	if (n < 1) {
		throw new InvalidProgramException("Parameter n of first(n) must be at least 1.");
	}
	return reduceGroup(new FirstReducer<T>(n));
}
// --------------------------------------------------------------------------------------------
// distinct
// --------------------------------------------------------------------------------------------
/**
 * Returns a distinct set of a {@link DataSet} using a {@link KeySelector} function.
 *
 * <p>The KeySelector function is called for each element of the DataSet and extracts a single
 * key value on which the decision is made if two items are distinct or not.
 *
 * @param keyExtractor The KeySelector function which extracts the key values from the DataSet on which the
 *                     distinction of the DataSet is decided.
 * @return A DistinctOperator that represents the distinct DataSet.
 */
public <K> DistinctOperator<T> distinct(KeySelector<T, K> keyExtractor) {
	TypeInformation<K> keyType = TypeExtractor.getKeySelectorTypes(keyExtractor, getType());
	// Clean the user function before shipping it, consistent with groupBy(KeySelector),
	// which wraps its key selector in clean() as well.
	return new DistinctOperator<>(this, new Keys.SelectorFunctionKeys<>(clean(keyExtractor), getType(), keyType), Utils.getCallLocationName());
}
/**
 * Returns a distinct set of a {@link Tuple} {@link DataSet} using field position keys.
 *
 * <p>The field position keys specify the fields of Tuples on which the decision is made if
 * two Tuples are distinct or not.
 *
 * <p>Note: Field position keys can only be specified for Tuple DataSets.
 *
 * @param fields One or more field positions on which the distinction of the DataSet is decided.
 * @return A DistinctOperator that represents the distinct DataSet.
 */
public DistinctOperator<T> distinct(int... fields) {
	Keys.ExpressionKeys<T> distinctKeys = new Keys.ExpressionKeys<>(fields, getType());
	return new DistinctOperator<>(this, distinctKeys, Utils.getCallLocationName());
}
/**
 * Returns a distinct set of a {@link DataSet} using expression keys.
 *
 * <p>The field expression keys specify the fields of a {@link org.apache.flink.api.common.typeutils.CompositeType}
 * (e.g., Tuple or Pojo type) on which the decision is made if two elements are distinct or not.
 * In case of a {@link org.apache.flink.api.common.typeinfo.AtomicType}, only the wildcard
 * expression ("*") is valid.
 *
 * @param fields One or more field expressions on which the distinction of the DataSet is decided.
 * @return A DistinctOperator that represents the distinct DataSet.
 */
public DistinctOperator<T> distinct(String... fields) {
	Keys.ExpressionKeys<T> distinctKeys = new Keys.ExpressionKeys<>(fields, getType());
	return new DistinctOperator<>(this, distinctKeys, Utils.getCallLocationName());
}
/**
 * Returns a distinct set of a {@link DataSet}.
 *
 * <p>If the input is a {@link org.apache.flink.api.common.typeutils.CompositeType} (Tuple or
 * Pojo type), distinct is performed on all fields and each field must be a key type.
 *
 * @return A DistinctOperator that represents the distinct DataSet.
 */
public DistinctOperator<T> distinct() {
	// A null key specification means "all fields" to the DistinctOperator.
	return new DistinctOperator<>(this, null, Utils.getCallLocationName());
}
// --------------------------------------------------------------------------------------------
// Grouping
// --------------------------------------------------------------------------------------------
/**
 * Groups a {@link DataSet} using a {@link KeySelector} function.
 * The KeySelector function is called for each element of the DataSet and extracts a single
 * key value on which the DataSet is grouped.
 *
 * <p>This method returns an {@link UnsortedGrouping} on which one of the following grouping
 * transformations can be applied:
 * <ul>
 *   <li>{@link UnsortedGrouping#sortGroup(int, org.apache.flink.api.common.operators.Order)} to get a {@link SortedGrouping}.
 *   <li>{@link UnsortedGrouping#aggregate(Aggregations, int)} to apply an Aggregate transformation.
 *   <li>{@link UnsortedGrouping#reduce(org.apache.flink.api.common.functions.ReduceFunction)} to apply a Reduce transformation.
 *   <li>{@link UnsortedGrouping#reduceGroup(org.apache.flink.api.common.functions.GroupReduceFunction)} to apply a GroupReduce transformation.
 * </ul>
 *
 * @param keyExtractor The KeySelector function which extracts the key values from the DataSet on which it is grouped.
 * @return An UnsortedGrouping on which a transformation needs to be applied to obtain a transformed DataSet.
 *
 * @see KeySelector
 * @see UnsortedGrouping
 * @see AggregateOperator
 * @see ReduceOperator
 * @see org.apache.flink.api.java.operators.GroupReduceOperator
 * @see DataSet
 */
public <K> UnsortedGrouping<T> groupBy(KeySelector<T, K> keyExtractor) {
	final TypeInformation<K> keyType = TypeExtractor.getKeySelectorTypes(keyExtractor, getType());
	// Clean the user function before it is serialized and shipped with the grouping keys.
	Keys.SelectorFunctionKeys<T, K> groupingKeys =
		new Keys.SelectorFunctionKeys<>(clean(keyExtractor), getType(), keyType);
	return new UnsortedGrouping<>(this, groupingKeys);
}
/**
 * Groups a {@link Tuple} {@link DataSet} using field position keys.
 *
 * <p><b>Note: Field position keys can only be specified for Tuple DataSets.</b>
 *
 * <p>The field position keys specify the fields of Tuples on which the DataSet is grouped.
 * This method returns an {@link UnsortedGrouping} on which one of the following grouping
 * transformations can be applied:
 * <ul>
 *   <li>{@link UnsortedGrouping#sortGroup(int, org.apache.flink.api.common.operators.Order)} to get a {@link SortedGrouping}.
 *   <li>{@link UnsortedGrouping#aggregate(Aggregations, int)} to apply an Aggregate transformation.
 *   <li>{@link UnsortedGrouping#reduce(org.apache.flink.api.common.functions.ReduceFunction)} to apply a Reduce transformation.
 *   <li>{@link UnsortedGrouping#reduceGroup(org.apache.flink.api.common.functions.GroupReduceFunction)} to apply a GroupReduce transformation.
 * </ul>
 *
 * @param fields One or more field positions on which the DataSet will be grouped.
 * @return A Grouping on which a transformation needs to be applied to obtain a transformed DataSet.
 *
 * @see Tuple
 * @see UnsortedGrouping
 * @see AggregateOperator
 * @see ReduceOperator
 * @see org.apache.flink.api.java.operators.GroupReduceOperator
 * @see DataSet
 */
public UnsortedGrouping<T> groupBy(int... fields) {
	Keys.ExpressionKeys<T> groupingKeys = new Keys.ExpressionKeys<>(fields, getType());
	return new UnsortedGrouping<>(this, groupingKeys);
}
/**
 * Groups a {@link DataSet} using field expressions. A field expression is either the name of a
 * public field or a getter method with parentheses of the {@link DataSet}S underlying type. A dot
 * can be used to drill down into objects, as in {@code "field1.getInnerField2()" }.
 * This method returns an {@link UnsortedGrouping} on which one of the following grouping
 * transformations can be applied:
 * <ul>
 *   <li>{@link UnsortedGrouping#sortGroup(int, org.apache.flink.api.common.operators.Order)} to get a {@link SortedGrouping}.
 *   <li>{@link UnsortedGrouping#aggregate(Aggregations, int)} to apply an Aggregate transformation.
 *   <li>{@link UnsortedGrouping#reduce(org.apache.flink.api.common.functions.ReduceFunction)} to apply a Reduce transformation.
 *   <li>{@link UnsortedGrouping#reduceGroup(org.apache.flink.api.common.functions.GroupReduceFunction)} to apply a GroupReduce transformation.
 * </ul>
 *
 * @param fields One or more field expressions on which the DataSet will be grouped.
 * @return A Grouping on which a transformation needs to be applied to obtain a transformed DataSet.
 *
 * @see Tuple
 * @see UnsortedGrouping
 * @see AggregateOperator
 * @see ReduceOperator
 * @see org.apache.flink.api.java.operators.GroupReduceOperator
 * @see DataSet
 */
public UnsortedGrouping<T> groupBy(String... fields) {
	Keys.ExpressionKeys<T> groupingKeys = new Keys.ExpressionKeys<>(fields, getType());
	return new UnsortedGrouping<>(this, groupingKeys);
}
// --------------------------------------------------------------------------------------------
// Joining
// --------------------------------------------------------------------------------------------
/**
 * Initiates a Join transformation.
 *
 * <p>A Join transformation joins the elements of two {@link DataSet DataSets} on key equality
 * and provides multiple ways to combine joining elements into one DataSet.
 *
 * <p>This method returns a {@link JoinOperatorSets} on which one of the {@code where} methods
 * can be called to define the join key of the first joining (i.e., this) DataSet.
 *
 * @param other The other DataSet with which this DataSet is joined.
 * @return A JoinOperatorSets to continue the definition of the Join transformation.
 *
 * @see JoinOperatorSets
 * @see DataSet
 */
public <R> JoinOperatorSets<T, R> join(DataSet<R> other) {
	// No join hint given: the optimizer is free to pick the execution strategy.
	return new JoinOperatorSets<>(this, other);
}
/**
 * Initiates a Join transformation.
 *
 * <p>A Join transformation joins the elements of two {@link DataSet DataSets} on key equality
 * and provides multiple ways to combine joining elements into one DataSet.
 *
 * <p>This method returns a {@link JoinOperatorSets} on which one of the {@code where} methods
 * can be called to define the join key of the first joining (i.e., this) DataSet.
 *
 * @param other The other DataSet with which this DataSet is joined.
 * @param strategy The strategy that should be used to execute the join. If {@code null} is given,
 *                 then the optimizer will pick the join strategy.
 * @return A JoinOperatorSets to continue the definition of the Join transformation.
 *
 * @see JoinOperatorSets
 * @see DataSet
 */
public <R> JoinOperatorSets<T, R> join(DataSet<R> other, JoinHint strategy) {
	// The hint is forwarded as-is; validation happens inside the operator set.
	return new JoinOperatorSets<>(this, other, strategy);
}
/**
 * Initiates a Join transformation.
 *
 * <p>A Join transformation joins the elements of two {@link DataSet DataSets} on key equality
 * and provides multiple ways to combine joining elements into one DataSet.
 *
 * <p>This method also gives the hint to the optimizer that the second DataSet to join is much
 * smaller than the first one.
 *
 * <p>This method returns a {@link JoinOperatorSets} on which
 * {@link JoinOperatorSets#where(String...)} needs to be called to define the join key of the
 * first joining (i.e., this) DataSet.
 *
 * @param other The other DataSet with which this DataSet is joined.
 * @return A JoinOperatorSets to continue the definition of the Join transformation.
 *
 * @see JoinOperatorSets
 * @see DataSet
 */
public <R> JoinOperatorSets<T, R> joinWithTiny(DataSet<R> other) {
	// "Tiny" second input: broadcast the second DataSet and build a hash table from it.
	return new JoinOperatorSets<>(this, other, JoinHint.BROADCAST_HASH_SECOND);
}
/**
 * Initiates a Join transformation.
 *
 * <p>A Join transformation joins the elements of two {@link DataSet DataSets} on key equality
 * and provides multiple ways to combine joining elements into one DataSet.
 *
 * <p>This method also gives the hint to the optimizer that the second DataSet to join is much
 * larger than the first one.
 *
 * <p>This method returns a {@link JoinOperatorSets} on which one of the {@code where} methods
 * can be called to define the join key of the first joining (i.e., this) DataSet.
 *
 * @param other The other DataSet with which this DataSet is joined.
 * @return A JoinOperatorSet to continue the definition of the Join transformation.
 *
 * @see JoinOperatorSets
 * @see DataSet
 */
public <R> JoinOperatorSets<T, R> joinWithHuge(DataSet<R> other) {
	// "Huge" second input: broadcast this (first) DataSet and build a hash table from it.
	return new JoinOperatorSets<>(this, other, JoinHint.BROADCAST_HASH_FIRST);
}
/**
 * Initiates a Left Outer Join transformation.
 *
 * <p>An Outer Join transformation joins two elements of two {@link DataSet DataSets} on key
 * equality and provides multiple ways to combine joining elements into one DataSet.
 *
 * <p>Elements of the <b>left</b> DataSet (i.e. {@code this}) that do not have a matching element
 * on the other side are joined with {@code null} and emitted to the resulting DataSet.
 *
 * @param other The other DataSet with which this DataSet is joined.
 * @return A JoinOperatorSet to continue the definition of the Join transformation.
 *
 * @see org.apache.flink.api.java.operators.join.JoinOperatorSetsBase
 * @see DataSet
 */
public <R> JoinOperatorSetsBase<T, R> leftOuterJoin(DataSet<R> other) {
	// Without an explicit hint, defer the strategy choice to the optimizer.
	return new JoinOperatorSetsBase<>(this, other, JoinHint.OPTIMIZER_CHOOSES, JoinType.LEFT_OUTER);
}
/**
 * Initiates a Left Outer Join transformation.
 *
 * <p>An Outer Join transformation joins two elements of two {@link DataSet DataSets} on key
 * equality and provides multiple ways to combine joining elements into one DataSet.
 *
 * <p>Elements of the <b>left</b> DataSet (i.e. {@code this}) that do not have a matching element
 * on the other side are joined with {@code null} and emitted to the resulting DataSet.
 *
 * @param other The other DataSet with which this DataSet is joined.
 * @param strategy The strategy that should be used to execute the join. If {@code null} is given,
 *                 then the optimizer will pick the join strategy.
 * @return A JoinOperatorSet to continue the definition of the Join transformation.
 * @throws InvalidProgramException If the given strategy is not supported for a left outer join.
 *
 * @see org.apache.flink.api.java.operators.join.JoinOperatorSetsBase
 * @see DataSet
 */
public <R> JoinOperatorSetsBase<T, R> leftOuterJoin(DataSet<R> other, JoinHint strategy) {
	// The contract allows a null hint (optimizer picks the strategy); map it to
	// OPTIMIZER_CHOOSES instead of failing with a NullPointerException in the switch.
	final JoinHint hint = (strategy == null) ? JoinHint.OPTIMIZER_CHOOSES : strategy;
	switch (hint) {
		case OPTIMIZER_CHOOSES:
		case REPARTITION_SORT_MERGE:
		case REPARTITION_HASH_FIRST:
		case REPARTITION_HASH_SECOND:
		case BROADCAST_HASH_SECOND:
			return new JoinOperatorSetsBase<>(this, other, hint, JoinType.LEFT_OUTER);
		default:
			// BROADCAST_HASH_FIRST (and any future hints) cannot preserve unmatched left rows.
			throw new InvalidProgramException("Invalid JoinHint for LeftOuterJoin: " + hint);
	}
}
/**
 * Initiates a Right Outer Join transformation.
 *
 * <p>An Outer Join transformation joins two elements of two {@link DataSet DataSets} on key
 * equality and provides multiple ways to combine joining elements into one DataSet.
 *
 * <p>Elements of the <b>right</b> DataSet (i.e. {@code other}) that do not have a matching
 * element on {@code this} side are joined with {@code null} and emitted to the resulting DataSet.
 *
 * @param other The other DataSet with which this DataSet is joined.
 * @return A JoinOperatorSet to continue the definition of the Join transformation.
 *
 * @see org.apache.flink.api.java.operators.join.JoinOperatorSetsBase
 * @see DataSet
 */
public <R> JoinOperatorSetsBase<T, R> rightOuterJoin(DataSet<R> other) {
	// Without an explicit hint, defer the strategy choice to the optimizer.
	return new JoinOperatorSetsBase<>(this, other, JoinHint.OPTIMIZER_CHOOSES, JoinType.RIGHT_OUTER);
}
/**
 * Initiates a Right Outer Join transformation.
 *
 * <p>An Outer Join transformation joins two elements of two {@link DataSet DataSets} on key
 * equality and provides multiple ways to combine joining elements into one DataSet.
 *
 * <p>Elements of the <b>right</b> DataSet (i.e. {@code other}) that do not have a matching
 * element on {@code this} side are joined with {@code null} and emitted to the resulting DataSet.
 *
 * @param other The other DataSet with which this DataSet is joined.
 * @param strategy The strategy that should be used to execute the join. If {@code null} is given,
 *                 then the optimizer will pick the join strategy.
 * @return A JoinOperatorSet to continue the definition of the Join transformation.
 * @throws InvalidProgramException If the given strategy is not supported for a right outer join.
 *
 * @see org.apache.flink.api.java.operators.join.JoinOperatorSetsBase
 * @see DataSet
 */
public <R> JoinOperatorSetsBase<T, R> rightOuterJoin(DataSet<R> other, JoinHint strategy) {
	// The contract allows a null hint (optimizer picks the strategy); map it to
	// OPTIMIZER_CHOOSES instead of failing with a NullPointerException in the switch.
	final JoinHint hint = (strategy == null) ? JoinHint.OPTIMIZER_CHOOSES : strategy;
	switch (hint) {
		case OPTIMIZER_CHOOSES:
		case REPARTITION_SORT_MERGE:
		case REPARTITION_HASH_FIRST:
		case REPARTITION_HASH_SECOND:
		case BROADCAST_HASH_FIRST:
			return new JoinOperatorSetsBase<>(this, other, hint, JoinType.RIGHT_OUTER);
		default:
			// BROADCAST_HASH_SECOND (and any future hints) cannot preserve unmatched right rows.
			throw new InvalidProgramException("Invalid JoinHint for RightOuterJoin: " + hint);
	}
}
/**
 * Initiates a Full Outer Join transformation.
 *
 * <p>An Outer Join transformation joins two elements of two {@link DataSet DataSets} on key
 * equality and provides multiple ways to combine joining elements into one DataSet.
 *
 * <p>Elements of <b>both</b> DataSets that do not have a matching element on the opposing side
 * are joined with {@code null} and emitted to the resulting DataSet.
 *
 * @param other The other DataSet with which this DataSet is joined.
 * @return A JoinOperatorSet to continue the definition of the Join transformation.
 *
 * @see org.apache.flink.api.java.operators.join.JoinOperatorSetsBase
 * @see DataSet
 */
public <R> JoinOperatorSetsBase<T, R> fullOuterJoin(DataSet<R> other) {
	// Without an explicit hint, defer the strategy choice to the optimizer.
	return new JoinOperatorSetsBase<>(this, other, JoinHint.OPTIMIZER_CHOOSES, JoinType.FULL_OUTER);
}
/**
 * Initiates a Full Outer Join transformation.
 *
 * <p>An Outer Join transformation joins two elements of two {@link DataSet DataSets} on key
 * equality and provides multiple ways to combine joining elements into one DataSet.
 *
 * <p>Elements of <b>both</b> DataSets that do not have a matching element on the opposing side
 * are joined with {@code null} and emitted to the resulting DataSet.
 *
 * @param other The other DataSet with which this DataSet is joined.
 * @param strategy The strategy that should be used to execute the join. If {@code null} is given,
 *                 then the optimizer will pick the join strategy.
 * @return A JoinOperatorSet to continue the definition of the Join transformation.
 * @throws InvalidProgramException If the given strategy is not supported for a full outer join.
 *
 * @see org.apache.flink.api.java.operators.join.JoinOperatorSetsBase
 * @see DataSet
 */
public <R> JoinOperatorSetsBase<T, R> fullOuterJoin(DataSet<R> other, JoinHint strategy) {
	// The contract allows a null hint (optimizer picks the strategy); map it to
	// OPTIMIZER_CHOOSES instead of failing with a NullPointerException in the switch.
	final JoinHint hint = (strategy == null) ? JoinHint.OPTIMIZER_CHOOSES : strategy;
	switch (hint) {
		case OPTIMIZER_CHOOSES:
		case REPARTITION_SORT_MERGE:
		case REPARTITION_HASH_FIRST:
		case REPARTITION_HASH_SECOND:
			return new JoinOperatorSetsBase<>(this, other, hint, JoinType.FULL_OUTER);
		default:
			// Broadcast strategies cannot preserve unmatched rows on both sides.
			throw new InvalidProgramException("Invalid JoinHint for FullOuterJoin: " + hint);
	}
}
// --------------------------------------------------------------------------------------------
// Co-Grouping
// --------------------------------------------------------------------------------------------
/**
 * Initiates a CoGroup transformation.
 *
 * <p>A CoGroup transformation combines the elements of two {@link DataSet DataSets} into one
 * DataSet. It groups each DataSet individually on a key and hands groups of both DataSets with
 * equal keys together into a {@link org.apache.flink.api.common.functions.RichCoGroupFunction}.
 * If a DataSet has a group with no matching key in the other DataSet, the CoGroupFunction is
 * called with an empty group for the non-existing group.
 *
 * <p>The CoGroupFunction can iterate over the elements of both groups and return any number of
 * elements, including none.
 *
 * <p>This method returns a {@link CoGroupOperatorSets} on which one of the {@code where} methods
 * can be called to define the join key of the first joining (i.e., this) DataSet.
 *
 * @param other The other DataSet of the CoGroup transformation.
 * @return A CoGroupOperatorSets to continue the definition of the CoGroup transformation.
 *
 * @see CoGroupOperatorSets
 * @see CoGroupOperator
 * @see DataSet
 */
public <R> CoGroupOperator.CoGroupOperatorSets<T, R> coGroup(DataSet<R> other) {
	CoGroupOperator.CoGroupOperatorSets<T, R> sets = new CoGroupOperator.CoGroupOperatorSets<>(this, other);
	return sets;
}
// --------------------------------------------------------------------------------------------
// Cross
// --------------------------------------------------------------------------------------------
/**
 * Initiates a Cross transformation.
 *
 * <p>A Cross transformation combines the elements of two
 * {@link DataSet DataSets} into one DataSet. It builds all pair combinations of elements of
 * both DataSets, i.e., it builds a Cartesian product.
 *
 * <p>The resulting {@link org.apache.flink.api.java.operators.CrossOperator.DefaultCross} wraps each pair of crossed elements into a {@link Tuple2}, with
 * the element of the first input being the first field of the tuple and the element of the
 * second input being the second field of the tuple.
 *
 * <p>Call {@link org.apache.flink.api.java.operators.CrossOperator.DefaultCross#with(org.apache.flink.api.common.functions.CrossFunction)} to define a
 * {@link org.apache.flink.api.common.functions.CrossFunction} which is called for
 * each pair of crossed elements. The CrossFunction returns exactly one element for each pair of input elements.
 *
 * @param other The other DataSet with which this DataSet is crossed.
 * @return A DefaultCross that returns a Tuple2 for each pair of crossed elements.
 *
 * @see org.apache.flink.api.java.operators.CrossOperator.DefaultCross
 * @see org.apache.flink.api.common.functions.CrossFunction
 * @see DataSet
 * @see Tuple2
 */
public <R> CrossOperator.DefaultCross<T, R> cross(DataSet<R> other) {
	// No size hint given: the optimizer decides how to execute the Cartesian product.
	return new CrossOperator.DefaultCross<>(this, other, CrossHint.OPTIMIZER_CHOOSES, Utils.getCallLocationName());
}
/**
 * Initiates a Cross transformation.
 *
 * <p>A Cross transformation combines the elements of two {@link DataSet DataSets} into one
 * DataSet. It builds all pair combinations of elements of both DataSets, i.e., it builds a
 * Cartesian product. This method also gives the hint to the optimizer that the second DataSet
 * to cross is much smaller than the first one.
 *
 * <p>The resulting {@link org.apache.flink.api.java.operators.CrossOperator.DefaultCross} wraps each pair of crossed elements into a {@link Tuple2}, with
 * the element of the first input being the first field of the tuple and the element of the
 * second input being the second field of the tuple.
 *
 * <p>Call {@link org.apache.flink.api.java.operators.CrossOperator.DefaultCross#with(org.apache.flink.api.common.functions.CrossFunction)} to define a
 * {@link org.apache.flink.api.common.functions.CrossFunction} which is called for
 * each pair of crossed elements. The CrossFunction returns exactly one element for each pair of input elements.
 *
 * @param other The other DataSet with which this DataSet is crossed.
 * @return A DefaultCross that returns a Tuple2 for each pair of crossed elements.
 *
 * @see org.apache.flink.api.java.operators.CrossOperator.DefaultCross
 * @see org.apache.flink.api.common.functions.CrossFunction
 * @see DataSet
 * @see Tuple2
 */
public <R> CrossOperator.DefaultCross<T, R> crossWithTiny(DataSet<R> other) {
	// Hint that the second input is small so it can be replicated cheaply.
	return new CrossOperator.DefaultCross<>(this, other, CrossHint.SECOND_IS_SMALL, Utils.getCallLocationName());
}
/**
 * Initiates a Cross transformation.
 *
 * <p>A Cross transformation combines the elements of two {@link DataSet DataSets} into one
 * DataSet. It builds all pair combinations of elements of both DataSets, i.e., it builds a
 * Cartesian product. This method also gives the hint to the optimizer that the second DataSet
 * to cross is much larger than the first one.
 *
 * <p>The resulting {@link org.apache.flink.api.java.operators.CrossOperator.DefaultCross} wraps each pair of crossed elements into a {@link Tuple2}, with
 * the element of the first input being the first field of the tuple and the element of the
 * second input being the second field of the tuple.
 *
 * <p>Call {@link org.apache.flink.api.java.operators.CrossOperator.DefaultCross#with(org.apache.flink.api.common.functions.CrossFunction)} to define a
 * {@link org.apache.flink.api.common.functions.CrossFunction} which is called for
 * each pair of crossed elements. The CrossFunction returns exactly one element for each pair of input elements.
 *
 * @param other The other DataSet with which this DataSet is crossed.
 * @return A DefaultCross that returns a Tuple2 for each pair of crossed elements.
 *
 * @see org.apache.flink.api.java.operators.CrossOperator.DefaultCross
 * @see org.apache.flink.api.common.functions.CrossFunction
 * @see DataSet
 * @see Tuple2
 */
public <R> CrossOperator.DefaultCross<T, R> crossWithHuge(DataSet<R> other) {
	// Hint that the first input is the small side so it can be replicated cheaply.
	return new CrossOperator.DefaultCross<>(this, other, CrossHint.FIRST_IS_SMALL, Utils.getCallLocationName());
}
// --------------------------------------------------------------------------------------------
// Iterations
// --------------------------------------------------------------------------------------------
/**
 * Initiates an iterative part of the program that executes multiple times and feeds back data sets.
 * The iterative part needs to be closed by calling
 * {@link org.apache.flink.api.java.operators.IterativeDataSet#closeWith(DataSet)}. The data set
 * given to the {@code closeWith(DataSet)} method is the data set that will be fed back and used as
 * the input to the next iteration. The return value of the {@code closeWith(DataSet)} method is
 * the resulting data set after the iteration has terminated.
 *
 * <p>An example of an iterative computation is as follows:
 *
 * <pre>
 * {@code
 * DataSet<Double> input = ...;
 *
 * DataSet<Double> startOfIteration = input.iterate(10);
 * DataSet<Double> toBeFedBack = startOfIteration
 *                               .map(new MyMapper())
 *                               .groupBy(...).reduceGroup(new MyReducer());
 * DataSet<Double> result = startOfIteration.closeWith(toBeFedBack);
 * }
 * </pre>
 *
 * <p>The iteration has a maximum number of times that it executes. A dynamic termination can be
 * realized by using a termination criterion (see
 * {@link org.apache.flink.api.java.operators.IterativeDataSet#closeWith(DataSet, DataSet)}).
 *
 * @param maxIterations The maximum number of times that the iteration is executed.
 * @return An IterativeDataSet that marks the start of the iterative part and needs to be closed by
 *         {@link org.apache.flink.api.java.operators.IterativeDataSet#closeWith(DataSet)}.
 *
 * @see org.apache.flink.api.java.operators.IterativeDataSet
 */
public IterativeDataSet<T> iterate(int maxIterations) {
	// The iterative data set keeps this data set as the initial input of the loop.
	return new IterativeDataSet<>(getExecutionEnvironment(), getType(), this, maxIterations);
}
/**
 * Initiates a delta iteration. A delta iteration is similar to a regular iteration (as started by
 * {@link #iterate(int)}, but maintains state across the individual iteration steps. The solution
 * set, which represents the current state at the beginning of each iteration, can be obtained via
 * {@link org.apache.flink.api.java.operators.DeltaIteration#getSolutionSet()}. It can be accessed
 * by joining (or CoGrouping) with it. The DataSet that represents the workset of an iteration can
 * be obtained via {@link org.apache.flink.api.java.operators.DeltaIteration#getWorkset()}.
 * The solution set is updated by producing a delta for it, which is merged into the solution set
 * at the end of each iteration step.
 *
 * <p>The delta iteration must be closed by calling
 * {@link org.apache.flink.api.java.operators.DeltaIteration#closeWith(DataSet, DataSet)}. The two
 * parameters are the delta for the solution set and the new workset (the data set that will be fed
 * back). The return value of the {@code closeWith(DataSet, DataSet)} method is the resulting data
 * set after the iteration has terminated. Delta iterations terminate when the feed back data set
 * (the workset) is empty. In addition, a maximum number of steps is given as a fall back
 * termination guard.
 *
 * <p>Elements in the solution set are uniquely identified by a key. When merging the solution set
 * delta, contained elements with the same key are replaced.
 *
 * <p><b>NOTE:</b> Delta iterations currently support only tuple valued data types. This
 * restriction will be removed in the future. The key is specified by the tuple position.
 *
 * <p>A code example for a delta iteration is as follows
 * <pre>
 * {@code
 * DeltaIteration<Tuple2<Long, Long>, Tuple2<Long, Long>> iteration =
 *                                                  initialState.iterateDelta(initialFeedbackSet, 100, 0);
 *
 * DataSet<Tuple2<Long, Long>> delta = iteration.groupBy(0).aggregate(Aggregations.AVG, 1)
 *                                              .join(iteration.getSolutionSet()).where(0).equalTo(0)
 *                                              .flatMap(new ProjectAndFilter());
 *
 * DataSet<Tuple2<Long, Long>> feedBack = delta.join(someOtherSet).where(...).equalTo(...).with(...);
 *
 * // close the delta iteration (delta and new workset are identical)
 * DataSet<Tuple2<Long, Long>> result = iteration.closeWith(delta, feedBack);
 * }
 * </pre>
 *
 * @param workset The initial version of the data set that is fed back to the next iteration step (the workset).
 * @param maxIterations The maximum number of iteration steps, as a fall back safeguard.
 * @param keyPositions The position of the tuple fields that is used as the key of the solution set.
 *
 * @return The DeltaIteration that marks the start of a delta iteration.
 *
 * @see org.apache.flink.api.java.operators.DeltaIteration
 */
public <R> DeltaIteration<T, R> iterateDelta(DataSet<R> workset, int maxIterations, int... keyPositions) {
	Preconditions.checkNotNull(workset);
	Preconditions.checkNotNull(keyPositions);
	// The solution set (this data set) is keyed by the given tuple positions.
	Keys.ExpressionKeys<T> solutionSetKeys = new Keys.ExpressionKeys<>(keyPositions, getType());
	return new DeltaIteration<>(getExecutionEnvironment(), getType(), this, workset, solutionSetKeys, maxIterations);
}
// --------------------------------------------------------------------------------------------
// Custom Operators
// -------------------------------------------------------------------------------------------
/**
 * Runs a {@link CustomUnaryOperation} on the data set. Custom operations are typically complex
 * operators that are composed of multiple steps.
 *
 * @param operation The operation to run.
 * @return The data set produced by the operation.
 */
public <X> DataSet<X> runOperation(CustomUnaryOperation<T, X> operation) {
	Preconditions.checkNotNull(operation, "The custom operator must not be null.");

	// Wire this data set in as the operation's input, then let the operation build its result.
	operation.setInput(this);
	return operation.createResult();
}
// --------------------------------------------------------------------------------------------
// Union
// --------------------------------------------------------------------------------------------
/**
 * Creates a union of this DataSet with another DataSet. The other DataSet must be of the same data type.
 *
 * @param other The other DataSet which is unioned with the current DataSet.
 * @return The resulting DataSet.
 */
public UnionOperator<T> union(DataSet<T> other) {
	final String callLocation = Utils.getCallLocationName();
	return new UnionOperator<>(this, other, callLocation);
}
// --------------------------------------------------------------------------------------------
// Partitioning
// --------------------------------------------------------------------------------------------
/**
 * Hash-partitions a DataSet on the specified key fields.
 *
 * <p><b>Important:</b> This operation shuffles the whole DataSet over the network and can take a
 * significant amount of time.
 *
 * @param fields The field indexes on which the DataSet is hash-partitioned.
 * @return The partitioned DataSet.
 */
public PartitionOperator<T> partitionByHash(int... fields) {
	final Keys.ExpressionKeys<T> partitionKeys = new Keys.ExpressionKeys<>(fields, getType());
	return new PartitionOperator<>(this, PartitionMethod.HASH, partitionKeys, Utils.getCallLocationName());
}
/**
 * Hash-partitions a DataSet on the specified key fields.
 *
 * <p><b>Important:</b> This operation shuffles the whole DataSet over the network and can take a
 * significant amount of time.
 *
 * @param fields The field expressions on which the DataSet is hash-partitioned.
 * @return The partitioned DataSet.
 */
public PartitionOperator<T> partitionByHash(String... fields) {
	final Keys.ExpressionKeys<T> partitionKeys = new Keys.ExpressionKeys<>(fields, getType());
	return new PartitionOperator<>(this, PartitionMethod.HASH, partitionKeys, Utils.getCallLocationName());
}
/**
 * Partitions a DataSet using the specified KeySelector.
 *
 * <p><b>Important:</b> This operation shuffles the whole DataSet over the network and can take a
 * significant amount of time.
 *
 * @param keyExtractor The KeyExtractor with which the DataSet is hash-partitioned.
 * @return The partitioned DataSet.
 *
 * @see KeySelector
 */
public <K extends Comparable<K>> PartitionOperator<T> partitionByHash(KeySelector<T, K> keyExtractor) {
	final TypeInformation<K> keyType = TypeExtractor.getKeySelectorTypes(keyExtractor, getType());
	final Keys.SelectorFunctionKeys<T, K> partitionKeys =
		new Keys.SelectorFunctionKeys<>(clean(keyExtractor), this.getType(), keyType);
	return new PartitionOperator<>(this, PartitionMethod.HASH, partitionKeys, Utils.getCallLocationName());
}
/**
 * Range-partitions a DataSet on the specified key fields.
 *
 * <p><b>Important:</b> This operation requires an extra pass over the DataSet to compute the range
 * boundaries and shuffles the whole DataSet over the network. This can take a significant amount of time.
 *
 * @param fields The field indexes on which the DataSet is range-partitioned.
 * @return The partitioned DataSet.
 */
public PartitionOperator<T> partitionByRange(int... fields) {
	final Keys.ExpressionKeys<T> partitionKeys = new Keys.ExpressionKeys<>(fields, getType());
	return new PartitionOperator<>(this, PartitionMethod.RANGE, partitionKeys, Utils.getCallLocationName());
}
/**
 * Range-partitions a DataSet on the specified key fields.
 *
 * <p><b>Important:</b> This operation requires an extra pass over the DataSet to compute the range
 * boundaries and shuffles the whole DataSet over the network. This can take a significant amount of time.
 *
 * @param fields The field expressions on which the DataSet is range-partitioned.
 * @return The partitioned DataSet.
 */
public PartitionOperator<T> partitionByRange(String... fields) {
	final Keys.ExpressionKeys<T> partitionKeys = new Keys.ExpressionKeys<>(fields, getType());
	return new PartitionOperator<>(this, PartitionMethod.RANGE, partitionKeys, Utils.getCallLocationName());
}
/**
 * Range-partitions a DataSet using the specified KeySelector.
 *
 * <p><b>Important:</b> This operation requires an extra pass over the DataSet to compute the range
 * boundaries and shuffles the whole DataSet over the network. This can take a significant amount of time.
 *
 * @param keyExtractor The KeyExtractor with which the DataSet is range-partitioned.
 * @return The partitioned DataSet.
 *
 * @see KeySelector
 */
public <K extends Comparable<K>> PartitionOperator<T> partitionByRange(KeySelector<T, K> keyExtractor) {
	final TypeInformation<K> keyType = TypeExtractor.getKeySelectorTypes(keyExtractor, getType());
	final Keys.SelectorFunctionKeys<T, K> partitionKeys =
		new Keys.SelectorFunctionKeys<>(clean(keyExtractor), this.getType(), keyType);
	return new PartitionOperator<>(this, PartitionMethod.RANGE, partitionKeys, Utils.getCallLocationName());
}
/**
 * Partitions a tuple DataSet on the specified key field using a custom partitioner.
 * This method takes the key position to partition on, and a partitioner that accepts the key type.
 *
 * <p>Note: This method works only on single field keys.
 *
 * @param partitioner The partitioner to assign partitions to keys.
 * @param field The field index on which the DataSet is to be partitioned.
 * @return The partitioned DataSet.
 */
public <K> PartitionOperator<T> partitionCustom(Partitioner<K> partitioner, int field) {
	final int[] keyFields = {field};
	final Keys.ExpressionKeys<T> partitionKeys = new Keys.ExpressionKeys<>(keyFields, getType());
	return new PartitionOperator<>(this, partitionKeys, clean(partitioner), Utils.getCallLocationName());
}
/**
 * Partitions a POJO DataSet on the specified key field using a custom partitioner.
 * This method takes the key expression to partition on, and a partitioner that accepts the key type.
 *
 * <p>Note: This method works only on single field keys.
 *
 * @param partitioner The partitioner to assign partitions to keys.
 * @param field The field expression on which the DataSet is to be partitioned.
 * @return The partitioned DataSet.
 */
public <K> PartitionOperator<T> partitionCustom(Partitioner<K> partitioner, String field) {
	final String[] keyFields = {field};
	final Keys.ExpressionKeys<T> partitionKeys = new Keys.ExpressionKeys<>(keyFields, getType());
	return new PartitionOperator<>(this, partitionKeys, clean(partitioner), Utils.getCallLocationName());
}
/**
 * Partitions a DataSet on the key returned by the selector, using a custom partitioner.
 * This method takes the key selector to get the key to partition on, and a partitioner that
 * accepts the key type.
 *
 * <p>Note: This method works only on single field keys, i.e. the selector cannot return tuples
 * of fields.
 *
 * @param partitioner The partitioner to assign partitions to keys.
 * @param keyExtractor The KeyExtractor with which the DataSet is partitioned.
 * @return The partitioned DataSet.
 *
 * @see KeySelector
 */
public <K extends Comparable<K>> PartitionOperator<T> partitionCustom(Partitioner<K> partitioner, KeySelector<T, K> keyExtractor) {
	final TypeInformation<K> keyType = TypeExtractor.getKeySelectorTypes(keyExtractor, getType());
	// Pass the key selector through the closure cleaner, consistent with the other
	// KeySelector-based overloads (partitionByHash / partitionByRange / sortPartition),
	// so non-serializable enclosing references are stripped before shipping.
	return new PartitionOperator<>(this, new Keys.SelectorFunctionKeys<>(clean(keyExtractor), getType(), keyType), clean(partitioner), Utils.getCallLocationName());
}
/**
 * Enforces a re-balancing of the DataSet, i.e., the DataSet is evenly distributed over all parallel
 * instances of the following task. This can help to improve performance in case of heavy data skew
 * and compute intensive operations.
 *
 * <p><b>Important:</b> This operation shuffles the whole DataSet over the network and can take a
 * significant amount of time.
 *
 * @return The re-balanced DataSet.
 */
public PartitionOperator<T> rebalance() {
	final String callLocation = Utils.getCallLocationName();
	return new PartitionOperator<>(this, PartitionMethod.REBALANCE, callLocation);
}
// --------------------------------------------------------------------------------------------
// Sorting
// --------------------------------------------------------------------------------------------
/**
 * Locally sorts the partitions of the DataSet on the specified field in the specified order.
 * The DataSet can be sorted on multiple fields by chaining sortPartition() calls.
 *
 * @param field The field index on which the DataSet is sorted.
 * @param order The order in which the DataSet is sorted.
 * @return The DataSet with sorted local partitions.
 */
public SortPartitionOperator<T> sortPartition(int field, Order order) {
	final String callLocation = Utils.getCallLocationName();
	return new SortPartitionOperator<>(this, field, order, callLocation);
}
/**
 * Locally sorts the partitions of the DataSet on the specified field in the specified order.
 * The DataSet can be sorted on multiple fields by chaining sortPartition() calls.
 *
 * @param field The field expression referring to the field on which the DataSet is sorted.
 * @param order The order in which the DataSet is sorted.
 * @return The DataSet with sorted local partitions.
 */
public SortPartitionOperator<T> sortPartition(String field, Order order) {
	final String callLocation = Utils.getCallLocationName();
	return new SortPartitionOperator<>(this, field, order, callLocation);
}
/**
 * Locally sorts the partitions of the DataSet on the extracted key in the specified order.
 * The DataSet can be sorted on multiple values by returning a tuple from the KeySelector.
 *
 * <p>Note that no additional sort keys can be appended to KeySelector sort keys. To sort
 * the partitions by multiple values using a KeySelector, the KeySelector must return a tuple
 * consisting of the values.
 *
 * @param keyExtractor The KeySelector function which extracts the key values from the DataSet
 *                     on which the DataSet is sorted.
 * @param order The order in which the DataSet is sorted.
 * @return The DataSet with sorted local partitions.
 */
public <K> SortPartitionOperator<T> sortPartition(KeySelector<T, K> keyExtractor, Order order) {
	final TypeInformation<K> keyType = TypeExtractor.getKeySelectorTypes(keyExtractor, getType());
	final Keys.SelectorFunctionKeys<T, K> sortKeys =
		new Keys.SelectorFunctionKeys<>(clean(keyExtractor), getType(), keyType);
	return new SortPartitionOperator<>(this, sortKeys, order, Utils.getCallLocationName());
}
// --------------------------------------------------------------------------------------------
// Top-K
// --------------------------------------------------------------------------------------------
// --------------------------------------------------------------------------------------------
// Result writing
// --------------------------------------------------------------------------------------------
/**
 * Writes a DataSet as text file(s) to the specified location.
 *
 * <p>For each element of the DataSet the result of {@link Object#toString()} is written.<br/>
 * <br/>
 * <span class="strong">Output files and directories</span><br/>
 * The output that the writeAsText() method produces depends on the following circumstances:
 * <ul>
 * <li>
 * A directory is created and multiple files are written underneath. (Default behavior)<br/>
 * This sink creates a directory called "path1", and files "1", "2" ... are written underneath depending on <a href="https://flink.apache.org/faq.html#what-is-the-parallelism-how-do-i-set-it">parallelism</a>
 * <pre>{@code .
 * └── path1/
 *     ├── 1
 *     ├── 2
 *     └── ...}</pre>
 * Code Example
 * <pre>{@code dataset.writeAsText("file:///path1");}</pre>
 * </li>
 * <li>
 * A single file called "path1" is created when parallelism is set to 1
 * <pre>{@code .
 * └── path1 }</pre>
 * Code Example
 * <pre>{@code // Parallelism is set to only this particular operation
 *dataset.writeAsText("file:///path1").setParallelism(1);
 *
 * // This creates the same effect but note all operators' parallelism are set to one
 *env.setParallelism(1);
 *...
 *dataset.writeAsText("file:///path1"); }</pre>
 * </li>
 * <li>
 * A directory is always created when <a href="https://ci.apache.org/projects/flink/flink-docs-master/setup/config.html#file-systems">fs.output.always-create-directory</a>
 * is set to true in flink-conf.yaml file, even when parallelism is set to 1.
 * <pre>{@code .
 * └── path1/
 *     └── 1 }</pre>
 * Code Example
 * <pre>{@code // fs.output.always-create-directory = true
 *dataset.writeAsText("file:///path1").setParallelism(1); }</pre>
 * </li>
 * </ul>
 *
 * @param filePath The path pointing to the location the text file or files under the directory is written to.
 * @return The DataSink that writes the DataSet.
 *
 * @see TextOutputFormat
 */
public DataSink<T> writeAsText(String filePath) {
	final TextOutputFormat<T> format = new TextOutputFormat<>(new Path(filePath));
	return output(format);
}
/**
 * Writes a DataSet as text file(s) to the specified location.
 *
 * <p>For each element of the DataSet the result of {@link Object#toString()} is written.
 *
 * @param filePath The path pointing to the location the text file is written to.
 * @param writeMode Control the behavior for existing files. Options are NO_OVERWRITE and OVERWRITE.
 * @return The DataSink that writes the DataSet.
 *
 * @see TextOutputFormat
 * @see DataSet#writeAsText(String) Output files and directories
 */
public DataSink<T> writeAsText(String filePath, WriteMode writeMode) {
	final TextOutputFormat<T> format = new TextOutputFormat<>(new Path(filePath));
	format.setWriteMode(writeMode);
	return output(format);
}
/**
 * Writes a DataSet as text file(s) to the specified location.
 *
 * <p>For each element of the DataSet the result of {@link TextFormatter#format(Object)} is written.
 *
 * @param filePath The path pointing to the location the text file is written to.
 * @param formatter formatter that is applied on every element of the DataSet.
 * @return The DataSink that writes the DataSet.
 *
 * @see TextOutputFormat
 * @see DataSet#writeAsText(String) Output files and directories
 */
public DataSink<String> writeAsFormattedText(String filePath, TextFormatter<T> formatter) {
	// Apply the formatter as a map step, then reuse the plain text sink.
	final DataSet<String> formatted = map(new FormattingMapper<>(clean(formatter)));
	return formatted.writeAsText(filePath);
}
/**
 * Writes a DataSet as text file(s) to the specified location.
 *
 * <p>For each element of the DataSet the result of {@link TextFormatter#format(Object)} is written.
 *
 * @param filePath The path pointing to the location the text file is written to.
 * @param writeMode Control the behavior for existing files. Options are NO_OVERWRITE and OVERWRITE.
 * @param formatter formatter that is applied on every element of the DataSet.
 * @return The DataSink that writes the DataSet.
 *
 * @see TextOutputFormat
 * @see DataSet#writeAsText(String) Output files and directories
 */
public DataSink<String> writeAsFormattedText(String filePath, WriteMode writeMode, TextFormatter<T> formatter) {
	// Apply the formatter as a map step, then reuse the plain text sink with the given write mode.
	final DataSet<String> formatted = map(new FormattingMapper<>(clean(formatter)));
	return formatted.writeAsText(filePath, writeMode);
}
/**
 * Writes a {@link Tuple} DataSet as CSV file(s) to the specified location.
 *
 * <p><b>Note: Only a Tuple DataSet can be written as a CSV file.</b>
 *
 * <p>For each Tuple field the result of {@link Object#toString()} is written.
 * Tuple fields are separated by the default field delimiter {@code "comma" (,)}.
 *
 * <p>Tuples are separated by the newline character ({@code \n}).
 *
 * @param filePath The path pointing to the location the CSV file is written to.
 * @return The DataSink that writes the DataSet.
 *
 * @see Tuple
 * @see CsvOutputFormat
 * @see DataSet#writeAsText(String) Output files and directories
 */
public DataSink<T> writeAsCsv(String filePath) {
	// Delegate with the default row and field delimiters.
	return writeAsCsv(filePath, CsvOutputFormat.DEFAULT_LINE_DELIMITER, CsvOutputFormat.DEFAULT_FIELD_DELIMITER);
}
/**
 * Writes a {@link Tuple} DataSet as CSV file(s) to the specified location.
 *
 * <p><b>Note: Only a Tuple DataSet can be written as a CSV file.</b>
 *
 * <p>For each Tuple field the result of {@link Object#toString()} is written.
 * Tuple fields are separated by the default field delimiter {@code "comma" (,)}.
 *
 * <p>Tuples are separated by the newline character ({@code \n}).
 *
 * @param filePath The path pointing to the location the CSV file is written to.
 * @param writeMode The behavior regarding existing files. Options are NO_OVERWRITE and OVERWRITE.
 * @return The DataSink that writes the DataSet.
 *
 * @see Tuple
 * @see CsvOutputFormat
 * @see DataSet#writeAsText(String) Output files and directories
 */
public DataSink<T> writeAsCsv(String filePath, WriteMode writeMode) {
	final Path path = new Path(filePath);
	return internalWriteAsCsv(path, CsvOutputFormat.DEFAULT_LINE_DELIMITER, CsvOutputFormat.DEFAULT_FIELD_DELIMITER, writeMode);
}
/**
 * Writes a {@link Tuple} DataSet as CSV file(s) to the specified location with the specified field and line delimiters.
 *
 * <p><b>Note: Only a Tuple DataSet can be written as a CSV file.</b>
 *
 * <p>For each Tuple field the result of {@link Object#toString()} is written.
 *
 * @param filePath The path pointing to the location the CSV file is written to.
 * @param rowDelimiter The row delimiter to separate Tuples.
 * @param fieldDelimiter The field delimiter to separate Tuple fields.
 * @return The DataSink that writes the DataSet.
 *
 * @see Tuple
 * @see CsvOutputFormat
 * @see DataSet#writeAsText(String) Output files and directories
 */
public DataSink<T> writeAsCsv(String filePath, String rowDelimiter, String fieldDelimiter) {
	// A null write mode means the format's default behavior is kept.
	final Path path = new Path(filePath);
	return internalWriteAsCsv(path, rowDelimiter, fieldDelimiter, null);
}
/**
 * Writes a {@link Tuple} DataSet as CSV file(s) to the specified location with the specified field and line delimiters.
 *
 * <p><b>Note: Only a Tuple DataSet can be written as a CSV file.</b>
 * For each Tuple field the result of {@link Object#toString()} is written.
 *
 * @param filePath The path pointing to the location the CSV file is written to.
 * @param rowDelimiter The row delimiter to separate Tuples.
 * @param fieldDelimiter The field delimiter to separate Tuple fields.
 * @param writeMode The behavior regarding existing files. Options are NO_OVERWRITE and OVERWRITE.
 * @return The DataSink that writes the DataSet.
 *
 * @see Tuple
 * @see CsvOutputFormat
 * @see DataSet#writeAsText(String) Output files and directories
 */
public DataSink<T> writeAsCsv(String filePath, String rowDelimiter, String fieldDelimiter, WriteMode writeMode) {
	final Path path = new Path(filePath);
	return internalWriteAsCsv(path, rowDelimiter, fieldDelimiter, writeMode);
}
// Shared CSV sink construction; a null write mode keeps the format's default behavior.
@SuppressWarnings("unchecked")
private <X extends Tuple> DataSink<T> internalWriteAsCsv(Path filePath, String rowDelimiter, String fieldDelimiter, WriteMode wm) {
	Preconditions.checkArgument(getType().isTupleType(), "The writeAsCsv() method can only be used on data sets of tuples.");

	final CsvOutputFormat<X> format = new CsvOutputFormat<>(filePath, rowDelimiter, fieldDelimiter);
	if (wm != null) {
		format.setWriteMode(wm);
	}
	return output((OutputFormat<T>) format);
}
/**
 * Prints the elements in a DataSet to the standard output stream {@link System#out} of the JVM that calls
 * the print() method. For programs that are executed in a cluster, this method needs
 * to gather the contents of the DataSet back to the client, to print it there.
 *
 * <p>The string written for each element is defined by the {@link Object#toString()} method.
 *
 * <p>This method immediately triggers the program execution, similar to the
 * {@link #collect()} and {@link #count()} methods.
 *
 * @see #printToErr()
 * @see #printOnTaskManager(String)
 */
public void print() throws Exception {
	// collect() triggers execution and brings the full data set to the client.
	for (T element : collect()) {
		System.out.println(element);
	}
}
/**
 * Prints the elements in a DataSet to the standard error stream {@link System#err} of the JVM that calls
 * the print() method. For programs that are executed in a cluster, this method needs
 * to gather the contents of the DataSet back to the client, to print it there.
 *
 * <p>The string written for each element is defined by the {@link Object#toString()} method.
 *
 * <p>This method immediately triggers the program execution, similar to the
 * {@link #collect()} and {@link #count()} methods.
 *
 * @see #print()
 * @see #printOnTaskManager(String)
 */
public void printToErr() throws Exception {
	// collect() triggers execution and brings the full data set to the client.
	for (T element : collect()) {
		System.err.println(element);
	}
}
/**
 * Writes a DataSet to the standard output streams (stdout) of the TaskManagers that execute
 * the program (or more specifically, the data sink operators). On a typical cluster setup, the
 * data will appear in the TaskManagers' <i>.out</i> files.
 *
 * <p>To print the data to the console or stdout stream of the client process instead, use the
 * {@link #print()} method.
 *
 * <p>For each element of the DataSet the result of {@link Object#toString()} is written.
 *
 * @param prefix The string to prefix each line of the output with. This helps identifying outputs
 *               from different printing sinks.
 * @return The DataSink operator that writes the DataSet.
 *
 * @see #print()
 */
public DataSink<T> printOnTaskManager(String prefix) {
	// false -> write to stdout, not stderr.
	final PrintingOutputFormat<T> format = new PrintingOutputFormat<>(prefix, false);
	return output(format);
}
/**
 * Writes a DataSet to the standard output stream (stdout).
 *
 * <p>For each element of the DataSet the result of {@link Object#toString()} is written.
 *
 * @param sinkIdentifier The string to prefix the output with.
 * @return The DataSink that writes the DataSet.
 *
 * @deprecated Use {@link #printOnTaskManager(String)} instead.
 */
@Deprecated
@PublicEvolving
public DataSink<T> print(String sinkIdentifier) {
	// false -> write to stdout, not stderr.
	final PrintingOutputFormat<T> format = new PrintingOutputFormat<>(sinkIdentifier, false);
	return output(format);
}
/**
 * Writes a DataSet to the standard error stream (stderr).
 *
 * <p>For each element of the DataSet the result of {@link Object#toString()} is written.
 *
 * @param sinkIdentifier The string to prefix the output with.
 * @return The DataSink that writes the DataSet.
 *
 * @deprecated Use {@link #printOnTaskManager(String)} instead, or the {@link PrintingOutputFormat}.
 */
@Deprecated
@PublicEvolving
public DataSink<T> printToErr(String sinkIdentifier) {
	// true -> write to stderr instead of stdout.
	final PrintingOutputFormat<T> format = new PrintingOutputFormat<>(sinkIdentifier, true);
	return output(format);
}
/**
 * Writes a DataSet using a {@link FileOutputFormat} to a specified location.
 * This method adds a data sink to the program.
 *
 * @param outputFormat The FileOutputFormat to write the DataSet.
 * @param filePath The path to the location where the DataSet is written.
 * @return The DataSink that writes the DataSet.
 *
 * @see FileOutputFormat
 */
public DataSink<T> write(FileOutputFormat<T> outputFormat, String filePath) {
	Preconditions.checkNotNull(filePath, "File path must not be null.");
	Preconditions.checkNotNull(outputFormat, "Output format must not be null.");

	final Path path = new Path(filePath);
	outputFormat.setOutputFilePath(path);
	return output(outputFormat);
}
/**
 * Writes a DataSet using a {@link FileOutputFormat} to a specified location.
 * This method adds a data sink to the program.
 *
 * @param outputFormat The FileOutputFormat to write the DataSet.
 * @param filePath The path to the location where the DataSet is written.
 * @param writeMode The mode of writing, indicating whether to overwrite existing files.
 * @return The DataSink that writes the DataSet.
 *
 * @see FileOutputFormat
 */
public DataSink<T> write(FileOutputFormat<T> outputFormat, String filePath, WriteMode writeMode) {
	Preconditions.checkNotNull(filePath, "File path must not be null.");
	Preconditions.checkNotNull(writeMode, "Write mode must not be null.");
	Preconditions.checkNotNull(outputFormat, "Output format must not be null.");

	final Path path = new Path(filePath);
	outputFormat.setOutputFilePath(path);
	outputFormat.setWriteMode(writeMode);
	return output(outputFormat);
}
/**
 * Emits a DataSet using an {@link OutputFormat}. This method adds a data sink to the program.
 * Programs may have multiple data sinks. A DataSet may also have multiple consumers (data sinks
 * or transformations) at the same time.
 *
 * @param outputFormat The OutputFormat to process the DataSet.
 * @return The DataSink that processes the DataSet.
 *
 * @see OutputFormat
 * @see DataSink
 */
public DataSink<T> output(OutputFormat<T> outputFormat) {
	Preconditions.checkNotNull(outputFormat);

	// Formats that need to know their input type get it configured up front.
	if (outputFormat instanceof InputTypeConfigurable) {
		((InputTypeConfigurable) outputFormat).setInputType(getType(), context.getConfig());
	}

	final DataSink<T> sink = new DataSink<>(this, outputFormat, getType());
	context.registerDataSink(sink);
	return sink;
}
// --------------------------------------------------------------------------------------------
// Utilities
// --------------------------------------------------------------------------------------------
/**
 * Verifies that both DataSets belong to the same ExecutionEnvironment.
 *
 * @param set1 The first DataSet.
 * @param set2 The second DataSet.
 * @throws IllegalArgumentException If the inputs come from different execution contexts.
 */
protected static void checkSameExecutionContext(DataSet<?> set1, DataSet<?> set2) {
    final boolean sameContext = set1.getExecutionEnvironment() == set2.getExecutionEnvironment();
    if (!sameContext) {
        throw new IllegalArgumentException("The two inputs have different execution contexts.");
    }
}
}
| {
"pile_set_name": "Github"
} |
# Event 376 - Debugger_Intellisense_Menu_Filter
###### Version: 1
## Description
None
## Data Dictionary
|Standard Name|Field Name|Type|Description|Sample Value|
|---|---|---|---|---|
|TBD|Key|UInt32|None|`None`|
|TBD|TraceDescription|UnicodeString|None|`None`|
## Tags
* etw_level_Informational
* etw_opcode_Start
* etw_task_Debugger_Intellisense_Menu_Filter | {
"pile_set_name": "Github"
} |
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE article PUBLIC "-//Boost//DTD BoostBook XML V1.0//EN" "http://www.boost.org/tools/boostbook/dtd/boostbook.dtd">
<article id="callout_tests" last-revision="DEBUG MODE Date: 2000/12/20 12:00:00 $"
xmlns:xi="http://www.w3.org/2001/XInclude">
<title>Callout Tests</title>
<para>
Example 1:
</para>
<para>
Now we can define a function that simulates an ordinary six-sided die.
</para>
<para>
<programlisting><phrase role="keyword">int</phrase> <phrase role="identifier">roll_die</phrase><phrase role="special">()</phrase> <phrase role="special">{</phrase>
<phrase role="identifier">boost</phrase><phrase role="special">::</phrase><phrase role="identifier">uniform_int</phrase><phrase role="special"><></phrase> <phrase role="identifier">dist</phrase><phrase role="special">(</phrase><phrase role="number">1</phrase><phrase role="special">,</phrase> <phrase role="number">6</phrase><phrase role="special">);</phrase> <co id="callout_tests.c0" linkends="callout_tests.c1" />
<phrase role="special">}</phrase>
</programlisting>
</para>
<calloutlist>
<callout arearefs="callout_tests.c0" id="callout_tests.c1">
<para>
create a uniform_int distribution
</para>
</callout>
</calloutlist>
<para>
Example 2:
</para>
<para>
<programlisting><phrase role="keyword">int</phrase> <phrase role="identifier">roll_die</phrase><phrase role="special">()</phrase> <phrase role="special">{</phrase>
<co id="callout_tests.c2" linkends="callout_tests.c3" /><phrase role="identifier">boost</phrase><phrase role="special">::</phrase><phrase role="identifier">variate_generator</phrase><phrase role="special"><</phrase><phrase role="identifier">boost</phrase><phrase role="special">::</phrase><phrase role="identifier">mt19937</phrase><phrase role="special">&,</phrase> <phrase role="identifier">boost</phrase><phrase role="special">::</phrase><phrase role="identifier">uniform_int</phrase><phrase role="special"><></phrase> <phrase role="special">></phrase> <phrase role="identifier">die</phrase><phrase role="special">(</phrase><phrase role="identifier">gen</phrase><phrase role="special">,</phrase> <phrase role="identifier">dist</phrase><phrase role="special">);</phrase>
<phrase role="special">}</phrase>
</programlisting>
</para>
<calloutlist>
<callout arearefs="callout_tests.c2" id="callout_tests.c3">
<important>
<para>
test
</para>
</important>
</callout>
</calloutlist>
<para>
Example 3:
</para>
<para>
<programlisting><phrase role="keyword">int</phrase> <phrase role="identifier">roll_die</phrase><phrase role="special">()</phrase> <phrase role="special">{</phrase>
<co id="callout_tests.c4" linkends="callout_tests.c5" /><phrase role="identifier">boost</phrase><phrase role="special">::</phrase><phrase role="identifier">variate_generator</phrase><phrase role="special"><</phrase><phrase role="identifier">boost</phrase><phrase role="special">::</phrase><phrase role="identifier">mt19937</phrase><phrase role="special">&,</phrase> <phrase role="identifier">boost</phrase><phrase role="special">::</phrase><phrase role="identifier">uniform_int</phrase><phrase role="special"><></phrase> <phrase role="special">></phrase> <phrase role="identifier">die</phrase><phrase role="special">(</phrase><phrase role="identifier">gen</phrase><phrase role="special">,</phrase> <phrase role="identifier">dist</phrase><phrase role="special">);</phrase>
<phrase role="special">}</phrase>
</programlisting>
</para>
<calloutlist>
<callout arearefs="callout_tests.c4" id="callout_tests.c5">
<important>
<para>
test
</para>
</important>
</callout>
</calloutlist>
<para>
Example 3 (again!):
</para>
<para>
<programlisting><phrase role="keyword">int</phrase> <phrase role="identifier">roll_die</phrase><phrase role="special">()</phrase> <phrase role="special">{</phrase>
<co id="callout_tests.c6" linkends="callout_tests.c7" /><phrase role="identifier">boost</phrase><phrase role="special">::</phrase><phrase role="identifier">variate_generator</phrase><phrase role="special"><</phrase><phrase role="identifier">boost</phrase><phrase role="special">::</phrase><phrase role="identifier">mt19937</phrase><phrase role="special">&,</phrase> <phrase role="identifier">boost</phrase><phrase role="special">::</phrase><phrase role="identifier">uniform_int</phrase><phrase role="special"><></phrase> <phrase role="special">></phrase> <phrase role="identifier">die</phrase><phrase role="special">(</phrase><phrase role="identifier">gen</phrase><phrase role="special">,</phrase> <phrase role="identifier">dist</phrase><phrase role="special">);</phrase>
<phrase role="special">}</phrase>
</programlisting>
</para>
<calloutlist>
<callout arearefs="callout_tests.c6" id="callout_tests.c7">
<important>
<para>
test
</para>
</important>
</callout>
</calloutlist>
<para>
Example 4:
</para>
<para>
<programlisting><phrase role="keyword">int</phrase> <phrase role="identifier">roll_die</phrase><phrase role="special">()</phrase> <phrase role="special">{</phrase>
<co id="callout_tests.c8" linkends="callout_tests.c9" /><phrase role="identifier">boost</phrase><phrase role="special">::</phrase><phrase role="identifier">variate_generator</phrase><phrase role="special"><</phrase><phrase role="identifier">boost</phrase><phrase role="special">::</phrase><phrase role="identifier">mt19937</phrase><phrase role="special">&,</phrase> <phrase role="identifier">boost</phrase><phrase role="special">::</phrase><phrase role="identifier">uniform_int</phrase><phrase role="special"><></phrase> <phrase role="special">></phrase> <phrase role="identifier">die</phrase><phrase role="special">(</phrase><phrase role="identifier">gen</phrase><phrase role="special">,</phrase> <phrase role="identifier">dist</phrase><phrase role="special">);</phrase>
<co id="callout_tests.c10" linkends="callout_tests.c11" /><phrase role="identifier">boost</phrase><phrase role="special">::</phrase><phrase role="identifier">uniform_int</phrase><phrase role="special"><></phrase> <phrase role="identifier">dist</phrase><phrase role="special">(</phrase><phrase role="number">1</phrase><phrase role="special">,</phrase> <phrase role="number">6</phrase><phrase role="special">);</phrase> <co id="callout_tests.c12" linkends="callout_tests.c13" />
<phrase role="special">}</phrase>
</programlisting>
</para>
<calloutlist>
<callout arearefs="callout_tests.c8" id="callout_tests.c9">
<para>
callout 1
</para>
</callout>
<callout arearefs="callout_tests.c10" id="callout_tests.c11">
<para>
callout 2
</para>
</callout>
<callout arearefs="callout_tests.c12" id="callout_tests.c13">
<para>
create a uniform_int distribution
</para>
</callout>
</calloutlist>
<para>
<programlisting><co id="callout_tests.c14" linkends="callout_tests.c15" /><phrase role="identifier">boost</phrase><phrase role="special">::</phrase><phrase role="identifier">uniform_int</phrase><phrase role="special"><></phrase> <phrase role="identifier">dist</phrase><phrase role="special">(</phrase><phrase role="number">1</phrase><phrase role="special">,</phrase> <phrase role="number">6</phrase><phrase role="special">);</phrase> <co id="callout_tests.c16" linkends="callout_tests.c17" />
</programlisting>
</para>
<calloutlist>
<callout arearefs="callout_tests.c14" id="callout_tests.c15">
<para>
callout 2
</para>
</callout>
<callout arearefs="callout_tests.c16" id="callout_tests.c17">
<para>
create a uniform_int distribution
</para>
</callout>
</calloutlist>
<section id="callout_tests.test_section">
<title><link linkend="callout_tests.test_section">Try callouts in a section</link></title>
<para>
Example 1:
</para>
<para>
Now we can define a function that simulates an ordinary six-sided die.
</para>
<para>
<programlisting><phrase role="keyword">int</phrase> <phrase role="identifier">roll_die</phrase><phrase role="special">()</phrase> <phrase role="special">{</phrase>
<phrase role="identifier">boost</phrase><phrase role="special">::</phrase><phrase role="identifier">uniform_int</phrase><phrase role="special"><></phrase> <phrase role="identifier">dist</phrase><phrase role="special">(</phrase><phrase role="number">1</phrase><phrase role="special">,</phrase> <phrase role="number">6</phrase><phrase role="special">);</phrase> <co id="callout_tests.test_section.c0" linkends="callout_tests.test_section.c1" />
<phrase role="special">}</phrase>
</programlisting>
</para>
<calloutlist>
<callout arearefs="callout_tests.test_section.c0" id="callout_tests.test_section.c1">
<para>
create a uniform_int distribution
</para>
</callout>
</calloutlist>
<para>
Example 2:
</para>
<para>
<programlisting><phrase role="keyword">int</phrase> <phrase role="identifier">roll_die</phrase><phrase role="special">()</phrase> <phrase role="special">{</phrase>
<co id="callout_tests.test_section.c2" linkends="callout_tests.test_section.c3" /><phrase role="identifier">boost</phrase><phrase role="special">::</phrase><phrase role="identifier">variate_generator</phrase><phrase role="special"><</phrase><phrase role="identifier">boost</phrase><phrase role="special">::</phrase><phrase role="identifier">mt19937</phrase><phrase role="special">&,</phrase> <phrase role="identifier">boost</phrase><phrase role="special">::</phrase><phrase role="identifier">uniform_int</phrase><phrase role="special"><></phrase> <phrase role="special">></phrase> <phrase role="identifier">die</phrase><phrase role="special">(</phrase><phrase role="identifier">gen</phrase><phrase role="special">,</phrase> <phrase role="identifier">dist</phrase><phrase role="special">);</phrase>
<phrase role="special">}</phrase>
</programlisting>
</para>
<calloutlist>
<callout arearefs="callout_tests.test_section.c2" id="callout_tests.test_section.c3">
<important>
<para>
test
</para>
</important>
</callout>
</calloutlist>
<para>
Example 3:
</para>
<para>
<programlisting><phrase role="keyword">int</phrase> <phrase role="identifier">roll_die</phrase><phrase role="special">()</phrase> <phrase role="special">{</phrase>
<co id="callout_tests.test_section.c4" linkends="callout_tests.test_section.c5" /><phrase role="identifier">boost</phrase><phrase role="special">::</phrase><phrase role="identifier">variate_generator</phrase><phrase role="special"><</phrase><phrase role="identifier">boost</phrase><phrase role="special">::</phrase><phrase role="identifier">mt19937</phrase><phrase role="special">&,</phrase> <phrase role="identifier">boost</phrase><phrase role="special">::</phrase><phrase role="identifier">uniform_int</phrase><phrase role="special"><></phrase> <phrase role="special">></phrase> <phrase role="identifier">die</phrase><phrase role="special">(</phrase><phrase role="identifier">gen</phrase><phrase role="special">,</phrase> <phrase role="identifier">dist</phrase><phrase role="special">);</phrase>
<phrase role="special">}</phrase>
</programlisting>
</para>
<calloutlist>
<callout arearefs="callout_tests.test_section.c4" id="callout_tests.test_section.c5">
<important>
<para>
test
</para>
</important>
</callout>
</calloutlist>
<para>
Example 3 (again!):
</para>
<para>
<programlisting><phrase role="keyword">int</phrase> <phrase role="identifier">roll_die</phrase><phrase role="special">()</phrase> <phrase role="special">{</phrase>
<co id="callout_tests.test_section.c6" linkends="callout_tests.test_section.c7" /><phrase role="identifier">boost</phrase><phrase role="special">::</phrase><phrase role="identifier">variate_generator</phrase><phrase role="special"><</phrase><phrase role="identifier">boost</phrase><phrase role="special">::</phrase><phrase role="identifier">mt19937</phrase><phrase role="special">&,</phrase> <phrase role="identifier">boost</phrase><phrase role="special">::</phrase><phrase role="identifier">uniform_int</phrase><phrase role="special"><></phrase> <phrase role="special">></phrase> <phrase role="identifier">die</phrase><phrase role="special">(</phrase><phrase role="identifier">gen</phrase><phrase role="special">,</phrase> <phrase role="identifier">dist</phrase><phrase role="special">);</phrase>
<phrase role="special">}</phrase>
</programlisting>
</para>
<calloutlist>
<callout arearefs="callout_tests.test_section.c6" id="callout_tests.test_section.c7">
<important>
<para>
test
</para>
</important>
</callout>
</calloutlist>
<para>
Example 4:
</para>
<para>
<programlisting><phrase role="keyword">int</phrase> <phrase role="identifier">roll_die</phrase><phrase role="special">()</phrase> <phrase role="special">{</phrase>
<co id="callout_tests.test_section.c8" linkends="callout_tests.test_section.c9" /><phrase role="identifier">boost</phrase><phrase role="special">::</phrase><phrase role="identifier">variate_generator</phrase><phrase role="special"><</phrase><phrase role="identifier">boost</phrase><phrase role="special">::</phrase><phrase role="identifier">mt19937</phrase><phrase role="special">&,</phrase> <phrase role="identifier">boost</phrase><phrase role="special">::</phrase><phrase role="identifier">uniform_int</phrase><phrase role="special"><></phrase> <phrase role="special">></phrase> <phrase role="identifier">die</phrase><phrase role="special">(</phrase><phrase role="identifier">gen</phrase><phrase role="special">,</phrase> <phrase role="identifier">dist</phrase><phrase role="special">);</phrase>
<co id="callout_tests.test_section.c10" linkends="callout_tests.test_section.c11" /><phrase role="identifier">boost</phrase><phrase role="special">::</phrase><phrase role="identifier">uniform_int</phrase><phrase role="special"><></phrase> <phrase role="identifier">dist</phrase><phrase role="special">(</phrase><phrase role="number">1</phrase><phrase role="special">,</phrase> <phrase role="number">6</phrase><phrase role="special">);</phrase> <co id="callout_tests.test_section.c12" linkends="callout_tests.test_section.c13" />
<phrase role="special">}</phrase>
</programlisting>
</para>
<calloutlist>
<callout arearefs="callout_tests.test_section.c8" id="callout_tests.test_section.c9">
<para>
callout 1
</para>
</callout>
<callout arearefs="callout_tests.test_section.c10" id="callout_tests.test_section.c11">
<para>
callout 2
</para>
</callout>
<callout arearefs="callout_tests.test_section.c12" id="callout_tests.test_section.c13">
<para>
create a uniform_int distribution
</para>
</callout>
</calloutlist>
<para>
<programlisting><co id="callout_tests.test_section.c14" linkends="callout_tests.test_section.c15" /><phrase role="identifier">boost</phrase><phrase role="special">::</phrase><phrase role="identifier">uniform_int</phrase><phrase role="special"><></phrase> <phrase role="identifier">dist</phrase><phrase role="special">(</phrase><phrase role="number">1</phrase><phrase role="special">,</phrase> <phrase role="number">6</phrase><phrase role="special">);</phrase> <co id="callout_tests.test_section.c16" linkends="callout_tests.test_section.c17" />
</programlisting>
</para>
<calloutlist>
<callout arearefs="callout_tests.test_section.c14" id="callout_tests.test_section.c15">
<para>
callout 2
</para>
</callout>
<callout arearefs="callout_tests.test_section.c16" id="callout_tests.test_section.c17">
<para>
create a uniform_int distribution
</para>
</callout>
</calloutlist>
</section>
</article>
| {
"pile_set_name": "Github"
} |
<!--
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
-->
# Release Notes
### 1.0.0 (Mar 25, 2015)
* CB-8739 added missing license headers
* Add @Override to CustomConfigXmlParser methods
* Change ID to cordova-plugin-whitelist rather than reverse-DNS-style
* Tweak CSP examples in README
* CB-8660 remove extra commas from package.json
| {
"pile_set_name": "Github"
} |
# -*- coding: utf-8 -*-
#
# SPDX-FileCopyrightText: © 2017 The glucometerutils Authors
# SPDX-License-Identifier: MIT
"""Driver for FreeStyle Libre devices.
Supported features:
- get readings (sensor, flash and blood glucose), including comments;
- get and set date and time;
- get serial number and software version;
- get and set patient name;
- memory reset (caution!)
Expected device path: /dev/hidraw9 or similar HID device. Optional when using
HIDAPI.
Further information on the device protocol can be found at
https://protocols.glucometers.tech/abbott/freestyle-libre
"""
import datetime
from typing import Dict, Generator, Mapping, Optional, Sequence, Tuple, Type
from glucometerutils import common
from glucometerutils.support import freestyle
# Fields of the records returned by both $history? and $arresult?
# Each map is a tuple of (index, field name) pairs: the index is the
# position of the field within the comma-separated record returned by
# the device, the name is the key used in the parsed dictionary.
_BASE_ENTRY_MAP = (
    (0, "device_id"),
    (1, "type"),
    (2, "month"),
    (3, "day"),
    (4, "year"),  # 2-digits
    (5, "hour"),
    (6, "minute"),
    (7, "second"),
)

# Fields of the records returned by $history?
_HISTORY_ENTRY_MAP = _BASE_ENTRY_MAP + (
    (13, "value"),
    (15, "errors"),
)

# Fields of the results returned by $arresult? where type = 2
_ARRESULT_TYPE2_ENTRY_MAP = (
    (9, "reading-type"),  # 0 = glucose blood strip,
    # 1 = ketone blood strip,
    # 2 = glucose sensor
    (12, "value"),
    (15, "sport-flag"),
    (16, "medication-flag"),
    (17, "rapid-acting-flag"),  # see _ARRESULT_RAPID_INSULIN_ENTRY_MAP
    (18, "long-acting-flag"),
    (19, "custom-comments-bitfield"),
    (23, "double-long-acting-insulin"),
    (25, "food-flag"),
    (26, "food-carbs-grams"),
    (28, "errors"),
)

# Fields of the results returned by $arresult? where type = 5
# (time adjustment events: the "old_*" fields carry the pre-adjustment time).
_ARRESULT_TIME_ADJUSTMENT_ENTRY_MAP = (
    (9, "old_month"),
    (10, "old_day"),
    (11, "old_year"),
    (12, "old_hour"),
    (13, "old_minute"),
    (14, "old_second"),
)

# Fields only valid when rapid-acting-flag is "1"
_ARRESULT_RAPID_INSULIN_ENTRY_MAP = ((43, "double-rapid-acting-insulin"),)
def _parse_record(
record: Sequence[str], entry_map: Sequence[Tuple[int, str]]
) -> Dict[str, int]:
"""Parses a list of string fields into a dictionary of integers."""
if not record:
return {}
try:
return {key: int(record[idx]) for idx, key in entry_map}
except IndexError:
return {}
def _extract_timestamp(
parsed_record: Mapping[str, int], prefix: str = ""
) -> datetime.datetime:
"""Extract the timestamp from a parsed record.
This leverages the fact that all the records have the same base structure.
"""
return datetime.datetime(
parsed_record[prefix + "year"] + 2000,
parsed_record[prefix + "month"],
parsed_record[prefix + "day"],
parsed_record[prefix + "hour"],
parsed_record[prefix + "minute"],
parsed_record[prefix + "second"],
)
def _parse_arresult(record: Sequence[str]) -> Optional[common.AnyReading]:
    """Parse a single $arresult? record into a reading object.

    Args:
        record: the raw string fields of one $arresult? record.

    Returns:
        A GlucoseReading or KetoneReading for type-2 records, a
        TimeAdjustment for type-5 records, or None when the record is
        empty, of an unsupported type, flagged with errors, or of an
        unknown reading type.

    Raises:
        ValueError: if a ketone record carries an invalid (None) value.
    """
    parsed_record = _parse_record(record, _BASE_ENTRY_MAP)

    # There are other record types, but we don't currently need to expose these.
    # Type 2 carries measurement results; type 5 is a time adjustment event.
    if not parsed_record:
        return None
    elif parsed_record["type"] == 2:
        parsed_record.update(_parse_record(record, _ARRESULT_TYPE2_ENTRY_MAP))
    elif parsed_record["type"] == 5:
        parsed_record.update(_parse_record(record, _ARRESULT_TIME_ADJUSTMENT_ENTRY_MAP))
        # The base fields hold the new (adjusted) time, the "old_" fields
        # hold the time before the adjustment.
        return common.TimeAdjustment(
            _extract_timestamp(parsed_record),
            _extract_timestamp(parsed_record, "old_"),
            extra_data={"device_id": parsed_record["device_id"]},
        )
    else:
        return None

    # Check right away if we have rapid insulin; its field only exists in the
    # record when the flag is set.
    if parsed_record["rapid-acting-flag"]:
        parsed_record.update(_parse_record(record, _ARRESULT_RAPID_INSULIN_ENTRY_MAP))

    # Records flagged with errors are considered invalid and dropped.
    if parsed_record["errors"]:
        return None

    comment_parts = []
    measure_method: Optional[common.MeasurementMethod] = None
    cls: Optional[Type[common.AnyReading]] = None
    value: Optional[float] = None

    # Dispatch on the reading type: 2 = sensor scan, 0 = blood glucose strip,
    # 1 = blood ketone strip (see _ARRESULT_TYPE2_ENTRY_MAP).
    if parsed_record["reading-type"] == 2:
        comment_parts.append("(Scan)")
        measure_method = common.MeasurementMethod.CGM
        cls = common.GlucoseReading
        value = parsed_record["value"]
    elif parsed_record["reading-type"] == 0:
        comment_parts.append("(Blood)")
        measure_method = common.MeasurementMethod.BLOOD_SAMPLE
        cls = common.GlucoseReading
        value = parsed_record["value"]
    elif parsed_record["reading-type"] == 1:
        comment_parts.append("(Ketone)")
        measure_method = common.MeasurementMethod.BLOOD_SAMPLE
        cls = common.KetoneReading
        # automatically convert the raw value in mmol/L
        # NOTE(review): defensive check — _parse_record only produces ints,
        # so a None value here should not normally occur.
        raw_value = parsed_record["value"]
        if raw_value is None:
            raise ValueError(f"Invalid Ketone value: {parsed_record!r}")
        value = freestyle.convert_ketone_unit(raw_value)
    else:
        # unknown reading
        return None

    # Fields 29..34 hold up to six custom comment strings; the bitfield
    # selects which of them apply to this reading. The [1:-1] slice strips
    # the first and last character (presumably surrounding quotes — confirm
    # against the device protocol).
    custom_comments = record[29:35]
    for comment_index in range(6):
        if parsed_record["custom-comments-bitfield"] & (1 << comment_index):
            comment_parts.append(custom_comments[comment_index][1:-1])

    if parsed_record["sport-flag"]:
        comment_parts.append("Sport")

    if parsed_record["medication-flag"]:
        comment_parts.append("Medication")

    if parsed_record["food-flag"]:
        grams = parsed_record["food-carbs-grams"]
        if grams:
            comment_parts.append(f"Food ({grams} g)")
        else:
            comment_parts.append("Food")

    # Insulin doses are stored doubled (half-unit resolution), hence the /2.
    if parsed_record["long-acting-flag"]:
        insulin = parsed_record["double-long-acting-insulin"] / 2
        if insulin:
            comment_parts.append(f"Long-acting insulin ({insulin:.1f})")
        else:
            comment_parts.append("Long-acting insulin")

    if parsed_record["rapid-acting-flag"]:
        # This record does not always exist, so calculate it only when present.
        if "double-rapid-acting-insulin" in parsed_record:
            rapid_insulin = parsed_record["double-rapid-acting-insulin"] / 2
            comment_parts.append(f"Rapid-acting insulin ({rapid_insulin:.1f})")
        else:
            comment_parts.append("Rapid-acting insulin")

    reading = cls(
        _extract_timestamp(parsed_record),
        value,
        comment="; ".join(comment_parts),
        measure_method=measure_method,
        extra_data={"device_id": parsed_record["device_id"]},
    )

    return reading
class Device(freestyle.FreeStyleHidDevice):
    """Glucometer driver for FreeStyle Libre devices."""

    def __init__(self, device_path: Optional[str]) -> None:
        # 0x3650 is presumably the USB product id of the Libre reader —
        # confirm against the freestyle support module.
        super().__init__(0x3650, device_path)

    def get_meter_info(self) -> common.MeterInfo:
        """Return the device information in structured form."""
        return common.MeterInfo(
            "FreeStyle Libre",
            serial_number=self.get_serial_number(),
            version_info=("Software version: " + self._get_version(),),
            native_unit=self.get_glucose_unit(),
            patient_name=self.get_patient_name(),
        )

    def get_serial_number(self) -> str:
        """Overridden function as the command is not compatible."""
        return self._session.send_text_command(b"$sn?").rstrip("\r\n")

    def get_glucose_unit(self) -> common.Unit:  # pylint: disable=no-self-use
        """Returns the glucose unit of the device."""
        # TODO(Flameeyes): figure out how to identify the actual unit on the
        # device.
        return common.Unit.MG_DL

    def get_readings(self) -> Generator[common.AnyReading, None, None]:
        # Yields all stored readings: first the automatic sensor history
        # ($history?), then explicit scans, blood tests and other events
        # ($arresult?).

        # First of all get the usually longer list of sensor readings, and
        # convert them to Readings objects.
        for record in self._session.query_multirecord(b"$history?"):
            parsed_record = _parse_record(record, _HISTORY_ENTRY_MAP)

            if not parsed_record or parsed_record["errors"] != 0:
                # The reading is considered invalid, so ignore it.
                continue

            yield common.GlucoseReading(
                _extract_timestamp(parsed_record),
                parsed_record["value"],
                comment="(Sensor)",
                measure_method=common.MeasurementMethod.CGM,
                extra_data={"device_id": parsed_record["device_id"]},
            )

        # Then get the results of explicit scans and blood tests (and other
        # events).
        for record in self._session.query_multirecord(b"$arresult?"):
            reading = _parse_arresult(record)
            if reading:
                yield reading

    def zero_log(self) -> None:
        # Caution: irreversibly clears the patient memory on the device.
        self._session.send_text_command(b"$resetpatient")
| {
"pile_set_name": "Github"
} |
TIMESTAMP = 1591186952
SHA256 (rubygem/azure_mgmt_devtestlabs-0.18.1.gem) = 15e16bda310cc363dc4988d5c748f999e661a1f4f704dbf27b1d5d8d1013616f
SIZE (rubygem/azure_mgmt_devtestlabs-0.18.1.gem) = 154624
| {
"pile_set_name": "Github"
} |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkbaas.endpoint import endpoint_data
class DescribeOrganizationMembersRequest(RpcRequest):
    """RPC request for the Baas `DescribeOrganizationMembers` API (version 2018-07-31)."""

    def __init__(self):
        RpcRequest.__init__(self, 'Baas', '2018-07-31', 'DescribeOrganizationMembers')
        self.set_method('POST')
        # Populate endpoint routing data when the SDK core exposes the hooks.
        if hasattr(self, "endpoint_map"):
            self.endpoint_map = endpoint_data.getEndpointMap()
        if hasattr(self, "endpoint_regional"):
            self.endpoint_regional = endpoint_data.getEndpointRegional()

    def get_OrganizationId(self):
        """Return the `OrganizationId` body parameter, or None if unset."""
        return self.get_body_params().get('OrganizationId')

    def set_OrganizationId(self, OrganizationId):
        """Set the `OrganizationId` body parameter."""
        self.add_body_params('OrganizationId', OrganizationId)

    def get_Location(self):
        """Return the `Location` body parameter, or None if unset."""
        return self.get_body_params().get('Location')

    def set_Location(self, Location):
        """Set the `Location` body parameter."""
        self.add_body_params('Location', Location)
"pile_set_name": "Github"
} |
/*
* This file is part of Flowplayer, http://flowplayer.org
*
* By: Anssi Piirainen, <[email protected]>
* Copyright (c) 2008-2011 Flowplayer Oy
* H.264 support by: Arjen Wagenaar, <[email protected]>
* Copyright (c) 2009 CodeShop B.V.
*
* Released under the MIT License:
* http://www.opensource.org/licenses/mit-license.php
*/
package org.flowplayer.pseudostreaming {
import org.flowplayer.model.Clip;
public class H264SeekDataStore extends DefaultSeekDataStore {

    /**
     * Collects the keyframe (seek point) times from the clip metadata.
     * H.264 metadata exposes seek points as objects carrying a 'time' field.
     */
    override protected function extractKeyFrameTimes(metaData:Object):Array {
        var result:Array = [];
        var count:Number = metaData.seekpoints.length;
        for (var i:Number = 0; i < count; ++i) {
            result[i] = Number(metaData.seekpoints[i]['time']);
        }
        return result;
    }

    /**
     * Returns the server query parameter value for the given keyframe index:
     * the keyframe time plus a small epsilon so the request lands just past it.
     */
    override protected function queryParamValue(pos:Number):Number {
        return _keyFrameTimes[pos] + 0.01;
    }
}
} | {
"pile_set_name": "Github"
} |
<Project Sdk="Microsoft.NET.Sdk.Web">
<PropertyGroup>
<TargetFramework>netcoreapp2.1</TargetFramework>
</PropertyGroup>
<ItemGroup>
<PackageReference Include="Microsoft.AspNetCore.App" />
</ItemGroup>
</Project>
| {
"pile_set_name": "Github"
} |
// +build !windows
/*
* Copyright (C) 2020 The "MysteriumNetwork/node" Authors.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package logconfig
// defaultLogPath returns the directory used for supervisor log files on
// non-Windows platforms. The returned error is always nil on this build.
func defaultLogPath() (string, error) {
	const supervisorLogDir = "/var/log/myst_supervisor"
	return supervisorLogDir, nil
}
| {
"pile_set_name": "Github"
} |
.class interface abstract Landroid/support/v4/app/NotificationCompat$NotificationCompatImpl;
.super Ljava/lang/Object;
# virtual methods
.method public abstract build(Landroid/support/v4/app/NotificationCompat$Builder;Landroid/support/v4/app/NotificationCompat$BuilderExtender;)Landroid/app/Notification;
.end method
.method public abstract getAction(Landroid/app/Notification;I)Landroid/support/v4/app/NotificationCompat$Action;
.end method
.method public abstract getActionCount(Landroid/app/Notification;)I
.end method
.method public abstract getActionsFromParcelableArrayList(Ljava/util/ArrayList;)[Landroid/support/v4/app/NotificationCompat$Action;
.end method
.method public abstract getBundleForUnreadConversation(Landroid/support/v4/app/NotificationCompatBase$UnreadConversation;)Landroid/os/Bundle;
.end method
.method public abstract getCategory(Landroid/app/Notification;)Ljava/lang/String;
.end method
.method public abstract getExtras(Landroid/app/Notification;)Landroid/os/Bundle;
.end method
.method public abstract getGroup(Landroid/app/Notification;)Ljava/lang/String;
.end method
.method public abstract getLocalOnly(Landroid/app/Notification;)Z
.end method
.method public abstract getParcelableArrayListForActions([Landroid/support/v4/app/NotificationCompat$Action;)Ljava/util/ArrayList;
.end method
.method public abstract getSortKey(Landroid/app/Notification;)Ljava/lang/String;
.end method
.method public abstract getUnreadConversationFromBundle(Landroid/os/Bundle;Landroid/support/v4/app/NotificationCompatBase$UnreadConversation$Factory;Landroid/support/v4/app/RemoteInputCompatBase$RemoteInput$Factory;)Landroid/support/v4/app/NotificationCompatBase$UnreadConversation;
.end method
.method public abstract isGroupSummary(Landroid/app/Notification;)Z
.end method
| {
"pile_set_name": "Github"
} |
/*!
* AngularJS Material Design
* https://github.com/angular/material
* @license MIT
* v1.2.1-master-19c6c75
*/
md-progress-linear.md-THEME_NAME-theme .md-container {
background-color: '{{primary-100}}'; }
md-progress-linear.md-THEME_NAME-theme .md-bar {
background-color: '{{primary-color}}'; }
md-progress-linear.md-THEME_NAME-theme.md-warn .md-container {
background-color: '{{warn-100}}'; }
md-progress-linear.md-THEME_NAME-theme.md-warn .md-bar {
background-color: '{{warn-color}}'; }
md-progress-linear.md-THEME_NAME-theme.md-accent .md-container {
background-color: '{{accent-100}}'; }
md-progress-linear.md-THEME_NAME-theme.md-accent .md-bar {
background-color: '{{accent-color}}'; }
md-progress-linear.md-THEME_NAME-theme[md-mode=buffer].md-primary .md-bar1 {
background-color: '{{primary-100}}'; }
md-progress-linear.md-THEME_NAME-theme[md-mode=buffer].md-primary .md-dashed:before {
background: radial-gradient("{{primary-100}}" 0%, "{{primary-100}}" 16%, transparent 42%); }
md-progress-linear.md-THEME_NAME-theme[md-mode=buffer].md-warn .md-bar1 {
background-color: '{{warn-100}}'; }
md-progress-linear.md-THEME_NAME-theme[md-mode=buffer].md-warn .md-dashed:before {
background: radial-gradient("{{warn-100}}" 0%, "{{warn-100}}" 16%, transparent 42%); }
md-progress-linear.md-THEME_NAME-theme[md-mode=buffer].md-accent .md-bar1 {
background-color: '{{accent-100}}'; }
md-progress-linear.md-THEME_NAME-theme[md-mode=buffer].md-accent .md-dashed:before {
background: radial-gradient("{{accent-100}}" 0%, "{{accent-100}}" 16%, transparent 42%); }
| {
"pile_set_name": "Github"
} |
/*
Application : Ring Lessons
Author : Mahmoud Fayed
Date : 2017.12.15
*/
# Platform detection: each call prints a truth value for whether the
# interpreter is running on that operating system / platform.
? iswindows()
? islinux()
? ismacosx()
? ismsdos()
? iswindows64()
? isandroid()
# Directory handling: remember the current directory, switch to a
# hard-coded path (b:\ring — machine-specific; adjust for your setup),
# print it, then restore and print the original directory.
cDir = currentdir()
? cDir
chdir("b:\ring")
? currentdir()
chdir(cDir)
? currentdir()
| {
"pile_set_name": "Github"
} |
/***************************************************************
 * [File]    setipwindow.h
 * [Purpose] Window for configuring the server's IP address
 * [Author]  cnDengyu, 2020/04/29
 * [Changes] (must be noted here if modified)
 ****************************************************************/
#ifndef SETIPWINDOW_H
#define SETIPWINDOW_H
#include <QMainWindow>
// Forward declaration of the uic-generated UI class for this window.
namespace Ui
{
class SetIPWindow;
}
class QtGUIAdaptor;
/***************************************************************
 * [Class]   SetIPWindow
 * [Purpose] Student-client window used to set the server IP address
 * [API]     No public interface beyond construction/destruction
 * [Author]  cnDengyu, 2020/04/29
 * [Changes] (must be noted here if modified)
 ****************************************************************/
class SetIPWindow: public QMainWindow {
Q_OBJECT
public:
// Takes a pointer to the GUI adaptor used to talk to the backend.
explicit SetIPWindow(QtGUIAdaptor*, QWidget* parent = nullptr);
~SetIPWindow();
private slots:
// Slot for the login button's clicked() signal (Qt auto-connect naming).
void on_loginButton_clicked();
private:
Ui::SetIPWindow* ui; // uic-generated widgets, owned by this window
QtGUIAdaptor* guiAdaptor; // not owned — presumably outlives this window (TODO confirm)
};
#endif // SETIPWINDOW_H
| {
"pile_set_name": "Github"
} |
/*******************************************************************************
* Copyright (c) 2000, 2014 QNX Software Systems and others.
*
* This program and the accompanying materials
* are made available under the terms of the Eclipse Public License 2.0
* which accompanies this distribution, and is available at
* https://www.eclipse.org/legal/epl-2.0/
*
* SPDX-License-Identifier: EPL-2.0
*
* Contributors:
* QNX Software Systems - Initial API and implementation
* Sergey Prigogin (Google)
* Anton Leherbauer (Wind River Systems)
*******************************************************************************/
package org.eclipse.cdt.internal.ui.preferences.formatter;
import java.util.HashMap;
import java.util.Map;
import java.util.Observable;
import org.eclipse.cdt.core.CCorePlugin;
import org.eclipse.cdt.core.CCorePreferenceConstants;
import org.eclipse.cdt.internal.ui.ICHelpContextIds;
import org.eclipse.cdt.internal.ui.preferences.PreferencesAccess;
import org.eclipse.cdt.ui.CUIPlugin;
import org.eclipse.cdt.utils.ui.controls.ControlFactory;
import org.eclipse.core.resources.IProject;
import org.eclipse.core.runtime.IConfigurationElement;
import org.eclipse.core.runtime.IExtension;
import org.eclipse.core.runtime.IExtensionPoint;
import org.eclipse.core.runtime.Platform;
import org.eclipse.core.runtime.preferences.IEclipsePreferences;
import org.eclipse.core.runtime.preferences.IScopeContext;
import org.eclipse.jface.resource.JFaceResources;
import org.eclipse.swt.SWT;
import org.eclipse.swt.events.SelectionAdapter;
import org.eclipse.swt.events.SelectionEvent;
import org.eclipse.swt.layout.GridData;
import org.eclipse.swt.widgets.Combo;
import org.eclipse.swt.widgets.Composite;
import org.eclipse.swt.widgets.Control;
import org.eclipse.ui.PlatformUI;
/**
 * Preference-page block that lets the user choose the code formatter in a
 * combo box. If no third-party formatter is contributed, nothing is shown.
 * Registered observers are notified with the selected formatter id
 * whenever the selection changes.
 */
public class CustomCodeFormatterBlock extends Observable {
// Display name -> formatter extension id (includes the built-in default).
private final Map<String, String> idMap = new HashMap<>();
// Preference node the selection is persisted to (project or instance scope).
private IEclipsePreferences fPrefs;
// Formatter id used when no explicit selection is stored.
private String fDefaultFormatterId;
// Selector widget; null until createContents() runs, and never created
// when only one formatter is registered.
private Combo fFormatterCombo;
private static final String ATTR_NAME = "name"; //$NON-NLS-1$
private static final String ATTR_ID = "id"; //$NON-NLS-1$
// Display name of the built-in (default) formatter entry.
private static final String DEFAULT = FormatterMessages.CustomCodeFormatterBlock_default_formatter;
/**
 * Creates the block and resolves the default formatter id from the
 * appropriate preference scope.
 *
 * @param project the project whose settings are edited, or {@code null}
 *                for workspace-level settings
 * @param access  accessor for the preference scopes
 */
public CustomCodeFormatterBlock(IProject project, PreferencesAccess access) {
final IScopeContext scope;
final IEclipsePreferences defaults;
if (project != null) {
scope = access.getProjectScope(project);
defaults = access.getInstanceScope().getNode(CCorePlugin.PLUGIN_ID);
} else {
scope = access.getInstanceScope();
defaults = access.getDefaultScope().getNode(CCorePlugin.PLUGIN_ID);
}
fPrefs = scope.getNode(CCorePlugin.PLUGIN_ID);
fDefaultFormatterId = defaults.get(CCorePreferenceConstants.CODE_FORMATTER, null);
if (fDefaultFormatterId == null) {
// Backward compatibility: the formatter id used to live in the UI
// plug-in's preferences; migrate it over to the core plug-in's node.
IEclipsePreferences instance = access.getInstanceScope().getNode(CUIPlugin.PLUGIN_ID);
fDefaultFormatterId = instance.get(CCorePreferenceConstants.CODE_FORMATTER, null);
if (fDefaultFormatterId != null) {
instance.remove(CCorePreferenceConstants.CODE_FORMATTER);
if (project != null) {
defaults.put(CCorePreferenceConstants.CODE_FORMATTER, fDefaultFormatterId);
}
}
}
initializeFormatters();
}
/**
 * Persists the current combo selection. Choosing the default entry (or an
 * unknown name) resets the stored preference instead of writing an id.
 */
public void performOk() {
if (fFormatterCombo == null) {
return;
}
String text = fFormatterCombo.getText();
String formatterId = idMap.get(text);
if (formatterId != null && !formatterId.equals(fDefaultFormatterId)) {
fPrefs.put(CCorePreferenceConstants.CODE_FORMATTER, formatterId);
} else {
// Simply reset to the default one.
performDefaults();
}
}
/**
 * Removes any stored selection, re-selects the default formatter in the
 * combo, and notifies observers of the change.
 */
public void performDefaults() {
fPrefs.remove(CCorePreferenceConstants.CODE_FORMATTER);
if (fFormatterCombo == null) {
return;
}
fFormatterCombo.clearSelection();
String formatter = getFormatterById(fDefaultFormatterId);
fFormatterCombo.setText(formatter);
handleFormatterChanged();
}
/**
 * Seeds or clears the project-scope preference when project-specific
 * settings are toggled on the enclosing preference page.
 */
public void enableProjectSpecificSettings(boolean useProjectSpecificSettings) {
if (useProjectSpecificSettings) {
if (fDefaultFormatterId != null)
fPrefs.put(CCorePreferenceConstants.CODE_FORMATTER, fDefaultFormatterId);
} else {
initDefault();
}
}
/**
 * Returns the currently selected formatter id.
 *
 * @return the selected formatter id or {@code null} if the default is selected.
 */
public String getFormatterId() {
if (fFormatterCombo == null) {
// No UI created yet: fall back to the stored preference.
return fPrefs.get(CCorePreferenceConstants.CODE_FORMATTER, fDefaultFormatterId);
}
return idMap.get(fFormatterCombo.getText());
}
/**
 * Builds the selector UI. Returns the parent unchanged when only the
 * built-in formatter is registered, since there is nothing to choose.
 *
 * @param parent the parent composite
 * @return the created group, or {@code parent} if no selector is needed
 */
public Control createContents(Composite parent) {
if (idMap.size() == 1) {
return parent; // No selector is needed since there is only one formatter.
}
Composite composite = ControlFactory.createGroup(parent,
FormatterMessages.CustomCodeFormatterBlock_formatter_name, 1);
((GridData) composite.getLayoutData()).horizontalSpan = 5;
PlatformUI.getWorkbench().getHelpSystem().setHelp(composite, ICHelpContextIds.CODEFORMATTER_PREFERENCE_PAGE);
fFormatterCombo = new Combo(composite, SWT.DROP_DOWN | SWT.READ_ONLY);
fFormatterCombo.setFont(parent.getFont());
fFormatterCombo.setLayoutData(new GridData(GridData.FILL_HORIZONTAL));
fFormatterCombo.addSelectionListener(new SelectionAdapter() {
@Override
public void widgetSelected(SelectionEvent e) {
handleFormatterChanged();
}
});
for (String item : idMap.keySet()) {
fFormatterCombo.add(item);
}
final String noteTitle = FormatterMessages.CustomCodeFormatterBlock_formatter_note;
final String noteMessage = FormatterMessages.CustomCodeFormatterBlock_contributed_formatter_warning;
ControlFactory.createNoteComposite(JFaceResources.getDialogFont(), composite, noteTitle, noteMessage);
initDefault();
return composite;
}
// Marks this observable changed and pushes the new formatter id to observers.
private void handleFormatterChanged() {
setChanged();
String formatterId = getFormatterId();
notifyObservers(formatterId);
}
// Selects the stored (or default) formatter in the combo box.
private void initDefault() {
if (fFormatterCombo == null) {
return;
}
String formatterID = fPrefs.get(CCorePreferenceConstants.CODE_FORMATTER, fDefaultFormatterId);
fFormatterCombo.setText(getFormatterById(formatterID));
}
// Reverse lookup: formatter id -> display name; falls back to the default entry.
private String getFormatterById(String formatterId) {
String formatter = DEFAULT;
if (formatterId != null) {
for (Map.Entry<String, String> entry : idMap.entrySet()) {
String val = entry.getValue();
if (formatterId.equals(val)) {
formatter = entry.getKey();
break;
}
}
}
return formatter;
}
// Populates idMap with the built-in formatter plus every contribution to
// the CCorePlugin formatter extension point.
private void initializeFormatters() {
idMap.clear();
idMap.put(DEFAULT, CCorePreferenceConstants.DEFAULT_CODE_FORMATTER);
IExtensionPoint point = Platform.getExtensionRegistry().getExtensionPoint(CCorePlugin.PLUGIN_ID,
CCorePlugin.FORMATTER_EXTPOINT_ID);
if (point != null) {
IExtension[] exts = point.getExtensions();
for (IExtension ext : exts) {
IConfigurationElement[] elements = ext.getConfigurationElements();
for (int j = 0; j < elements.length; ++j) {
String name = elements[j].getAttribute(ATTR_NAME);
String id = elements[j].getAttribute(ATTR_ID);
idMap.put(name, id);
}
}
}
}
}
| {
"pile_set_name": "Github"
} |
/*************************************************************************//**
*****************************************************************************
* @file test.c
* @brief TestA, TestB and TestC are procs for test
* @author Forrest Y. Yu
* @date 2011
*****************************************************************************
*****************************************************************************/
#include "type.h"
#include "config.h"
#include "stdio.h"
#include "const.h"
#include "protect.h"
#include "string.h"
#include "fs.h"
#include "proc.h"
#include "tty.h"
#include "console.h"
#include "global.h"
#include "proto.h"
/**
* @def STACK_OVERFLOW_BUF_SIZE
*/
#define STACK_OVERFLOW_BUF_SIZE (0x4000 - 1024 - 48)
/*****************************************************************************
* TestA
*****************************************************************************/
/**
* The first routine for test.
* @test just for test
*****************************************************************************/
/* Fills a stack buffer nearly as large as the process stack (see
 * STACK_OVERFLOW_BUF_SIZE: 0x4000 minus slack) — presumably to exercise
 * the kernel's stack-overflow handling; TODO confirm against the
 * kernel's actual per-proc stack size. Then idles forever. */
void TestA()
{
char badboy[STACK_OVERFLOW_BUF_SIZE];
memset(badboy, 0xAB, STACK_OVERFLOW_BUF_SIZE);
while(1){}
}
/*****************************************************************************
* TestB
*****************************************************************************/
/**
* The second routine for test.
*
* @test just for test
*****************************************************************************/
/* Spins forever. The statements below the loop are intentionally
 * unreachable and only run if the loop is removed for debugging. */
void TestB()
{
while(1){}
assert(0); /* never arrive here */
spin("TestB()");
}
/*****************************************************************************
* TestC
*****************************************************************************/
/**
* The third routine for test.
*
* @test just for test
*****************************************************************************/
/* Spins forever. Everything after the while loop is intentionally dead
 * code: a file-system / TTY smoke test kept for manual use (re-enable it
 * by removing the loop). */
void TestC()
{
while(1){}
assert(0); /* never arrive here */
milli_delay(2000);
printm(" { TestC() running ... } ");
/* Create a file, write two strings, seek back and read part of it. */
char buf[SECTOR_SIZE];
memset(buf, 0xBB, SECTOR_SIZE);
int fp = open("/blah", O_CREAT | O_RDWR);
printm("fp:%d\n", fp);
write(fp, "long long ago, ", 15);
write(fp, "there was a king who loved horses", 33);
char bufr[SECTOR_SIZE];
memset(bufr, 0, SECTOR_SIZE);
lseek(fp, 0, SEEK_SET);
read(fp, bufr, 10);
printm(bufr);
close(fp);
/* Echo loop on tty2: read up to 3 bytes at a time and echo them back
 * bracketed, until a chunk begins with a space. */
int fd_stdin = open("/dev_tty2", O_RDWR);
assert(fd_stdin == 0);
int fd_stdout = open("/dev_tty2", O_RDWR);
assert(fd_stdout == 1);
char rdbuf[128];
do {
int r = read(0, rdbuf, 3);
write(1, "<", 1);
write(1, rdbuf, r);
write(1, ">", 1);
} while (rdbuf[0] != ' ');
close(1);
close(0);
spin("TestC()");
}
| {
"pile_set_name": "Github"
} |
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "uv.h"
#include "task.h"
#include <stdio.h>
#include <stdlib.h>
/* Test shape: WRITES scatter-gather writes, each carrying
 * CHUNKS_PER_WRITE buffers of CHUNK_SIZE bytes. */
#define WRITES 3
#define CHUNKS_PER_WRITE 4096
#define CHUNK_SIZE 10024 /* 10024 bytes (~10 kb) */
#define TOTAL_BYTES (WRITES * CHUNKS_PER_WRITE * CHUNK_SIZE)
/* Source buffer for all outgoing data; allocated in the test body. */
static char* send_buffer;
/* Callback counters, verified at the end of the test. */
static int shutdown_cb_called = 0;
static int connect_cb_called = 0;
static int write_cb_called = 0;
static int close_cb_called = 0;
/* Byte accounting: queued, write-completed, and echoed back. */
static size_t bytes_sent = 0;
static size_t bytes_sent_done = 0;
static size_t bytes_received_done = 0;
/* Requests referenced from the callbacks, so they live at file scope. */
static uv_connect_t connect_req;
static uv_shutdown_t shutdown_req;
static uv_write_t write_reqs[WRITES];
/* Read-buffer allocator: hands libuv a fresh heap buffer of the
 * suggested size; freed again in read_cb after each read. */
static void alloc_cb(uv_handle_t* handle, size_t size, uv_buf_t* buf) {
  buf->base = malloc(size);
  /* Fail loudly on OOM instead of handing a NULL base to the read path,
   * which would make free()/memcpy misbehave silently. */
  ASSERT(buf->base != NULL);
  buf->len = size;
}
/* Handle-close bookkeeping; the counter is checked at the end of the test. */
static void close_cb(uv_handle_t* handle) {
ASSERT(handle != NULL);
close_cb_called++;
}
/* Runs after all queued writes have drained: verifies the stream's write
 * queue is empty and every write callback already fired. */
static void shutdown_cb(uv_shutdown_t* req, int status) {
uv_tcp_t* tcp;
ASSERT(req == &shutdown_req);
ASSERT(status == 0);
tcp = (uv_tcp_t*)(req->handle);
/* The write buffer should be empty by now. */
ASSERT(tcp->write_queue_size == 0);
/* Now we wait for the EOF */
shutdown_cb_called++;
/* We should have had all the writes called already. */
ASSERT(write_cb_called == WRITES);
}
/* Accumulates the echoed byte count; on clean EOF closes the stream.
 * Always frees the buffer alloc_cb allocated for this read. */
static void read_cb(uv_stream_t* tcp, ssize_t nread, const uv_buf_t* buf) {
ASSERT(tcp != NULL);
if (nread >= 0) {
bytes_received_done += nread;
}
else {
/* Any error other than clean EOF fails the test. */
ASSERT(nread == UV_EOF);
printf("GOT EOF\n");
uv_close((uv_handle_t*)tcp, close_cb);
}
free(buf->base);
}
/* Per-write completion: aborts on any error, otherwise credits one
 * write's worth of bytes (CHUNKS_PER_WRITE * CHUNK_SIZE) as done. */
static void write_cb(uv_write_t* req, int status) {
ASSERT(req != NULL);
if (status) {
fprintf(stderr, "uv_write error: %s\n", uv_strerror(status));
ASSERT(0);
}
bytes_sent_done += CHUNKS_PER_WRITE * CHUNK_SIZE;
write_cb_called++;
}
/* Once connected: queue WRITES scatter-gather writes of CHUNKS_PER_WRITE
 * chunks each, request shutdown (which completes after the writes drain),
 * and start reading the echoed data back. */
static void connect_cb(uv_connect_t* req, int status) {
uv_buf_t send_bufs[CHUNKS_PER_WRITE];
uv_stream_t* stream;
int i, j, r;
ASSERT(req == &connect_req);
ASSERT(status == 0);
stream = req->handle;
connect_cb_called++;
/* Write a lot of data */
for (i = 0; i < WRITES; i++) {
uv_write_t* write_req = write_reqs + i;
/* Each buffer views a distinct CHUNK_SIZE slice of send_buffer;
 * bytes_sent doubles as the running offset into it. */
for (j = 0; j < CHUNKS_PER_WRITE; j++) {
send_bufs[j] = uv_buf_init(send_buffer + bytes_sent, CHUNK_SIZE);
bytes_sent += CHUNK_SIZE;
}
r = uv_write(write_req, stream, send_bufs, CHUNKS_PER_WRITE, write_cb);
ASSERT(r == 0);
}
/* Shutdown on drain. */
r = uv_shutdown(&shutdown_req, stream, shutdown_cb);
ASSERT(r == 0);
/* Start reading */
r = uv_read_start(stream, alloc_cb, read_cb);
ASSERT(r == 0);
}
/* Connects to the echo server on TEST_PORT, writes TOTAL_BYTES of zeroed
 * data and verifies every byte was queued, acknowledged, and echoed back. */
TEST_IMPL(tcp_writealot) {
struct sockaddr_in addr;
uv_tcp_t client;
int r;
ASSERT(0 == uv_ip4_addr("127.0.0.1", TEST_PORT, &addr));
/* calloc: the contents don't matter, only the byte counts do. */
send_buffer = calloc(1, TOTAL_BYTES);
ASSERT(send_buffer != NULL);
r = uv_tcp_init(uv_default_loop(), &client);
ASSERT(r == 0);
r = uv_tcp_connect(&connect_req,
&client,
(const struct sockaddr*) &addr,
connect_cb);
ASSERT(r == 0);
uv_run(uv_default_loop(), UV_RUN_DEFAULT);
/* Every callback must have fired exactly as orchestrated above. */
ASSERT(shutdown_cb_called == 1);
ASSERT(connect_cb_called == 1);
ASSERT(write_cb_called == WRITES);
ASSERT(close_cb_called == 1);
ASSERT(bytes_sent == TOTAL_BYTES);
ASSERT(bytes_sent_done == TOTAL_BYTES);
ASSERT(bytes_received_done == TOTAL_BYTES);
free(send_buffer);
MAKE_VALGRIND_HAPPY();
return 0;
}
| {
"pile_set_name": "Github"
} |
GNU GENERAL PUBLIC LICENSE
Version 2, June 1991
Copyright (C) 1989, 1991 Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
Preamble
The licenses for most software are designed to take away your
freedom to share and change it. By contrast, the GNU General Public
License is intended to guarantee your freedom to share and change free
software--to make sure the software is free for all its users. This
General Public License applies to most of the Free Software
Foundation's software and to any other program whose authors commit to
using it. (Some other Free Software Foundation software is covered by
the GNU Lesser General Public License instead.) You can apply it to
your programs, too.
When we speak of free software, we are referring to freedom, not
price. Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
this service if you wish), that you receive source code or can get it
if you want it, that you can change the software or use pieces of it
in new free programs; and that you know you can do these things.
To protect your rights, we need to make restrictions that forbid
anyone to deny you these rights or to ask you to surrender the rights.
These restrictions translate to certain responsibilities for you if you
distribute copies of the software, or if you modify it.
For example, if you distribute copies of such a program, whether
gratis or for a fee, you must give the recipients all the rights that
you have. You must make sure that they, too, receive or can get the
source code. And you must show them these terms so they know their
rights.
We protect your rights with two steps: (1) copyright the software, and
(2) offer you this license which gives you legal permission to copy,
distribute and/or modify the software.
Also, for each author's protection and ours, we want to make certain
that everyone understands that there is no warranty for this free
software. If the software is modified by someone else and passed on, we
want its recipients to know that what they have is not the original, so
that any problems introduced by others will not reflect on the original
authors' reputations.
Finally, any free program is threatened constantly by software
patents. We wish to avoid the danger that redistributors of a free
program will individually obtain patent licenses, in effect making the
program proprietary. To prevent this, we have made it clear that any
patent must be licensed for everyone's free use or not licensed at all.
The precise terms and conditions for copying, distribution and
modification follow.
GNU GENERAL PUBLIC LICENSE
TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
0. This License applies to any program or other work which contains
a notice placed by the copyright holder saying it may be distributed
under the terms of this General Public License. The "Program", below,
refers to any such program or work, and a "work based on the Program"
means either the Program or any derivative work under copyright law:
that is to say, a work containing the Program or a portion of it,
either verbatim or with modifications and/or translated into another
language. (Hereinafter, translation is included without limitation in
the term "modification".) Each licensee is addressed as "you".
Activities other than copying, distribution and modification are not
covered by this License; they are outside its scope. The act of
running the Program is not restricted, and the output from the Program
is covered only if its contents constitute a work based on the
Program (independent of having been made by running the Program).
Whether that is true depends on what the Program does.
1. You may copy and distribute verbatim copies of the Program's
source code as you receive it, in any medium, provided that you
conspicuously and appropriately publish on each copy an appropriate
copyright notice and disclaimer of warranty; keep intact all the
notices that refer to this License and to the absence of any warranty;
and give any other recipients of the Program a copy of this License
along with the Program.
You may charge a fee for the physical act of transferring a copy, and
you may at your option offer warranty protection in exchange for a fee.
2. You may modify your copy or copies of the Program or any portion
of it, thus forming a work based on the Program, and copy and
distribute such modifications or work under the terms of Section 1
above, provided that you also meet all of these conditions:
a) You must cause the modified files to carry prominent notices
stating that you changed the files and the date of any change.
b) You must cause any work that you distribute or publish, that in
whole or in part contains or is derived from the Program or any
part thereof, to be licensed as a whole at no charge to all third
parties under the terms of this License.
c) If the modified program normally reads commands interactively
when run, you must cause it, when started running for such
interactive use in the most ordinary way, to print or display an
announcement including an appropriate copyright notice and a
notice that there is no warranty (or else, saying that you provide
a warranty) and that users may redistribute the program under
these conditions, and telling the user how to view a copy of this
License. (Exception: if the Program itself is interactive but
does not normally print such an announcement, your work based on
the Program is not required to print an announcement.)
These requirements apply to the modified work as a whole. If
identifiable sections of that work are not derived from the Program,
and can be reasonably considered independent and separate works in
themselves, then this License, and its terms, do not apply to those
sections when you distribute them as separate works. But when you
distribute the same sections as part of a whole which is a work based
on the Program, the distribution of the whole must be on the terms of
this License, whose permissions for other licensees extend to the
entire whole, and thus to each and every part regardless of who wrote it.
Thus, it is not the intent of this section to claim rights or contest
your rights to work written entirely by you; rather, the intent is to
exercise the right to control the distribution of derivative or
collective works based on the Program.
In addition, mere aggregation of another work not based on the Program
with the Program (or with a work based on the Program) on a volume of
a storage or distribution medium does not bring the other work under
the scope of this License.
3. You may copy and distribute the Program (or a work based on it,
under Section 2) in object code or executable form under the terms of
Sections 1 and 2 above provided that you also do one of the following:
a) Accompany it with the complete corresponding machine-readable
source code, which must be distributed under the terms of Sections
1 and 2 above on a medium customarily used for software interchange; or,
b) Accompany it with a written offer, valid for at least three
years, to give any third party, for a charge no more than your
cost of physically performing source distribution, a complete
machine-readable copy of the corresponding source code, to be
distributed under the terms of Sections 1 and 2 above on a medium
customarily used for software interchange; or,
c) Accompany it with the information you received as to the offer
to distribute corresponding source code. (This alternative is
allowed only for noncommercial distribution and only if you
received the program in object code or executable form with such
an offer, in accord with Subsection b above.)
The source code for a work means the preferred form of the work for
making modifications to it. For an executable work, complete source
code means all the source code for all modules it contains, plus any
associated interface definition files, plus the scripts used to
control compilation and installation of the executable. However, as a
special exception, the source code distributed need not include
anything that is normally distributed (in either source or binary
form) with the major components (compiler, kernel, and so on) of the
operating system on which the executable runs, unless that component
itself accompanies the executable.
If distribution of executable or object code is made by offering
access to copy from a designated place, then offering equivalent
access to copy the source code from the same place counts as
distribution of the source code, even though third parties are not
compelled to copy the source along with the object code.
4. You may not copy, modify, sublicense, or distribute the Program
except as expressly provided under this License. Any attempt
otherwise to copy, modify, sublicense or distribute the Program is
void, and will automatically terminate your rights under this License.
However, parties who have received copies, or rights, from you under
this License will not have their licenses terminated so long as such
parties remain in full compliance.
5. You are not required to accept this License, since you have not
signed it. However, nothing else grants you permission to modify or
distribute the Program or its derivative works. These actions are
prohibited by law if you do not accept this License. Therefore, by
modifying or distributing the Program (or any work based on the
Program), you indicate your acceptance of this License to do so, and
all its terms and conditions for copying, distributing or modifying
the Program or works based on it.
6. Each time you redistribute the Program (or any work based on the
Program), the recipient automatically receives a license from the
original licensor to copy, distribute or modify the Program subject to
these terms and conditions. You may not impose any further
restrictions on the recipients' exercise of the rights granted herein.
You are not responsible for enforcing compliance by third parties to
this License.
7. If, as a consequence of a court judgment or allegation of patent
infringement or for any other reason (not limited to patent issues),
conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License. If you cannot
distribute so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you
may not distribute the Program at all. For example, if a patent
license would not permit royalty-free redistribution of the Program by
all those who receive copies directly or indirectly through you, then
the only way you could satisfy both it and this License would be to
refrain entirely from distribution of the Program.
If any portion of this section is held invalid or unenforceable under
any particular circumstance, the balance of the section is intended to
apply and the section as a whole is intended to apply in other
circumstances.
It is not the purpose of this section to induce you to infringe any
patents or other property right claims or to contest validity of any
such claims; this section has the sole purpose of protecting the
integrity of the free software distribution system, which is
implemented by public license practices. Many people have made
generous contributions to the wide range of software distributed
through that system in reliance on consistent application of that
system; it is up to the author/donor to decide if he or she is willing
to distribute software through any other system and a licensee cannot
impose that choice.
This section is intended to make thoroughly clear what is believed to
be a consequence of the rest of this License.
8. If the distribution and/or use of the Program is restricted in
certain countries either by patents or by copyrighted interfaces, the
original copyright holder who places the Program under this License
may add an explicit geographical distribution limitation excluding
those countries, so that distribution is permitted only in or among
countries not thus excluded. In such case, this License incorporates
the limitation as if written in the body of this License.
9. The Free Software Foundation may publish revised and/or new versions
of the General Public License from time to time. Such new versions will
be similar in spirit to the present version, but may differ in detail to
address new problems or concerns.
Each version is given a distinguishing version number. If the Program
specifies a version number of this License which applies to it and "any
later version", you have the option of following the terms and conditions
either of that version or of any later version published by the Free
Software Foundation. If the Program does not specify a version number of
this License, you may choose any version ever published by the Free Software
Foundation.
10. If you wish to incorporate parts of the Program into other free
programs whose distribution conditions are different, write to the author
to ask for permission. For software which is copyrighted by the Free
Software Foundation, write to the Free Software Foundation; we sometimes
make exceptions for this. Our decision will be guided by the two goals
of preserving the free status of all derivatives of our free software and
of promoting the sharing and reuse of software generally.
NO WARRANTY
11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN
OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS
TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE
PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
REPAIR OR CORRECTION.
12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
POSSIBILITY OF SUCH DAMAGES.
END OF TERMS AND CONDITIONS
How to Apply These Terms to Your New Programs
If you develop a new program, and you want it to be of the greatest
possible use to the public, the best way to achieve this is to make it
free software which everyone can redistribute and change under these terms.
To do so, attach the following notices to the program. It is safest
to attach them to the start of each source file to most effectively
convey the exclusion of warranty; and each file should have at least
the "copyright" line and a pointer to where the full notice is found.
<one line to give the program's name and a brief idea of what it does.>
Copyright (C) <year> <name of author>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
Also add information on how to contact you by electronic and paper mail.
If the program is interactive, make it output a short notice like this
when it starts in an interactive mode:
Gnomovision version 69, Copyright (C) year name of author
Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
This is free software, and you are welcome to redistribute it
under certain conditions; type `show c' for details.
The hypothetical commands `show w' and `show c' should show the appropriate
parts of the General Public License. Of course, the commands you use may
be called something other than `show w' and `show c'; they could even be
mouse-clicks or menu items--whatever suits your program.
You should also get your employer (if you work as a programmer) or your
school, if any, to sign a "copyright disclaimer" for the program, if
necessary. Here is a sample; alter the names:
Yoyodyne, Inc., hereby disclaims all copyright interest in the program
`Gnomovision' (which makes passes at compilers) written by James Hacker.
<signature of Ty Coon>, 1 April 1989
Ty Coon, President of Vice
This General Public License does not permit incorporating your program into
proprietary programs. If your program is a subroutine library, you may
consider it more useful to permit linking proprietary applications with the
library. If this is what you want to do, use the GNU Lesser General
Public License instead of this License.
| {
"pile_set_name": "Github"
} |
#begin document (wb/sel/66/sel_6608); part 000
wb/sel/66/sel_6608 -1 0 [WORD] XX (TOP* - - - - * -
wb/sel/66/sel_6608 -1 1 [WORD] XX * - - - - * -
wb/sel/66/sel_6608 -1 2 [WORD] XX * - - - - * -
wb/sel/66/sel_6608 -1 3 [WORD] XX * - - - - * -
wb/sel/66/sel_6608 -1 4 [WORD] XX * - - - - * -
wb/sel/66/sel_6608 -1 5 [WORD] XX * - - - - * -
wb/sel/66/sel_6608 -1 6 [WORD] XX * - - - - * -
wb/sel/66/sel_6608 -1 7 [WORD] VERB * inquire - 1 - * -
wb/sel/66/sel_6608 -1 8 [WORD] XX *) - - - - * -
#end document
| {
"pile_set_name": "Github"
} |
// Copyright (C) 2015 the V8 project authors. All rights reserved.
// This code is governed by the BSD license found in the LICENSE file.
/*---
desc: >
If the Initializer is present and v is undefined, the Initializer should be
evaluated and the result assigned to the target reference ("undefined"
property value defined).
template: default
es6id: 12.14.5.4
---*/
//- setup
var x;
//- elems
{ x = 1 }
//- vals
{ x: undefined }
//- body
assert.sameValue(x, 1);
| {
"pile_set_name": "Github"
} |
fileFormatVersion: 2
guid: 965efa32cf2345647a1c987546e08f86
timeCreated: 1459956391
licenseType: Store
ShaderImporter:
defaultTextures: []
userData:
assetBundleName:
assetBundleVariant:
| {
"pile_set_name": "Github"
} |
; Mobile Battle Room: scene scripts, the battle-console interaction
; script, its dialogue text, and the map's events (warps + console).

MobileBattleRoom_MapScripts:
	def_scene_scripts
	scene_script .InitializeMobileBattleRoom ; SCENE_DEFAULT
	scene_script .DummyScene ; SCENE_FINISHED

	def_callbacks

.InitializeMobileBattleRoom:
	prioritysjump .InitializeAndPreparePokecenter2F
	end

.DummyScene:
	end

.InitializeAndPreparePokecenter2F:
	; One-shot setup: mark this map finished and arm Pokecenter 2F's
	; "leaving the mobile battle room" scene for when the player exits.
	setscene SCENE_FINISHED
	setmapscene POKECENTER_2F, SCENE_POKECENTER2F_LEAVE_MOBILE_BATTLE_ROOM
	end

MobileBattleRoomConsoleScript:
	; Console dispatch: the unnamed specials return their result in
	; script variables $1/$2 — presumably mobile-link status codes
	; (TODO: confirm against the Function10* specials).
	refreshscreen
	special Function1037c2
	ifequal $1, .one
	special Function1037eb
	iffalse .false
	ifequal $1, .one_
	ifequal $2, .two_
	sjump .false

.one_
	; Announce the pre-battle heal, then fade out/in around it.
	writetext MobileBattleRoom_HealText
	pause 20
	closetext
	special FadeOutPalettes
	playmusic MUSIC_HEAL
	special LoadMapPalettes
	pause 60
	special FadeInPalettes
	special RestartMapMusic
	refreshscreen
.two_
	special StubbedTrainerRankings_Healings
	special HealParty
	special Function10383c
	iftrue .false
.one
	special Function10387b
	writetext MobileBattleRoom_EstablishingCommsText
	waitbutton
	reloadmappart
	special Function101225
.false
	closetext
	end

MobileBattleRoom_EstablishingCommsText:
	text "Establishing"
	line "communications…"
	done

MobileBattleRoom_HealText:
	text "Your #MON will"
	line "be fully healed"
	cont "before battle."
	done

MobileBattleRoom_MapEvents:
	db 0, 0 ; filler

	def_warp_events
	warp_event 4, 7, POKECENTER_2F, 6
	warp_event 5, 7, POKECENTER_2F, 6

	def_coord_events

	def_bg_events
	; The console tile; facing up and pressing A runs the script above.
	bg_event 4, 2, BGEVENT_UP, MobileBattleRoomConsoleScript

	def_object_events
| {
"pile_set_name": "Github"
} |
fileFormatVersion: 2
guid: ba42c0991b6db0a44b0c52c18a4554b6
folderAsset: yes
timeCreated: 1486726501
licenseType: Free
DefaultImporter:
userData:
assetBundleName:
assetBundleVariant:
| {
"pile_set_name": "Github"
} |
import { createLocal } from '../create/local';
import { createUTC } from '../create/utc';
import { createInvalid } from '../create/valid';
import { isMoment } from './constructor';
import { min, max } from './min-max';
import { now } from './now';
import momentPrototype from './prototype';
/**
 * Creates a local-mode moment from a Unix timestamp in seconds
 * (createLocal expects milliseconds, hence the * 1000).
 * @param {number} input seconds since the Unix epoch
 */
function createUnix (input) {
    return createLocal(input * 1000);
}

/**
 * Creates a moment that keeps the UTC offset found in the input string,
 * i.e. the same as createLocal(...).parseZone(). Accepts the same
 * arguments as createLocal.
 */
function createInZone () {
    return createLocal.apply(null, arguments).parseZone();
}
export {
now,
min,
max,
isMoment,
createUTC,
createUnix,
createLocal,
createInZone,
createInvalid,
momentPrototype
};
| {
"pile_set_name": "Github"
} |
//
// SceneDelegate.swift
// FritzImageLabelingDemo
//
// Created by Steven Yeung on 10/23/19.
// Copyright © 2019 Fritz AI. All rights reserved.
//
import UIKit
/// Scene lifecycle delegate (template boilerplate); all handlers are
/// currently no-ops beyond the guard in `scene(_:willConnectTo:options:)`.
class SceneDelegate: UIResponder, UIWindowSceneDelegate {

    var window: UIWindow?

    func scene(_ scene: UIScene, willConnectTo session: UISceneSession, options connectionOptions: UIScene.ConnectionOptions) {
        // Use this method to optionally configure and attach the UIWindow `window` to the provided UIWindowScene `scene`.
        // If using a storyboard, the `window` property will automatically be initialized and attached to the scene.
        // This delegate does not imply the connecting scene or session are new (see `application:configurationForConnectingSceneSession` instead).
        guard let _ = (scene as? UIWindowScene) else { return }
    }

    func sceneDidDisconnect(_ scene: UIScene) {
        // Called as the scene is being released by the system.
        // This occurs shortly after the scene enters the background, or when its session is discarded.
        // Release any resources associated with this scene that can be re-created the next time the scene connects.
        // The scene may re-connect later, as its session was not necessarily discarded (see `application:didDiscardSceneSessions` instead).
    }

    func sceneDidBecomeActive(_ scene: UIScene) {
        // Called when the scene has moved from an inactive state to an active state.
        // Use this method to restart any tasks that were paused (or not yet started) when the scene was inactive.
    }

    func sceneWillResignActive(_ scene: UIScene) {
        // Called when the scene will move from an active state to an inactive state.
        // This may occur due to temporary interruptions (ex. an incoming phone call).
    }

    func sceneWillEnterForeground(_ scene: UIScene) {
        // Called as the scene transitions from the background to the foreground.
        // Use this method to undo the changes made on entering the background.
    }

    func sceneDidEnterBackground(_ scene: UIScene) {
        // Called as the scene transitions from the foreground to the background.
        // Use this method to save data, release shared resources, and store enough scene-specific state information
        // to restore the scene back to its current state.
    }
}
| {
"pile_set_name": "Github"
} |
<!DOCTYPE html>
<html>
<head>
<meta name="description" content="DBMON HyperApp" />
<meta charset="utf-8">
<link href="../styles.css" rel="stylesheet" type="text/css" />
<title>dbmon (HyperApp)</title>
</head>
<body>
<div id="app"></div>
<script src="../lib/hyperapp.min.js"></script>
<script src="../ENV.js"></script>
<script src="../lib/memory-stats.js"></script>
<script src="../lib/monitor.js"></script>
<script src="./index.js"></script>
<script src="../ga.js"></script>
</body>
</html>
| {
"pile_set_name": "Github"
} |
# md5 from https://pypi.python.org/pypi/pycli/json
md5 89c487e7c8068ce05d63394d2ae7ebad pyCLI-2.0.3.tar.gz
# Locally computed
sha256 bc53e6c5db031ae1c05d131641f153d22a201c5e82cc8c9324a945752efbb622 pyCLI-2.0.3.tar.gz
sha256 591490b2ef462a4ad2be1aae5dd47738cb17e191c82d3ba38c3ae906d288e6ba lib/cli/__init__.py
| {
"pile_set_name": "Github"
} |
import { Component } from '@angular/core';
@Component({
  selector: 'app-counter-component',
  templateUrl: './counter.component.html'
})
export class CounterComponent {
  /** Number of times the counter has been incremented; bound by the template. */
  public currentCount = 0;

  /** Advances the counter by one (wired to the template's button). */
  public incrementCounter() {
    this.currentCount += 1;
  }
}
| {
"pile_set_name": "Github"
} |
<?php
/*
* This file is part of PHP CS Fixer.
*
* (c) Fabien Potencier <[email protected]>
* Dariusz Rumiński <[email protected]>
*
* This source file is subject to the MIT license that is bundled
* with this source code in the file LICENSE.
*/
namespace PhpCsFixer;
/**
 * Unified reader for regular files and stdin-like streams.
 *
 * A regular file can be re-read with `file_get_contents` as often as
 * needed, but `php://stdin` is consumed by its first read and yields an
 * empty result afterwards. This reader caches the stdin content so every
 * caller observes the same bytes.
 *
 * @internal
 */
final class FileReader
{
    /**
     * Shared instance handed out by createSingleton().
     *
     * @var null|self
     */
    private static $instance;

    /**
     * Cached stdin bytes; populated on the first stdin read.
     *
     * @var null|string
     */
    private $stdinContent;

    /**
     * Returns the lazily-created shared reader.
     *
     * @return self
     */
    public static function createSingleton()
    {
        if (null === self::$instance) {
            self::$instance = new self();
        }

        return self::$instance;
    }

    /**
     * Reads a file, serving stdin from the cache after the first read.
     *
     * @param string $filePath
     *
     * @return string
     */
    public function read($filePath)
    {
        if ('php://stdin' !== $filePath) {
            return $this->readRaw($filePath);
        }

        if (null === $this->stdinContent) {
            $this->stdinContent = $this->readRaw($filePath);
        }

        return $this->stdinContent;
    }

    /**
     * Performs the actual read, converting failure into an exception.
     *
     * @param string $realPath
     *
     * @return string
     */
    private function readRaw($realPath)
    {
        $raw = @file_get_contents($realPath);

        if (false !== $raw) {
            return $raw;
        }

        $lastError = error_get_last();

        throw new \RuntimeException(sprintf(
            'Failed to read content from "%s".%s',
            $realPath,
            $lastError ? ' '.$lastError['message'] : ''
        ));
    }
}
| {
"pile_set_name": "Github"
} |
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef COMPONENTS_CAST_CHANNEL_CAST_AUTH_UTIL_H_
#define COMPONENTS_CAST_CHANNEL_CAST_AUTH_UTIL_H_
#include <string>
#include "base/memory/ref_counted.h"
#include "base/time/time.h"
#include "components/cast_channel/proto/cast_channel.pb.h"
namespace cast_certificate {
enum class CRLPolicy;
}
namespace net {
class X509Certificate;
class TrustStore;
} // namespace net
namespace cast_channel {
class AuthResponse;
class CastMessage;
// Outcome of an authentication check: an error classification plus a
// human-readable message. success() is true only for ERROR_NONE.
struct AuthResult {
 public:
  enum ErrorType {
    ERROR_NONE,
    ERROR_PEER_CERT_EMPTY,
    ERROR_WRONG_PAYLOAD_TYPE,
    ERROR_NO_PAYLOAD,
    ERROR_PAYLOAD_PARSING_FAILED,
    ERROR_MESSAGE_ERROR,
    ERROR_NO_RESPONSE,
    ERROR_FINGERPRINT_NOT_FOUND,
    ERROR_CERT_PARSING_FAILED,
    ERROR_CERT_NOT_SIGNED_BY_TRUSTED_CA,
    ERROR_CANNOT_EXTRACT_PUBLIC_KEY,
    ERROR_SIGNED_BLOBS_MISMATCH,
    ERROR_TLS_CERT_VALIDITY_PERIOD_TOO_LONG,
    ERROR_TLS_CERT_VALID_START_DATE_IN_FUTURE,
    ERROR_TLS_CERT_EXPIRED,
    ERROR_CRL_INVALID,
    ERROR_CERT_REVOKED,
    ERROR_SENDER_NONCE_MISMATCH,
    ERROR_DIGEST_UNSUPPORTED,
    ERROR_SIGNATURE_EMPTY,
  };

  // Bitmask values for |channel_policies| (note the 1 << n encoding).
  enum PolicyType { POLICY_NONE = 0, POLICY_AUDIO_ONLY = 1 << 0 };

  // Constructs a AuthResult that corresponds to success.
  AuthResult();

  AuthResult(const std::string& error_message, ErrorType error_type);

  ~AuthResult();

  static AuthResult CreateWithParseError(const std::string& error_message,
                                         ErrorType error_type);

  bool success() const { return error_type == ERROR_NONE; }

  std::string error_message;
  ErrorType error_type;
  // Combination of PolicyType bits applying to the channel.
  unsigned int channel_policies;
};
// Holds the nonce that binds an auth challenge to its reply; the same
// context instance must be used for both so replayed replies are caught.
class AuthContext {
 public:
  ~AuthContext();

  // Get an auth challenge context.
  // The same context must be used in the challenge and reply.
  static AuthContext Create();

  // Verifies the nonce received in the response is equivalent to the one sent.
  // Returns success if |nonce_response| matches nonce_
  AuthResult VerifySenderNonce(const std::string& nonce_response) const;

  // The nonce challenge.
  const std::string& nonce() const { return nonce_; }

 private:
  explicit AuthContext(const std::string& nonce);

  const std::string nonce_;
};
// Authenticates the given |challenge_reply|:
// 1. Signature contained in the reply is valid.
// 2. Certficate used to sign is rooted to a trusted CA.
AuthResult AuthenticateChallengeReply(const CastMessage& challenge_reply,
                                      const net::X509Certificate& peer_cert,
                                      const AuthContext& auth_context);

// Performs a quick check of the TLS certificate for time validity requirements.
// On success |peer_cert_der| receives the DER-encoded certificate.
AuthResult VerifyTLSCertificate(const net::X509Certificate& peer_cert,
                                std::string* peer_cert_der,
                                const base::Time& verification_time);

// Auth-library specific implementation of cryptographic signature
// verification routines. Verifies that |response| contains a
// valid signature of |signature_input|.
AuthResult VerifyCredentials(const AuthResponse& response,
                             const std::string& signature_input);

// Exposed for testing only.
//
// Overloaded version of VerifyCredentials that allows modifying
// the crl policy, trust stores, and verification times.
AuthResult VerifyCredentialsForTest(
    const AuthResponse& response,
    const std::string& signature_input,
    const cast_certificate::CRLPolicy& crl_policy,
    net::TrustStore* cast_trust_store,
    net::TrustStore* crl_trust_store,
    const base::Time& verification_time);
} // namespace cast_channel
#endif // COMPONENTS_CAST_CHANNEL_CAST_AUTH_UTIL_H_
| {
"pile_set_name": "Github"
} |
#!/bin/sh

# Tarsnap backup script
# Written by Tim Bishop, 2009.
#
# Creates per-directory daily/weekly/monthly tarsnap archives and prunes
# archives older than the configured retention counts.

# Directories to backup (relative to /)
DIRS="home root decrypted var/www etc/letsencrypt"

# Number of daily backups to keep
DAILY=7

# Number of weekly backups to keep
WEEKLY=3

# Which day to do weekly backups on
# 1-7, Monday = 1
WEEKLY_DAY=5

# Number of monthly backups to keep
MONTHLY=1

# Which day to do monthly backups on
# 01-31 (leading 0 is important)
MONTHLY_DAY=01

# Path to tarsnap
TARSNAP="/usr/local/bin/tarsnap"

# Extra flags to pass to tarsnap
EXTRA_FLAGS="-L -C /"

# end of config

set -e

# day of week: 1-7, monday = 1
DOW=`date +%u`
# day of month: 01-31
DOM=`date +%d`
# month of year: 01-12
MOY=`date +%m`
# year
YEAR=`date +%Y`
# time
TIME=`date +%H%M%S`

# Backup name: suffix decides which retention bucket the archive lands in.
if [ X"$DOM" = X"$MONTHLY_DAY" ]; then
	# monthly backup
	BACKUP="$YEAR$MOY$DOM-$TIME-monthly"
elif [ X"$DOW" = X"$WEEKLY_DAY" ]; then
	# weekly backup
	BACKUP="$YEAR$MOY$DOM-$TIME-weekly"
else
	# daily backup
	BACKUP="$YEAR$MOY$DOM-$TIME-daily"
fi

# Below command complains to stderr if postgres user cannot write to CWD
cd /home/

# Dump PostgreSQL to file
umask 077
sudo -u postgres pg_dumpall -c | gzip > /decrypted/postgresql-backup.sql.gz

# Do backups
for dir in $DIRS; do
	echo "==> create $BACKUP-$dir"
	$TARSNAP $EXTRA_FLAGS -c -f "$BACKUP-$dir" $dir
done

# Backups done, time for cleaning up old archives
# using tail to find archives to delete, but its
# +n syntax is out by one from what we want to do
# (also +0 == +1, so we're safe :-)
DAILY=`expr $DAILY + 1`
WEEKLY=`expr $WEEKLY + 1`
MONTHLY=`expr $MONTHLY + 1`

# Do deletes
# mktemp(1) replaces the old predictable /tmp/tarsnap.archives.$$ path,
# which was open to symlink attacks and collisions; the EXIT trap
# guarantees cleanup even when `set -e` aborts the script early.
TMPFILE=`mktemp` || exit 1
trap 'rm -f "$TMPFILE"' EXIT
$TARSNAP --list-archives > "$TMPFILE"
for dir in $DIRS; do
	for i in `grep -E "^[[:digit:]]{8}-[[:digit:]]{6}-daily-$dir" "$TMPFILE" | sort -rn | tail -n +$DAILY`; do
		echo "==> delete $i"
		$TARSNAP -d -f $i
	done
	for i in `grep -E "^[[:digit:]]{8}-[[:digit:]]{6}-weekly-$dir" "$TMPFILE" | sort -rn | tail -n +$WEEKLY`; do
		echo "==> delete $i"
		$TARSNAP -d -f $i
	done
	for i in `grep -E "^[[:digit:]]{8}-[[:digit:]]{6}-monthly-$dir" "$TMPFILE" | sort -rn | tail -n +$MONTHLY`; do
		echo "==> delete $i"
		$TARSNAP -d -f $i
	done
done
rm -f "$TMPFILE"
| {
"pile_set_name": "Github"
} |
/** @file
PEI memory status code worker.
Copyright (c) 2006 - 2018, Intel Corporation. All rights reserved.<BR>
SPDX-License-Identifier: BSD-2-Clause-Patent
**/
#include "StatusCodeHandlerPei.h"
/**
Create the first memory status code GUID'ed HOB as initialization for memory status code worker.
@retval EFI_SUCCESS The GUID'ed HOB is created successfully.
**/
EFI_STATUS
MemoryStatusCodeInitializeWorker (
VOID
)
{
//
// Create memory status code GUID'ed HOB.
//
MEMORY_STATUSCODE_PACKET_HEADER *PacketHeader;
//
// Build GUID'ed HOB with PCD defined size.
//
PacketHeader = BuildGuidHob (
&gMemoryStatusCodeRecordGuid,
PcdGet16 (PcdStatusCodeMemorySize) * 1024 + sizeof (MEMORY_STATUSCODE_PACKET_HEADER)
);
ASSERT (PacketHeader != NULL);
PacketHeader->MaxRecordsNumber = (PcdGet16 (PcdStatusCodeMemorySize) * 1024) / sizeof (MEMORY_STATUSCODE_RECORD);
PacketHeader->PacketIndex = 0;
PacketHeader->RecordIndex = 0;
return EFI_SUCCESS;
}
/**
  Report status code into GUID'ed HOB.

  This function reports status code into GUID'ed HOB. If not all packets are full, then
  write status code into available entry. Otherwise, create a new packet for it.

  @param  PeiServices      An indirect pointer to the EFI_PEI_SERVICES table published by the PEI Foundation.
  @param  CodeType         Indicates the type of status code being reported.
  @param  Value            Describes the current status of a hardware or
                           software entity. This includes information about the class and
                           subclass that is used to classify the entity as well as an operation.
                           For progress codes, the operation is the current activity.
                           For error codes, it is the exception.For debug codes,it is not defined at this time.
  @param  Instance         The enumeration of a hardware or software entity within
                           the system. A system may contain multiple entities that match a class/subclass
                           pairing. The instance differentiates between them. An instance of 0 indicates
                           that instance information is unavailable, not meaningful, or not relevant.
                           Valid instance numbers start with 1.
  @param  CallerId         This optional parameter may be used to identify the caller.
                           This parameter allows the status code driver to apply different rules to
                           different callers.
  @param  Data             This optional parameter may be used to pass additional data.

  @retval EFI_SUCCESS      The function always return EFI_SUCCESS.

**/
EFI_STATUS
EFIAPI
MemoryStatusCodeReportWorker (
  IN CONST  EFI_PEI_SERVICES    **PeiServices,
  IN EFI_STATUS_CODE_TYPE       CodeType,
  IN EFI_STATUS_CODE_VALUE      Value,
  IN UINT32                     Instance,
  IN CONST EFI_GUID             *CallerId,
  IN CONST EFI_STATUS_CODE_DATA *Data OPTIONAL
  )
{
  EFI_PEI_HOB_POINTERS              Hob;
  MEMORY_STATUSCODE_PACKET_HEADER   *PacketHeader;
  MEMORY_STATUSCODE_RECORD          *Record;

  //
  // Find GUID'ed HOBs to locate current record buffer.
  //
  Hob.Raw = GetFirstGuidHob (&gMemoryStatusCodeRecordGuid);
  ASSERT (Hob.Raw != NULL);

  PacketHeader = (MEMORY_STATUSCODE_PACKET_HEADER *) GET_GUID_HOB_DATA (Hob.Guid);
  //
  // Records are laid out immediately after the packet header; claim the
  // slot at RecordIndex and advance the index (circular buffer).
  //
  Record       = (MEMORY_STATUSCODE_RECORD *) (PacketHeader + 1);
  Record       = &Record[PacketHeader->RecordIndex++];

  //
  // Save status code. (CallerId and Data are intentionally not stored;
  // only the fixed-size triple fits a MEMORY_STATUSCODE_RECORD.)
  //
  Record->CodeType = CodeType;
  Record->Instance = Instance;
  Record->Value    = Value;

  //
  // If record index equals to max record number, then wrap around record index to zero.
  //
  // The reader of status code should compare the number of records with max records number,
  // If it is equal to or larger than the max number, then the wrap-around had happened,
  // so the first record is pointed by record index.
  // If it is less then max number, index of the first record is zero.
  //
  if (PacketHeader->RecordIndex == PacketHeader->MaxRecordsNumber) {
    //
    // Wrap around record index.
    //
    PacketHeader->RecordIndex = 0;
    PacketHeader->PacketIndex ++;
  }

  return EFI_SUCCESS;
}
| {
"pile_set_name": "Github"
} |
#include "graph.h"
#include "program.h"
#include "mesh.h"
#include "timers.h"
#include <cmath>
#include <time.h>
#include <sys/time.h>
#include "Thermal.h"
#include "../thermal/ParameterManager.h"
#include "../thermal/ParameterManagerMacros.h"
#include "MGParameterManager.h"
using namespace simit;
#define TPM MGParameterManager
// Thermal-viz
void DumpToVisit(std::string ZoneName,int iter, double tim,
int Xsize, int Ysize, Set *quadsPan, Set *pointsPan);
// Driver for the multigrid thermal simulation: reads a .therm parameter
// file, builds the "Pan" thermal model, then advances it in time until
// timeMax or iterMax is reached, optionally dumping VisIt output.
int main(int argc, char **argv)
{
  //1- Analyse command line arguments
  if (argc != 2) {
    std::cerr << "Usage: mg <path to .therm file>" << std::endl;
    return -1;
  }
  std::string thermfile = argv[1];

  //2- Construct the parameter manager
  TPM PM;
  PM.readParameters(thermfile);

  //3 - Read the Pan part
  Thermal Pan = Thermal(PM.get(TPM::PanFileName),PM.get(TPM::CGNSFileName_0),
                        PM.get(TPM::CGNSFileName_1),"Pan",1);

  // Compute first time step
  double time=0.0;
  double dt=0.0;
  Pan.compute_dt.runSafe();
  dt=Pan.dt(0);

  // Interface boundary condition : Dirichlet for the Pan
  Pan.bc_types(Pan.coupling_direction(0))=1;
  // Pan_L0.bc_types(Pan_L0.coupling_direction(0))=1;

  int iter=0;
  if (PM.get(TPM::dumpVisit)) {
    // Dump the initial state (iteration 0) before time stepping.
    DumpToVisit("Pan_L1",iter, time, Pan.Xsize[1], Pan.Ysize[1],
                Pan.quads_MG[1], Pan.points_MG[1]);
  }

  // Time loop
  timeval start;
  gettimeofday(&start, NULL) ;
  while ((time < PM.get(TPM::timeMax)) && (iter<PM.get(TPM::iterMax))) {
    iter=iter+1;
    std::cout << "---- Iteration " << iter << " ----" << std::endl;
    std::cout << " -- Time " << time << " -- dt " << dt << " --" << std::endl;

    // solver_type selects the scheme; >= 6 presumably means Gauss-Seidel
    // (solve_thermalGS) — TODO confirm against MGParameterManager.
    if (Pan.PM.get(TPM::solver_type)<6)
      Pan.solve_thermal.runSafe();
    else
      Pan.solve_thermalGS.runSafe();
    time+=dt;

    if ((PM.get(TPM::dumpVisit)) && (iter%(PM.get(TPM::dumpFrequency))==0)) {
      DumpToVisit("Pan_L1",iter, time, Pan.Xsize[1], Pan.Ysize[1],
                  Pan.quads_MG[1], Pan.points_MG[1]);
    }

    // Compute the next timestep value
    Pan.compute_dt.runSafe();
    dt=Pan.dt(0);
    // To arrive just on time
    if (time+dt > PM.get(TPM::timeMax))
      dt = PM.get(TPM::timeMax) - time;
    Pan.dt(0)=dt;
  }
  timeval end;
  gettimeofday(&end, NULL) ;
  // Wall-clock duration, converted from seconds+microseconds to ms.
  double elapsed_time = (double)(end.tv_sec - start.tv_sec) + ((double)(end.tv_usec - start.tv_usec))/1000000 ;
  elapsed_time = elapsed_time*1000;
  printf("\nElapsed time = %10.2f (ms)\n", elapsed_time);

  // FieldRef<double> T = Pan.quads_MG[1]->getField<double>("T");
  // for (auto quad = Pan.quads_MG[1]->begin(); quad != Pan.quads_MG[1]->end(); ++quad) {
  //   std::cout << float(T.get(*quad)) << std::endl;
  // }
  // simit::ir::printTimes();

  if (PM.get(TPM::dumpVisit)) {
    // Final state dump.
    DumpToVisit("Pan_L1",iter, time, Pan.Xsize[1], Pan.Ysize[1],
                Pan.quads_MG[1], Pan.points_MG[1]);
  }
}
| {
"pile_set_name": "Github"
} |
//
// main.m
// Displaying Pins with Different Colors on a Map View
//
// Created by Vandad Nahavandipoor on 15/07/2011.
// Copyright 2011 Pixolity Ltd. All rights reserved.
//
#import <UIKit/UIKit.h>
#import "Displaying_Pins_with_Different_Colors_on_a_Map_ViewAppDelegate.h"
int main(int argc, char *argv[])
{
    @autoreleasepool {
        // Hand control to UIKit; the fourth argument names the
        // UIApplicationDelegate class for this sample app.
        return UIApplicationMain(argc, argv, nil, NSStringFromClass([Displaying_Pins_with_Different_Colors_on_a_Map_ViewAppDelegate class]));
    }
}
| {
"pile_set_name": "Github"
} |
#include "Etterna/Globals/global.h"
#include "X11Helper.h"
#include "RageUtil/Misc/RageLog.h"
#include "Etterna/Globals/ProductInfo.h"
#include "Etterna/Models/Misc/Preference.h"
#include "Etterna/Singletons/PrefsManager.h" // XXX: only used for m_bShowMouseCursor -aj
#include <X11/extensions/dpms.h>
#include <sys/types.h>
#include <unistd.h>
#include <string>
#include <sstream>
#include <cstdlib>
#include <csignal>
// Logs this process's own call stack by shelling out to pstack(1).
void
printCallStack()
{
	const pid_t pid = getpid();
	const std::string command =
	  "pstack " + std::to_string(static_cast<long long>(pid));
	system(command.c_str());
}
Display* X11Helper::Dpy = NULL;
Window X11Helper::Win = None;
static int
ErrorCallback(Display*, XErrorEvent*);
static int
FatalCallback(Display*);
static Preference<std::string> g_XWMName("XWMName", PRODUCT_ID);
static bool display_supports_dpms_extension = false;
static bool dpms_state_at_startup = false;
// Opens the X display, installs the error/IO handlers, and disables DPMS
// (display power management), remembering its original state so
// CloseXConnection can restore it. Returns false on failure.
bool
X11Helper::OpenXConnection()
{
	DEBUG_ASSERT(Dpy == NULL && Win == None);

	// XInitThreads must precede any other Xlib call when multiple
	// threads will touch the connection.
	int res = XInitThreads();
	if (res == 0)
		return false;

	Dpy = XOpenDisplay(0);
	if (Dpy == NULL)
		return false;

	XSetIOErrorHandler(FatalCallback);
	XSetErrorHandler(ErrorCallback);

	int event_base, error_base;
	display_supports_dpms_extension =
	  DPMSQueryExtension(Dpy, &event_base, &error_base);
	if (display_supports_dpms_extension) {
		LOG->Trace("DPMSQueryExtension returned true. Stepmania will disable "
				   "power management, and restore the original state on exit.");
		CARD16 power_level;
		BOOL state;
		if (DPMSInfo(Dpy, &power_level, &state)) {
			dpms_state_at_startup = state;
			DPMSDisable(Dpy);
		} else {
			LOG->Trace("DPMSInfo returned false. Stepmania will not be able "
					   "to disable power management.");
		}
	} else {
		LOG->Trace("DPMSQueryExtension returned false, which means this "
				   "display does not support the DPMS extension. Stepmania "
				   "will not be able to disable power management.");
	}

	return true;
}
// Restores the DPMS state recorded in OpenXConnection, then closes the
// display. The window must already have been destroyed.
void
X11Helper::CloseXConnection()
{
	if (display_supports_dpms_extension) {
		if (dpms_state_at_startup) {
			DPMSEnable(Dpy);
		} else {
			DPMSDisable(Dpy);
		}
	}

	// The window should have been shut down
	DEBUG_ASSERT(Dpy != NULL);
	DEBUG_ASSERT(Win == None);
	XCloseDisplay(Dpy);
	Dpy = NULL;
}
// Creates (or re-creates) a window of the given size on |screenNum|.
// When |win| already exists, its event mask is carried over to the new
// window before the old one is destroyed. Returns false on failure.
bool
X11Helper::MakeWindow(Window& win,
					  int screenNum,
					  int depth,
					  Visual* visual,
					  int width,
					  int height,
					  bool overrideRedirect)
{
	if (!Dpy)
		return false;

	XSetWindowAttributes winAttribs;
	winAttribs.border_pixel = 0;
	winAttribs.event_mask = 0L;

	if (win) {
		// Preserve the event mask.
		XWindowAttributes attribs;
		XGetWindowAttributes(Dpy, win, &attribs);
		winAttribs.event_mask = attribs.your_event_mask;
		XDestroyWindow(Dpy, win);
	}

	// XXX: Error catching/handling?
	winAttribs.colormap =
	  XCreateColormap(Dpy, RootWindow(Dpy, screenNum), visual, AllocNone);
	unsigned long mask = CWBorderPixel | CWColormap | CWEventMask;

	if (overrideRedirect) {
		// Ask the window manager to leave this window alone
		// (no decoration/repositioning).
		winAttribs.override_redirect = True;
		mask |= CWOverrideRedirect;
	}

	win = XCreateWindow(Dpy,
						RootWindow(Dpy, screenNum),
						0,
						0,
						width,
						height,
						0,
						depth,
						InputOutput,
						visual,
						mask,
						&winAttribs);
	if (win == None)
		return false;

	// Advertise the application name/class to the window manager.
	XClassHint* hint = XAllocClassHint();
	if (hint == NULL) {
		LOG->Warn("Could not set class hint for X11 Window");
	} else {
		hint->res_name = (char*)g_XWMName.Get().c_str();
		hint->res_class = (char*)PRODUCT_FAMILY;
		XSetClassHint(Dpy, win, hint);
		XFree(hint);
	}

	// Hide the mouse cursor in certain situations.
	// X has no "hide cursor" call, so install a fully transparent
	// 8x8 pixmap cursor instead.
	if (!PREFSMAN->m_bShowMouseCursor) {
		const char pBlank[] = { 0, 0, 0, 0, 0, 0, 0, 0 };
		Pixmap BlankBitmap = XCreateBitmapFromData(Dpy, win, pBlank, 8, 8);

		XColor black = { 0, 0, 0, 0, 0, 0 };
		Cursor pBlankPointer = XCreatePixmapCursor(
		  Dpy, BlankBitmap, BlankBitmap, &black, &black, 0, 0);
		XFreePixmap(Dpy, BlankBitmap);

		XDefineCursor(Dpy, win, pBlankPointer);
		XFreeCursor(Dpy, pBlankPointer);
	}

	return true;
}
// Sends a _NET_WM_STATE client message for |win| to the root window,
// asking the window manager to add/remove/toggle |atom| on it.
// |action|: 0 = Remove, 1 = Add, 2 = Toggle (per the EWMH spec).
void
X11Helper::SetWMState(const Window& root,
					  const Window& win,
					  const long action,
					  const Atom atom)
{
	if (!Dpy)
		return;

	Atom wm_state = XInternAtom(Dpy, "_NET_WM_STATE", False);
	XEvent xev;
	memset(&xev, 0, sizeof(xev));
	xev.type = ClientMessage;
	// Bug fix: target the caller's |win|. The previous code used the
	// global Win here, silently ignoring the parameter; the existing
	// caller passes Win so behavior is unchanged for it, but the
	// function now works for any window.
	xev.xclient.window = win;
	xev.xclient.message_type = wm_state;
	xev.xclient.format = 32;
	xev.xclient.data.l[0] = action; // 0 = Remove, 1 = Add, 2 = Toggle
	xev.xclient.data.l[1] = atom;
	xev.xclient.data.l[2] = 0; // end list of Atoms

	XSendEvent(Dpy,
			   root,
			   False,
			   SubstructureRedirectMask | SubstructureNotifyMask,
			   &xev);
}
// Non-fatal X protocol error handler: logs the error and continues.
int
ErrorCallback(Display* d, XErrorEvent* err)
{
	char errText[512];
	XGetErrorText(d, err->error_code, errText, 512);
	LOG->Warn("X11 Protocol error %s (%d) has occurred, caused by request "
			  "%d,%d, resource ID %d",
			  errText,
			  err->error_code,
			  err->request_code,
			  err->minor_code,
			  (int)err->resourceid);

	return 0; // Xlib ignores our return value
}
// Fatal X I/O error handler (e.g. the server connection died). Throws so
// the engine can unwind; Xlib does not expect this handler to return.
int
FatalCallback(Display* d)
{
	RageException::Throw("Fatal I/O error communicating with X server.");
}
#ifdef HAVE_XINERAMA
#include <X11/extensions/Xinerama.h>
// Makes the window fullscreen across the monitors described by |target|
// using _NET_WM_FULLSCREEN_MONITORS. For a virtual display spec the
// bounding monitors in each direction are used; otherwise the single
// Xinerama screen matching the target's bounds/mode is located.
// Returns false if Xinerama is unavailable or no matching screen exists.
bool
X11Helper::SetWMFullscreenMonitors(const DisplaySpec& target)
{
	int num_screens = 0;
	XineramaScreenInfo* screens = XineramaQueryScreens(Dpy, &num_screens);
	if (screens == nullptr) {
		return false;
	}

	XineramaScreenInfo* end = screens + num_screens;

	// Edges of the monitor rectangle, expressed as Xinerama screen
	// numbers (the unit _NET_WM_FULLSCREEN_MONITORS expects).
	RectI monitors{};
	bool found_bounds = false;
	if (target.isVirtual()) {
		auto topmost = std::min_element(
		  screens, end, [](XineramaScreenInfo& a, XineramaScreenInfo& b) {
			  return a.y_org < b.y_org;
		  });
		monitors.top = topmost->screen_number;

		auto bottommost = std::max_element(
		  screens, end, [](XineramaScreenInfo& a, XineramaScreenInfo& b) {
			  return a.y_org < b.y_org;
		  });
		monitors.bottom = bottommost->screen_number;

		auto leftmost = std::min_element(
		  screens, end, [](XineramaScreenInfo& a, XineramaScreenInfo& b) {
			  return a.x_org < b.x_org;
		  });
		monitors.left = leftmost->screen_number;

		auto rightmost = std::max_element(
		  screens, end, [](XineramaScreenInfo& a, XineramaScreenInfo& b) {
			  return a.x_org < b.x_org;
		  });
		monitors.right = rightmost->screen_number;
		found_bounds = true;
	} else if (target.currentMode() != nullptr) {
		// Find the one screen whose origin and size match the target.
		auto mon = std::find_if(screens, end, [&](XineramaScreenInfo& screen) {
			return screen.x_org == target.currentBounds().left &&
				   screen.y_org == target.currentBounds().top &&
				   screen.width == target.currentMode()->width &&
				   screen.height == target.currentMode()->height;
		});
		if (mon != end) {
			monitors.left = monitors.right = monitors.top = monitors.bottom =
			  mon->screen_number;
			found_bounds = true;
		}
	}

	XFree(screens);

	XWindowAttributes attr = { 0 };
	if (!found_bounds || !XGetWindowAttributes(Dpy, Win, &attr)) {
		return false;
	}

	// First request fullscreen state, then tell the WM which monitors
	// the fullscreen window should span.
	SetWMState(
	  attr.root, Win, 1, XInternAtom(Dpy, "_NET_WM_STATE_FULLSCREEN", False));

	XClientMessageEvent xclient = { 0 };
	xclient.type = ClientMessage;
	xclient.window = Win;
	xclient.message_type =
	  XInternAtom(Dpy, "_NET_WM_FULLSCREEN_MONITORS", False);
	xclient.format = 32;
	xclient.data.l[0] = monitors.top;
	xclient.data.l[1] = monitors.bottom;
	xclient.data.l[2] = monitors.left;
	xclient.data.l[3] = monitors.right;
	xclient.data.l[4] = 1;
	XSendEvent(Dpy,
			   attr.root,
			   False,
			   SubstructureRedirectMask | SubstructureNotifyMask,
			   reinterpret_cast<XEvent*>(&xclient));
	XFlush(Dpy);
	return true;
}
#endif
| {
"pile_set_name": "Github"
} |
package pointerstructure
import (
"sort"
)
// Sort does an in-place sort of the pointers so that they are in order
// of least specific to most specific alphabetized. For example:
// "/foo", "/foo/0", "/qux"
//
// This ordering is ideal for applying the changes in a way that ensures
// that parents are set first.
func Sort(p []*Pointer) { sort.Sort(PointerSlice(p)) }

// PointerSlice is a slice of pointers that adheres to sort.Interface
type PointerSlice []*Pointer

// Len reports the number of pointers (sort.Interface).
func (p PointerSlice) Len() int { return len(p) }

// Swap exchanges the pointers at i and j (sort.Interface).
func (p PointerSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
// Less orders pointers part-by-part lexicographically; when one pointer
// is a prefix of the other, the shorter (less specific) one sorts first.
func (p PointerSlice) Less(i, j int) bool {
	left, right := p[i].Parts, p[j].Parts

	limit := len(left)
	if len(right) < limit {
		limit = len(right)
	}

	// Compare the shared prefix one part at a time.
	for idx := 0; idx < limit; idx++ {
		if left[idx] != right[idx] {
			return left[idx] < right[idx]
		}
	}

	// Shared prefix is equal: the shorter pointer is "less"; equal
	// lengths mean the pointers are equal, so not less.
	return len(left) < len(right)
}
| {
"pile_set_name": "Github"
} |
-- Migration: clean up EventAI text actions and convert Spectral Servant
-- (16407) from EventAI to SmartAI with creature_text dialogue.

-- Remove Incorrect Aggro Text actions from EAI
-- (action1_type=1 is the EventAI talk action — confirm against EventAI docs)
DELETE FROM creature_ai_scripts WHERE action1_type=1 AND creature_id IN (203,1564,3198,16873,1097,2554,2587,8566);
-- Remove old Azgalor text
DELETE FROM creature_ai_texts WHERE `comment`=17842;
-- Remove old Rage Winterchill text
DELETE FROM creature_ai_texts WHERE `comment`=17767;
-- SAI for Spectral Servant
SET @ENTRY := 16407;
UPDATE `creature_template` SET `AIName`='SmartAI' WHERE `entry`=@ENTRY;
DELETE FROM `creature_ai_scripts` WHERE `creature_id`=@ENTRY;
DELETE FROM `smart_scripts` WHERE `source_type`=0 AND `entryorguid`=@ENTRY;
INSERT INTO `smart_scripts` (`entryorguid`,`source_type`,`id`,`link`,`event_type`,`event_phase_mask`,`event_chance`,`event_flags`,`event_param1`,`event_param2`,`event_param3`,`event_param4`,`action_type`,`action_param1`,`action_param2`,`action_param3`,`action_param4`,`action_param5`,`action_param6`,`target_type`,`target_param1`,`target_param2`,`target_param3`,`target_x`,`target_y`,`target_z`,`target_o`,`comment`) VALUES
(@ENTRY,0,0,0,4,0,100,3,0,0,0,0,11,29540,0,0,0,0,0,2,0,0,0,0,0,0,0, 'Spectral Servant - On Aggro - Cast Curse of Past Burdens'),
(@ENTRY,0,1,0,0,0,70,0,12000,12000,30000,30000,11,29540,0,0,0,0,0,2,0,0,0,0,0,0,0, 'Spectral Servant - Combat - Cast Curse of Past Burdens'),
(@ENTRY,0,2,0,6,0,100,3,0,0,0,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0, 'Spectral Servant - On Death - Say random text');
-- NPC talk text convert from creature_ai_text
DELETE FROM `creature_ai_texts` WHERE `entry` BETWEEN -45 AND -43;
DELETE FROM `creature_text` WHERE `entry` IN (16407);
INSERT INTO `creature_text` (`entry`,`groupid`,`id`,`text`,`type`,`language`,`probability`,`emote`,`duration`,`sound`,`comment`) VALUES
(16407,0,0, 'No!',14,0,100,0,0,0, 'Spectral Servant - On Death Say'),
(16407,0,1, 'I am finished!',12,0,100,0,0,0, 'Spectral Servant - On Death Say'),
(16407,0,2, 'The master... will be angry...',12,0,100,0,0,0, 'Spectral Servant - On Death Say');
| {
"pile_set_name": "Github"
} |
import {ApiModelProperty} from '@nestjs/swagger';
/** JWT bundle returned to API clients; fields documented for Swagger. */
export class JwtDto {
  // Access-token lifetime (units set by the issuing service — verify).
  @ApiModelProperty()
  expiresIn: number;

  // Short-lived token sent on authenticated requests.
  @ApiModelProperty()
  accessToken: string;

  // Long-lived token used to obtain a new access token.
  @ApiModelProperty()
  refreshToken: string;
}
| {
"pile_set_name": "Github"
} |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models
if TYPE_CHECKING:
    # pylint: disable=unused-import,ungrouped-imports
    from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
    T = TypeVar('T')
    # Type-checking-only alias: signature of the optional `cls` response hook
    # accepted by every operation —
    # (pipeline_response, deserialized_result, response_headers) -> Any.
    ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class NetworkSecurityGroupsOperations(object):
    """NetworkSecurityGroupsOperations operations.
    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.
    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.network.v2015_06_15.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """
    # NOTE: auto-generated by AutoRest — manual edits are lost on regeneration.
    # Alias of the versioned models module, exposed on the class for callers.
    models = models
    def __init__(self, client, config, serializer, deserializer):
        # Pipeline client plus (de)serializers supplied by the service client.
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    # Sends the initial DELETE request of the long-running delete operation.
    # 200/202/204 are all accepted as success; returns None (or cls(...)).
    def _delete_initial(
        self,
        resource_group_name, # type: str
        network_security_group_name, # type: str
        **kwargs # type: Any
    ):
        # type: (...) -> None
        cls = kwargs.pop('cls', None) # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2015-06-15"
        # Construct URL
        url = self._delete_initial.metadata['url'] # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkSecurityGroupName': self._serialize.url("network_security_group_name", network_security_group_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {} # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {} # type: Dict[str, Any]
        request = self._client.delete(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 202, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        if cls:
            return cls(pipeline_response, None, {})
    _delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}'} # type: ignore
    def begin_delete(
        self,
        resource_group_name, # type: str
        network_security_group_name, # type: str
        **kwargs # type: Any
    ):
        # type: (...) -> LROPoller[None]
        """Deletes the specified network security group.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param network_security_group_name: The name of the network security group.
        :type network_security_group_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: True for ARMPolling, False for no polling, or a
         polling object for personal polling strategy
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None) # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
        if cont_token is None:
            # First call: issue the initial request. cls=lambda keeps the raw
            # PipelineResponse so the poller can inspect status/headers.
            raw_result = self._delete_initial(
                resource_group_name=resource_group_name,
                network_security_group_name=network_security_group_name,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            if cls:
                return cls(pipeline_response, None, {})
        if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            # Resume a previously-saved poller instead of re-issuing the call.
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}'} # type: ignore
    def get(
        self,
        resource_group_name, # type: str
        network_security_group_name, # type: str
        expand=None, # type: Optional[str]
        **kwargs # type: Any
    ):
        # type: (...) -> "models.NetworkSecurityGroup"
        """Gets the specified network security group.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param network_security_group_name: The name of the network security group.
        :type network_security_group_name: str
        :param expand: Expands referenced resources.
        :type expand: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: NetworkSecurityGroup, or the result of cls(response)
        :rtype: ~azure.mgmt.network.v2015_06_15.models.NetworkSecurityGroup
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType["models.NetworkSecurityGroup"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2015-06-15"
        accept = "application/json, text/json"
        # Construct URL
        url = self.get.metadata['url'] # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkSecurityGroupName': self._serialize.url("network_security_group_name", network_security_group_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {} # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # $expand is only sent when the caller asked for it.
        if expand is not None:
            query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
        # Construct headers
        header_parameters = {} # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize('NetworkSecurityGroup', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}'} # type: ignore
    # Sends the initial PUT request of the long-running create-or-update
    # operation; deserializes the body on 200 (update) and 201 (create).
    def _create_or_update_initial(
        self,
        resource_group_name, # type: str
        network_security_group_name, # type: str
        parameters, # type: "models.NetworkSecurityGroup"
        **kwargs # type: Any
    ):
        # type: (...) -> "models.NetworkSecurityGroup"
        cls = kwargs.pop('cls', None) # type: ClsType["models.NetworkSecurityGroup"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2015-06-15"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json, text/json"
        # Construct URL
        url = self._create_or_update_initial.metadata['url'] # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkSecurityGroupName': self._serialize.url("network_security_group_name", network_security_group_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {} # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {} # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        body_content_kwargs = {} # type: Dict[str, Any]
        body_content = self._serialize.body(parameters, 'NetworkSecurityGroup')
        body_content_kwargs['content'] = body_content
        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200, 201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        if response.status_code == 200:
            deserialized = self._deserialize('NetworkSecurityGroup', pipeline_response)
        if response.status_code == 201:
            deserialized = self._deserialize('NetworkSecurityGroup', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}'} # type: ignore
    def begin_create_or_update(
        self,
        resource_group_name, # type: str
        network_security_group_name, # type: str
        parameters, # type: "models.NetworkSecurityGroup"
        **kwargs # type: Any
    ):
        # type: (...) -> LROPoller["models.NetworkSecurityGroup"]
        """Creates or updates a network security group in the specified resource group.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param network_security_group_name: The name of the network security group.
        :type network_security_group_name: str
        :param parameters: Parameters supplied to the create or update network security group
         operation.
        :type parameters: ~azure.mgmt.network.v2015_06_15.models.NetworkSecurityGroup
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: True for ARMPolling, False for no polling, or a
         polling object for personal polling strategy
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either NetworkSecurityGroup or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2015_06_15.models.NetworkSecurityGroup]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None) # type: ClsType["models.NetworkSecurityGroup"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
        if cont_token is None:
            # First call: issue the initial PUT. cls=lambda keeps the raw
            # PipelineResponse so the poller can inspect status/headers.
            raw_result = self._create_or_update_initial(
                resource_group_name=resource_group_name,
                network_security_group_name=network_security_group_name,
                parameters=parameters,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            deserialized = self._deserialize('NetworkSecurityGroup', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            # Resume a previously-saved poller instead of re-issuing the call.
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups/{networkSecurityGroupName}'} # type: ignore
    def list_all(
        self,
        **kwargs # type: Any
    ):
        # type: (...) -> Iterable["models.NetworkSecurityGroupListResult"]
        """Gets all network security groups in a subscription.
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either NetworkSecurityGroupListResult or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2015_06_15.models.NetworkSecurityGroupListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType["models.NetworkSecurityGroupListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2015-06-15"
        accept = "application/json, text/json"
        # Builds the request for the first page (no next_link) or a follow-up
        # page (next_link is used verbatim as the URL, without re-adding query
        # parameters).
        def prepare_request(next_link=None):
            # Construct headers
            header_parameters = {} # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # Construct URL
                url = self.list_all.metadata['url'] # type: ignore
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {} # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {} # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        # Deserializes one page and returns (next_link, iterator of elements).
        def extract_data(pipeline_response):
            deserialized = self._deserialize('NetworkSecurityGroupListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)
        # Fetches one page and raises mapped errors on non-200 responses.
        def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response
        return ItemPaged(
            get_next, extract_data
        )
    list_all.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/networkSecurityGroups'} # type: ignore
    def list(
        self,
        resource_group_name, # type: str
        **kwargs # type: Any
    ):
        # type: (...) -> Iterable["models.NetworkSecurityGroupListResult"]
        """Gets all network security groups in a resource group.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either NetworkSecurityGroupListResult or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2015_06_15.models.NetworkSecurityGroupListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None) # type: ClsType["models.NetworkSecurityGroupListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2015-06-15"
        accept = "application/json, text/json"
        # Builds the request for the first page (no next_link) or a follow-up
        # page (next_link is used verbatim as the URL).
        def prepare_request(next_link=None):
            # Construct headers
            header_parameters = {} # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # Construct URL
                url = self.list.metadata['url'] # type: ignore
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {} # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {} # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        # Deserializes one page and returns (next_link, iterator of elements).
        def extract_data(pipeline_response):
            deserialized = self._deserialize('NetworkSecurityGroupListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)
        # Fetches one page and raises mapped errors on non-200 responses.
        def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response
        return ItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkSecurityGroups'} # type: ignore
| {
"pile_set_name": "Github"
} |
1 [intro]
1.1 [intro.scope]
1.2 [intro.refs]
1.3 [intro.defs]
1.3.1 [defns.argument]
1.3.2 [defns.cond.supp]
1.3.3 [defns.diagnostic]
1.3.4 [defns.dynamic.type]
1.3.5 [defns.ill.formed]
1.3.6 [defns.impl.defined]
1.3.7 [defns.impl.limits]
1.3.8 [defns.locale.specific]
1.3.9 [defns.multibyte]
1.3.10 [defns.parameter]
1.3.11 [defns.signature]
1.3.12 [defns.static.type]
1.3.13 [defns.undefined]
1.3.14 [defns.unspecified]
1.3.15 [defns.well.formed]
1.4 [intro.compliance]
1.5 [intro.structure]
1.6 [syntax]
1.7 [intro.memory]
1.8 [intro.object]
1.9 [intro.execution]
1.10 [intro.multithread]
1.11 [intro.ack]
2 [lex]
2.1 [lex.separate]
2.2 [lex.phases]
2.3 [lex.charset]
2.4 [lex.trigraph]
2.5 [lex.pptoken]
2.6 [lex.digraph]
2.7 [lex.token]
2.8 [lex.comment]
2.9 [lex.header]
2.10 [lex.ppnumber]
2.11 [lex.name]
2.12 [lex.key]
2.13 [lex.operators]
2.14 [lex.literal]
2.14.1 [lex.literal.kinds]
2.14.2 [lex.icon]
2.14.3 [lex.ccon]
2.14.4 [lex.fcon]
2.14.5 [lex.string]
2.14.6 [lex.bool]
2.14.7 [lex.nullptr]
2.14.8 [lex.ext]
3 [basic]
3.1 [basic.def]
3.2 [basic.def.odr]
3.3 [basic.scope]
3.3.1 [basic.scope.declarative]
3.3.2 [basic.scope.pdecl]
3.3.3 [basic.scope.local]
3.3.4 [basic.scope.proto]
3.3.5 [basic.funscope]
3.3.6 [basic.scope.namespace]
3.3.7 [basic.scope.class]
3.3.8 [basic.scope.concept]
3.3.9 [basic.scope.req]
3.3.10 [basic.scope.enum]
3.3.11 [basic.scope.hiding]
3.4 [basic.lookup]
3.4.1 [basic.lookup.unqual]
3.4.2 [basic.lookup.argdep]
3.4.3 [basic.lookup.qual]
3.4.3.1 [class.qual]
3.4.3.2 [namespace.qual]
3.4.3.3 [concept.qual]
3.4.4 [basic.lookup.elab]
3.4.5 [basic.lookup.classref]
3.4.6 [basic.lookup.udir]
3.5 [basic.link]
3.6 [basic.start]
3.6.1 [basic.start.main]
3.6.2 [basic.start.init]
3.6.3 [basic.start.term]
3.7 [basic.stc]
3.7.1 [basic.stc.static]
3.7.2 [basic.stc.thread]
3.7.3 [basic.stc.auto]
3.7.4 [basic.stc.dynamic]
3.7.4.1 [basic.stc.dynamic.allocation]
3.7.4.2 [basic.stc.dynamic.deallocation]
3.7.4.3 [basic.stc.dynamic.safety]
3.7.5 [basic.stc.inherit]
3.8 [basic.life]
3.9 [basic.types]
3.9.1 [basic.fundamental]
3.9.2 [basic.compound]
3.9.3 [basic.type.qualifier]
3.10 [basic.lval]
3.11 [basic.align]
4 [conv]
4.1 [conv.lval]
4.2 [conv.array]
4.3 [conv.func]
4.4 [conv.qual]
4.5 [conv.prom]
4.6 [conv.fpprom]
4.7 [conv.integral]
4.8 [conv.double]
4.9 [conv.fpint]
4.10 [conv.ptr]
4.11 [conv.mem]
4.12 [conv.bool]
4.13 [conv.rank]
5 [expr]
5.1 [expr.prim]
5.1.1 [expr.prim.general]
5.1.2 [expr.prim.lambda]
5.2 [expr.post]
5.2.1 [expr.sub]
5.2.2 [expr.call]
5.2.3 [expr.type.conv]
5.2.4 [expr.pseudo]
5.2.5 [expr.ref]
5.2.6 [expr.post.incr]
5.2.7 [expr.dynamic.cast]
5.2.8 [expr.typeid]
5.2.9 [expr.static.cast]
5.2.10 [expr.reinterpret.cast]
5.2.11 [expr.const.cast]
5.3 [expr.unary]
5.3.1 [expr.unary.op]
5.3.2 [expr.pre.incr]
5.3.3 [expr.sizeof]
5.3.4 [expr.new]
5.3.5 [expr.delete]
5.3.6 [expr.alignof]
5.4 [expr.cast]
5.5 [expr.mptr.oper]
5.6 [expr.mul]
5.7 [expr.add]
5.8 [expr.shift]
5.9 [expr.rel]
5.10 [expr.eq]
5.11 [expr.bit.and]
5.12 [expr.xor]
5.13 [expr.or]
5.14 [expr.log.and]
5.15 [expr.log.or]
5.16 [expr.cond]
5.17 [expr.ass]
5.18 [expr.comma]
5.19 [expr.const]
6 [stmt.stmt]
6.1 [stmt.label]
6.2 [stmt.expr]
6.3 [stmt.block]
6.4 [stmt.select]
6.4.1 [stmt.if]
6.4.2 [stmt.switch]
6.5 [stmt.iter]
6.5.1 [stmt.while]
6.5.2 [stmt.do]
6.5.3 [stmt.for]
6.5.4 [stmt.ranged]
6.6 [stmt.jump]
6.6.1 [stmt.break]
6.6.2 [stmt.cont]
6.6.3 [stmt.return]
6.6.4 [stmt.goto]
6.7 [stmt.dcl]
6.8 [stmt.ambig]
6.9 [stmt.late]
7 [dcl.dcl]
7.1 [dcl.spec]
7.1.1 [dcl.stc]
7.1.2 [dcl.fct.spec]
7.1.3 [dcl.typedef]
7.1.4 [dcl.friend]
7.1.5 [dcl.constexpr]
7.1.6 [dcl.type]
7.1.6.1 [dcl.type.cv]
7.1.6.2 [dcl.type.simple]
7.1.6.3 [dcl.type.elab]
7.1.6.4 [dcl.spec.auto]
7.2 [dcl.enum]
7.3 [basic.namespace]
7.3.1 [namespace.def]
7.3.1.1 [namespace.unnamed]
7.3.1.2 [namespace.memdef]
7.3.2 [namespace.alias]
7.3.3 [namespace.udecl]
7.3.4 [namespace.udir]
7.4 [dcl.asm]
7.5 [dcl.link]
7.6 [dcl.attr]
7.6.1 [dcl.attr.grammar]
7.6.2 [dcl.align]
7.6.3 [dcl.attr.noreturn]
7.6.4 [dcl.attr.final]
7.6.5 [dcl.attr.depend]
8 [dcl.decl]
8.1 [dcl.name]
8.2 [dcl.ambig.res]
8.3 [dcl.meaning]
8.3.1 [dcl.ptr]
8.3.2 [dcl.ref]
8.3.3 [dcl.mptr]
8.3.4 [dcl.array]
8.3.5 [dcl.fct]
8.3.6 [dcl.fct.default]
8.4 [dcl.fct.def]
8.5 [dcl.init]
8.5.1 [dcl.init.aggr]
8.5.2 [dcl.init.string]
8.5.3 [dcl.init.ref]
8.5.4 [dcl.init.list]
9 [class]
9.1 [class.name]
9.2 [class.mem]
9.3 [class.mfct]
9.3.1 [class.mfct.non-static]
9.3.2 [class.this]
9.4 [class.static]
9.4.1 [class.static.mfct]
9.4.2 [class.static.data]
9.5 [class.union]
9.6 [class.bit]
9.7 [class.nest]
9.8 [class.local]
9.9 [class.nested.type]
10 [class.derived]
10.1 [class.mi]
10.2 [class.member.lookup]
10.3 [class.virtual]
10.4 [class.abstract]
11 [class.access]
11.1 [class.access.spec]
11.2 [class.access.base]
11.3 [class.access.dcl]
11.4 [class.friend]
11.5 [class.protected]
11.6 [class.access.virt]
11.7 [class.paths]
11.8 [class.access.nest]
12 [special]
12.1 [class.ctor]
12.2 [class.temporary]
12.3 [class.conv]
12.3.1 [class.conv.ctor]
12.3.2 [class.conv.fct]
12.4 [class.dtor]
12.5 [class.free]
12.6 [class.init]
12.6.1 [class.expl.init]
12.6.2 [class.base.init]
12.7 [class.cdtor]
12.8 [class.copy]
12.9 [class.inhctor]
13 [over]
13.1 [over.load]
13.2 [over.dcl]
13.3 [over.match]
13.3.1 [over.match.funcs]
13.3.1.1 [over.match.call]
13.3.1.1.1 [over.call.func]
13.3.1.1.2 [over.call.object]
13.3.1.2 [over.match.oper]
13.3.1.3 [over.match.ctor]
13.3.1.4 [over.match.copy]
13.3.1.5 [over.match.conv]
13.3.1.6 [over.match.ref]
13.3.1.7 [over.match.list]
13.3.2 [over.match.viable]
13.3.3 [over.match.best]
13.3.3.1 [over.best.ics]
13.3.3.1.1 [over.ics.scs]
13.3.3.1.2 [over.ics.user]
13.3.3.1.3 [over.ics.ellipsis]
13.3.3.1.4 [over.ics.ref]
13.3.3.1.5 [over.ics.list]
13.3.3.2 [over.ics.rank]
13.4 [over.over]
13.5 [over.oper]
13.5.1 [over.unary]
13.5.2 [over.binary]
13.5.3 [over.ass]
13.5.4 [over.call]
13.5.5 [over.sub]
13.5.6 [over.ref]
13.5.7 [over.inc]
13.5.8 [over.literal]
13.6 [over.built]
14 [temp]
14.1 [temp.export]
14.2 [temp.param]
14.3 [temp.names]
14.4 [temp.arg]
14.4.1 [temp.arg.type]
14.4.2 [temp.arg.nontype]
14.4.3 [temp.arg.template]
14.5 [temp.type]
14.6 [temp.decls]
14.6.1 [temp.class]
14.6.1.1 [temp.mem.func]
14.6.1.2 [temp.mem.class]
14.6.1.3 [temp.static]
14.6.2 [temp.mem]
14.6.3 [temp.variadic]
14.6.4 [temp.friend]
14.6.5 [temp.class.spec]
14.6.5.1 [temp.class.spec.match]
14.6.5.2 [temp.class.order]
14.6.5.3 [temp.class.spec.mfunc]
14.6.6 [temp.fct]
14.6.6.1 [temp.over.link]
14.6.6.2 [temp.func.order]
14.6.7 [temp.alias]
14.6.8 [temp.concept.map]
14.7 [temp.res]
14.7.1 [temp.local]
14.7.2 [temp.dep]
14.7.2.1 [temp.dep.type]
14.7.2.2 [temp.dep.expr]
14.7.2.3 [temp.dep.constexpr]
14.7.2.4 [temp.dep.temp]
14.7.3 [temp.nondep]
14.7.4 [temp.dep.res]
14.7.4.1 [temp.point]
14.7.4.2 [temp.dep.candidate]
14.7.5 [temp.inject]
14.8 [temp.spec]
14.8.1 [temp.inst]
14.8.2 [temp.explicit]
14.8.3 [temp.expl.spec]
14.9 [temp.fct.spec]
14.9.1 [temp.arg.explicit]
14.9.2 [temp.deduct]
14.9.2.1 [temp.deduct.call]
14.9.2.2 [temp.deduct.funcaddr]
14.9.2.3 [temp.deduct.conv]
14.9.2.4 [temp.deduct.partial]
14.9.2.5 [temp.deduct.type]
14.9.3 [temp.over]
14.10 [concept]
14.10.1 [concept.def]
14.10.1.1 [concept.fct]
14.10.1.2 [concept.assoc]
14.10.1.3 [concept.req]
14.10.1.4 [concept.axiom]
14.10.2 [concept.map]
14.10.2.1 [concept.map.fct]
14.10.2.2 [concept.map.assoc]
14.10.3 [concept.refine]
14.10.3.1 [concept.member.lookup]
14.10.3.2 [concept.refine.maps]
14.10.4 [concept.support]
14.11 [temp.constrained]
14.11.1 [temp.req]
14.11.1.1 [temp.req.sat]
14.11.1.2 [temp.req.impl]
14.11.2 [temp.archetype]
14.11.2.1 [temp.archetype.assemble]
14.11.3 [temp.constrained.set]
14.11.4 [temp.constrained.inst]
15 [except]
15.1 [except.throw]
15.2 [except.ctor]
15.3 [except.handle]
15.4 [except.spec]
15.5 [except.special]
15.5.1 [except.terminate]
15.5.2 [except.unexpected]
15.5.3 [except.uncaught]
16 [cpp]
16.1 [cpp.cond]
16.2 [cpp.include]
16.3 [cpp.replace]
16.3.1 [cpp.subst]
16.3.2 [cpp.stringize]
16.3.3 [cpp.concat]
16.3.4 [cpp.rescan]
16.3.5 [cpp.scope]
16.4 [cpp.line]
16.5 [cpp.error]
16.6 [cpp.pragma]
16.7 [cpp.null]
16.8 [cpp.predefined]
16.9 [cpp.pragma.op]
17 [library]
17.1 [library.general]
17.2 [library.c]
17.3 [definitions]
17.3.1 [defns.arbitrary.stream]
17.3.2 [defns.blocked]
17.3.3 [defns.character]
17.3.4 [defns.character.container]
17.3.5 [defns.comparison]
17.3.6 [defns.component]
17.3.7 [defns.deadlock]
17.3.8 [defns.default.behavior]
17.3.9 [defns.handler]
17.3.10 [defns.iostream.templates]
17.3.11 [defns.modifier]
17.3.12 [defns.move.assign]
17.3.13 [defns.move.assign.op]
17.3.14 [defns.move.ctor]
17.3.15 [defns.obj.state]
17.3.16 [defns.ntcts]
17.3.17 [defns.observer]
17.3.18 [defns.replacement]
17.3.19 [defns.repositional.stream]
17.3.20 [defns.required.behavior]
17.3.21 [defns.reserved.function]
17.3.22 [defns.stable]
17.3.23 [defns.traits]
17.4 [defns.additional]
17.5 [description]
17.5.1 [structure]
17.5.1.1 [structure.elements]
17.5.1.2 [structure.summary]
17.5.1.3 [structure.requirements]
17.5.1.4 [structure.specifications]
17.5.1.5 [structure.see.also]
17.5.2 [conventions]
17.5.2.1 [type.descriptions]
17.5.2.1.1 [type.descriptions.general]
17.5.2.1.2 [enumerated.types]
17.5.2.1.3 [bitmask.types]
17.5.2.1.4 [character.seq]
17.5.2.1.4.1 [byte.strings]
17.5.2.1.4.2 [multibyte.strings]
17.5.2.1.4.3 [char16_t.seq]
17.5.2.1.4.4 [char32_t.seq]
17.5.2.1.4.5 [wide.characters]
17.5.2.2 [functions.within.classes]
17.5.2.3 [objects.within.classes]
17.6 [requirements]
17.6.1 [organization]
17.6.1.1 [contents]
17.6.1.2 [headers]
17.6.1.3 [compliance]
17.6.2 [using]
17.6.2.1 [using.overview]
17.6.2.2 [using.headers]
17.6.2.3 [using.linkage]
17.6.3 [constraints]
17.6.3.1 [constraints.overview]
17.6.3.2 [namespace.constraints]
17.6.3.2.1 [namespace.std]
17.6.3.2.2 [namespace.posix]
17.6.3.3 [reserved.names]
17.6.3.3.1 [macro.names]
17.6.3.3.2 [global.names]
17.6.3.3.3 [extern.names]
17.6.3.3.4 [extern.types]
17.6.3.3.5 [usrlit.suffix]
17.6.3.4 [alt.headers]
17.6.3.5 [derived.classes]
17.6.3.6 [replacement.functions]
17.6.3.7 [handler.functions]
17.6.3.8 [res.on.functions]
17.6.3.9 [res.on.arguments]
17.6.3.10 [res.on.objects]
17.6.3.11 [res.on.required]
17.6.4 [conforming]
17.6.4.1 [conforming.overview]
17.6.4.2 [res.on.headers]
17.6.4.3 [res.on.macro.definitions]
17.6.4.4 [global.functions]
17.6.4.5 [member.functions]
17.6.4.6 [reentrancy]
17.6.4.7 [res.on.data.races]
17.6.4.8 [protection.within.classes]
17.6.4.9 [derivation]
17.6.4.10 [res.on.exception.handling]
17.6.4.11 [res.on.pointer.storage]
17.6.4.12 [value.error.codes]
18 [language.support]
18.1 [support.general]
18.2 [support.types]
18.3 [support.limits]
18.3.1 [limits]
18.3.1.1 [numeric.limits]
18.3.1.2 [numeric.limits.members]
18.3.1.3 [round.style]
18.3.1.4 [denorm.style]
18.3.1.5 [numeric.special]
18.3.2 [c.limits]
18.4 [cstdint]
18.4.1 [cstdint.syn]
18.4.2 [stdinth]
18.5 [support.start.term]
18.6 [support.dynamic]
18.6.1 [new.delete]
18.6.1.1 [new.delete.single]
18.6.1.2 [new.delete.array]
18.6.1.3 [new.delete.placement]
18.6.1.4 [new.delete.dataraces]
18.6.2 [alloc.errors]
18.6.2.1 [bad.alloc]
18.6.2.2 [new.handler]
18.6.2.3 [set.new.handler]
18.7 [support.rtti]
18.7.1 [type.info]
18.7.2 [type.index]
18.7.2.1 [type.index.overview]
18.7.2.2 [type.index.members]
18.7.2.3 [type.index.templ]
18.7.3 [bad.cast]
18.7.4 [bad.typeid]
18.8 [support.exception]
18.8.1 [exception]
18.8.2 [exception.unexpected]
18.8.2.1 [bad.exception]
18.8.2.2 [unexpected.handler]
18.8.2.3 [set.unexpected]
18.8.2.4 [unexpected]
18.8.3 [exception.terminate]
18.8.3.1 [terminate.handler]
18.8.3.2 [set.terminate]
18.8.3.3 [terminate]
18.8.4 [uncaught]
18.8.5 [propagation]
18.8.6 [except.nested]
18.9 [support.initlist]
18.9.1 [support.initlist.cons]
18.9.2 [support.initlist.access]
18.9.3 [support.initlist.concept]
18.10 [support.runtime]
19 [diagnostics]
19.1 [diagnostics.general]
19.2 [std.exceptions]
19.2.1 [logic.error]
19.2.2 [domain.error]
19.2.3 [invalid.argument]
19.2.4 [length.error]
19.2.5 [out.of.range]
19.2.6 [runtime.error]
19.2.7 [range.error]
19.2.8 [overflow.error]
19.2.9 [underflow.error]
19.3 [assertions]
19.4 [errno]
19.5 [syserr]
19.5.1 [syserr.errcat]
19.5.1.1 [syserr.errcat.overview]
19.5.1.2 [syserr.errcat.virtuals]
19.5.1.3 [syserr.errcat.nonvirtuals]
19.5.1.4 [syserr.errcat.derived]
19.5.1.5 [syserr.errcat.objects]
19.5.2 [syserr.errcode]
19.5.2.1 [syserr.errcodeenum]
19.5.2.2 [syserr.errcode.overview]
19.5.2.3 [syserr.errcode.constructors]
19.5.2.4 [syserr.errcode.modifiers]
19.5.2.5 [syserr.errcode.observers]
19.5.2.6 [syserr.errcode.nonmembers]
19.5.3 [syserr.errcondition]
19.5.3.1 [syserr.errcondenum]
19.5.3.2 [syserr.errcondition.overview]
19.5.3.3 [syserr.errcondition.constructors]
19.5.3.4 [syserr.errcondition.modifiers]
19.5.3.5 [syserr.errcondition.observers]
19.5.3.6 [syserr.errcondition.nonmembers]
19.5.4 [syserr.compare]
19.5.5 [syserr.syserr]
19.5.5.1 [syserr.syserr.overview]
19.5.5.2 [syserr.syserr.members]
20 [utilities]
20.1 [utilities.general]
20.2 [utility.concepts]
20.2.1 [concept.transform]
20.2.2 [concept.true]
20.2.3 [concept.classify]
20.2.4 [concept.operator]
20.2.5 [concept.predicate]
20.2.6 [concept.comparison]
20.2.7 [concept.construct]
20.2.8 [concept.destruct]
20.2.9 [concept.copymove]
20.2.10 [concept.memory]
20.2.11 [concept.regular]
20.2.12 [concept.convertible]
20.2.13 [concept.arithmetic]
20.3 [utility]
20.3.1 [operators]
20.3.2 [forward]
20.3.3 [pairs]
20.3.4 [pair.astuple]
20.3.5 [pair.concepts]
20.3.6 [template.bitset]
20.3.6.1 [bitset.cons]
20.3.6.2 [bitset.members]
20.3.6.3 [bitset.operators]
20.4 [ratio]
20.4.1 [ratio.ratio]
20.4.2 [ratio.arithmetic]
20.4.3 [ratio.comparison]
20.4.4 [ratio.si]
20.5 [tuple]
20.5.1 [tuple.general]
20.5.2 [tuple.tuple]
20.5.2.1 [tuple.cnstr]
20.5.2.2 [tuple.creation]
20.5.2.3 [tuple.helper]
20.5.2.4 [tuple.elem]
20.5.2.5 [tuple.rel]
20.5.2.6 [tuple.swap]
20.5.2.7 [tuple.special]
20.5.2.8 [tuple.concepts]
20.6 [meta]
20.6.1 [meta.rqmts]
20.6.2 [meta.type.synop]
20.6.3 [meta.help]
20.6.4 [meta.unary]
20.6.4.1 [meta.unary.cat]
20.6.4.2 [meta.unary.comp]
20.6.4.3 [meta.unary.prop]
20.6.5 [meta.rel]
20.6.6 [meta.trans]
20.6.6.1 [meta.trans.cv]
20.6.6.2 [meta.trans.ref]
20.6.6.3 [meta.trans.sign]
20.6.6.4 [meta.trans.arr]
20.6.6.5 [meta.trans.ptr]
20.6.7 [meta.trans.other]
20.7 [function.objects]
20.7.1 [func.def]
20.7.2 [func.require]
20.7.3 [base]
20.7.4 [func.ret]
20.7.5 [refwrap]
20.7.5.1 [refwrap.const]
20.7.5.2 [refwrap.assign]
20.7.5.3 [refwrap.access]
20.7.5.4 [refwrap.invoke]
20.7.5.5 [refwrap.helpers]
20.7.6 [identity.operation]
20.7.7 [arithmetic.operations]
20.7.8 [comparisons]
20.7.9 [logical.operations]
20.7.10 [bitwise.operations]
20.7.11 [negators]
20.7.12 [bind]
20.7.12.1 [func.bind]
20.7.12.1.1 [func.bind.isbind]
20.7.12.1.2 [func.bind.isplace]
20.7.12.1.3 [func.bind.bind]
20.7.12.1.4 [func.bind.place]
20.7.13 [function.pointer.adaptors]
20.7.14 [member.pointer.adaptors]
20.7.15 [func.memfn]
20.7.16 [func.wrap]
20.7.16.1 [func.wrap.badcall]
20.7.16.1.1 [func.wrap.badcall.const]
20.7.16.2 [func.wrap.func]
20.7.16.2.1 [func.wrap.func.con]
20.7.16.2.2 [func.wrap.func.mod]
20.7.16.2.3 [func.wrap.func.cap]
20.7.16.2.4 [func.wrap.func.inv]
20.7.16.2.5 [func.wrap.func.targ]
20.7.16.2.6 [func.wrap.func.nullptr]
20.7.16.2.7 [func.wrap.func.alg]
20.7.17 [unord.hash]
20.8 [memory]
20.8.1 [allocator.tag]
20.8.2 [allocator]
20.8.2.1 [allocator.general]
20.8.2.2 [allocator.concepts]
20.8.2.3 [allocator.concepts.legacy]
20.8.2.4 [allocator.concepts.members]
20.8.3 [allocator.element.concepts]
20.8.4 [default.allocator]
20.8.4.1 [allocator.members]
20.8.4.2 [allocator.globals]
20.8.5 [allocator.adaptor]
20.8.5.1 [allocator.adaptor.base]
20.8.5.2 [allocator.adaptor.cntr]
20.8.5.3 [allocator.adaptor2.cntr]
20.8.5.4 [allocator.adaptor.members]
20.8.5.5 [allocator.adaptor.globals]
20.8.6 [storage.iterator]
20.8.7 [temporary.buffer]
20.8.8 [specialized.algorithms]
20.8.8.1 [object.addressof]
20.8.8.2 [uninitialized.copy]
20.8.8.3 [uninitialized.fill]
20.8.8.4 [uninitialized.fill.n]
20.8.9 [unique.ptr]
20.8.9.1 [unique.ptr.dltr]
20.8.9.1.1 [unique.ptr.dltr.dflt]
20.8.9.1.2 [unique.ptr.dltr.dflt1]
20.8.9.2 [unique.ptr.single]
20.8.9.2.1 [unique.ptr.single.ctor]
20.8.9.2.2 [unique.ptr.single.dtor]
20.8.9.2.3 [unique.ptr.single.asgn]
20.8.9.2.4 [unique.ptr.single.observers]
20.8.9.2.5 [unique.ptr.single.modifiers]
20.8.9.3 [unique.ptr.runtime]
20.8.9.3.1 [unique.ptr.runtime.ctor]
20.8.9.3.2 [unique.ptr.runtime.observers]
20.8.9.3.3 [unique.ptr.runtime.modifiers]
20.8.9.4 [unique.ptr.special]
20.8.10 [util.smartptr]
20.8.10.1 [util.smartptr.weakptr]
20.8.10.2 [util.smartptr.shared]
20.8.10.2.1 [util.smartptr.shared.const]
20.8.10.2.2 [util.smartptr.shared.dest]
20.8.10.2.3 [util.smartptr.shared.assign]
20.8.10.2.4 [util.smartptr.shared.mod]
20.8.10.2.5 [util.smartptr.shared.obs]
20.8.10.2.6 [util.smartptr.shared.create]
20.8.10.2.7 [util.smartptr.shared.cmp]
20.8.10.2.8 [util.smartptr.shared.io]
20.8.10.2.9 [util.smartptr.shared.spec]
20.8.10.2.10 [util.smartptr.shared.cast]
20.8.10.2.11 [util.smartptr.getdeleter]
20.8.10.3 [util.smartptr.weak]
20.8.10.3.1 [util.smartptr.weak.const]
20.8.10.3.2 [util.smartptr.weak.dest]
20.8.10.3.3 [util.smartptr.weak.assign]
20.8.10.3.4 [util.smartptr.weak.mod]
20.8.10.3.5 [util.smartptr.weak.obs]
20.8.10.3.6 [util.smartptr.weak.spec]
20.8.10.4 [util.smartptr.ownerless]
20.8.10.5 [util.smartptr.enab]
20.8.10.6 [util.smartptr.shared.atomic]
20.8.10.7 [util.dynamic.safety]
20.8.11 [ptr.align]
20.8.12 [c.malloc]
20.9 [time]
20.9.1 [time.clock.req]
20.9.2 [time.traits]
20.9.2.1 [time.traits.is_fp]
20.9.2.2 [time.traits.duration_values]
20.9.2.3 [time.traits.specializations]
20.9.3 [time.duration]
20.9.3.1 [time.duration.cons]
20.9.3.2 [time.duration.observer]
20.9.3.3 [time.duration.arithmetic]
20.9.3.4 [time.duration.special]
20.9.3.5 [time.duration.nonmember]
20.9.3.6 [time.duration.comparisons]
20.9.3.7 [time.duration.cast]
20.9.4 [time.point]
20.9.4.1 [time.point.cons]
20.9.4.2 [time.point.observer]
20.9.4.3 [time.point.arithmetic]
20.9.4.4 [time.point.special]
20.9.4.5 [time.point.nonmember]
20.9.4.6 [time.point.comparisons]
20.9.4.7 [time.point.cast]
20.9.5 [time.clock]
20.9.5.1 [time.clock.system]
20.9.5.2 [time.clock.monotonic]
20.9.5.3 [time.clock.hires]
20.10 [date.time]
21 [strings]
21.1 [strings.general]
21.2 [char.traits]
21.2.1 [char.traits.require]
21.2.2 [char.traits.typedefs]
21.2.3 [char.traits.specializations]
21.2.3.1 [char.traits.specializations.char]
21.2.3.2 [char.traits.specializations.char16_t]
21.2.3.3 [char.traits.specializations.char32_t]
21.2.3.4 [char.traits.specializations.wchar.t]
21.3 [string.classes]
21.4 [basic.string]
21.4.1 [string.require]
21.4.2 [string.cons]
21.4.3 [string.iterators]
21.4.4 [string.capacity]
21.4.5 [string.access]
21.4.6 [string.modifiers]
21.4.6.1 [string::op+=]
21.4.6.2 [string::append]
21.4.6.3 [string::assign]
21.4.6.4 [string::insert]
21.4.6.5 [string::erase]
21.4.6.6 [string::replace]
21.4.6.7 [string::copy]
21.4.6.8 [string::swap]
21.4.7 [string.ops]
21.4.7.1 [string.accessors]
21.4.7.2 [string::find]
21.4.7.3 [string::rfind]
21.4.7.4 [string::find.first.of]
21.4.7.5 [string::find.last.of]
21.4.7.6 [string::find.first.not.of]
21.4.7.7 [string::find.last.not.of]
21.4.7.8 [string::substr]
21.4.7.9 [string::compare]
21.4.8 [string.nonmembers]
21.4.8.1 [string::op+]
21.4.8.2 [string::operator==]
21.4.8.3 [string::op!=]
21.4.8.4 [string::op<]
21.4.8.5 [string::op>]
21.4.8.6 [string::op<=]
21.4.8.7 [string::op>=]
21.4.8.8 [string.special]
21.4.8.9 [string.io]
21.5 [string.conversions]
21.6 [c.strings]
22 [localization]
22.1 [localization.general]
22.2 [locale.syn]
22.3 [locales]
22.3.1 [locale]
22.3.1.1 [locale.types]
22.3.1.1.1 [locale.category]
22.3.1.1.2 [locale.facet]
22.3.1.1.3 [locale.id]
22.3.1.2 [locale.cons]
22.3.1.3 [locale.members]
22.3.1.4 [locale.operators]
22.3.1.5 [locale.statics]
22.3.2 [locale.global.templates]
22.3.3 [locale.convenience]
22.3.3.1 [classification]
22.3.3.2 [conversions]
22.3.3.2.1 [conversions.character]
22.3.3.2.2 [conversions.string]
22.3.3.2.3 [conversions.buffer]
22.4 [locale.categories]
22.4.1 [category.ctype]
22.4.1.1 [locale.ctype]
22.4.1.1.1 [locale.ctype.members]
22.4.1.1.2 [locale.ctype.virtuals]
22.4.1.2 [locale.ctype.byname]
22.4.1.3 [facet.ctype.special]
22.4.1.3.1 [facet.ctype.char.dtor]
22.4.1.3.2 [facet.ctype.char.members]
22.4.1.3.3 [facet.ctype.char.statics]
22.4.1.3.4 [facet.ctype.char.virtuals]
22.4.1.4 [locale.codecvt]
22.4.1.4.1 [locale.codecvt.members]
22.4.1.4.2 [locale.codecvt.virtuals]
22.4.1.5 [locale.codecvt.byname]
22.4.2 [category.numeric]
22.4.2.1 [locale.num.get]
22.4.2.1.1 [facet.num.get.members]
22.4.2.1.2 [facet.num.get.virtuals]
22.4.2.2 [locale.nm.put]
22.4.2.2.1 [facet.num.put.members]
22.4.2.2.2 [facet.num.put.virtuals]
22.4.3 [facet.numpunct]
22.4.3.1 [locale.numpunct]
22.4.3.1.1 [facet.numpunct.members]
22.4.3.1.2 [facet.numpunct.virtuals]
22.4.3.2 [locale.numpunct.byname]
22.4.4 [category.collate]
22.4.4.1 [locale.collate]
22.4.4.1.1 [locale.collate.members]
22.4.4.1.2 [locale.collate.virtuals]
22.4.4.2 [locale.collate.byname]
22.4.5 [category.time]
22.4.5.1 [locale.time.get]
22.4.5.1.1 [locale.time.get.members]
22.4.5.1.2 [locale.time.get.virtuals]
22.4.5.2 [locale.time.get.byname]
22.4.5.3 [locale.time.put]
22.4.5.3.1 [locale.time.put.members]
22.4.5.3.2 [locale.time.put.virtuals]
22.4.5.4 [locale.time.put.byname]
22.4.6 [category.monetary]
22.4.6.1 [locale.money.get]
22.4.6.1.1 [locale.money.get.members]
22.4.6.1.2 [locale.money.get.virtuals]
22.4.6.2 [locale.money.put]
22.4.6.2.1 [locale.money.put.members]
22.4.6.2.2 [locale.money.put.virtuals]
22.4.6.3 [locale.moneypunct]
22.4.6.3.1 [locale.moneypunct.members]
22.4.6.3.2 [locale.moneypunct.virtuals]
22.4.6.4 [locale.moneypunct.byname]
22.4.7 [category.messages]
22.4.7.1 [locale.messages]
22.4.7.1.1 [locale.messages.members]
22.4.7.1.2 [locale.messages.virtuals]
22.4.7.2 [locale.messages.byname]
22.4.8 [facets.examples]
22.5 [locale.stdcvt]
22.6 [c.locales]
23 [containers]
23.1 [containers.general]
23.2 [container.requirements]
23.2.1 [container.requirements.general]
23.2.2 [container.requirements.dataraces]
23.2.3 [sequence.reqmts]
23.2.4 [associative.reqmts]
23.2.4.1 [associative.reqmts.except]
23.2.5 [unord.req]
23.2.5.1 [unord.req.except]
23.2.6 [container.concepts]
23.2.6.1 [container.concepts.free]
23.2.6.2 [container.concepts.member]
23.2.6.3 [container.concepts.maps]
23.3 [sequences]
23.3.1 [array]
23.3.1.1 [array.cons]
23.3.1.2 [array.special]
23.3.1.3 [array.size]
23.3.1.4 [array.data]
23.3.1.5 [array.fill]
23.3.1.6 [array.zero]
23.3.1.7 [array.tuple]
23.3.2 [deque]
23.3.2.1 [deque.cons]
23.3.2.2 [deque.capacity]
23.3.2.3 [deque.modifiers]
23.3.2.4 [deque.special]
23.3.3 [forwardlist]
23.3.3.1 [forwardlist.cons]
23.3.3.2 [forwardlist.iter]
23.3.3.3 [forwardlist.access]
23.3.3.4 [forwardlist.modifiers]
23.3.3.5 [forwardlist.ops]
23.3.3.6 [forwardlist.spec]
23.3.4 [list]
23.3.4.1 [list.cons]
23.3.4.2 [list.capacity]
23.3.4.3 [list.modifiers]
23.3.4.4 [list.ops]
23.3.4.5 [list.special]
23.3.5 [container.adaptors]
23.3.5.1 [queue]
23.3.5.1.1 [queue.defn]
23.3.5.1.2 [queue.ops]
23.3.5.1.3 [queue.special]
23.3.5.2 [priority.queue]
23.3.5.2.1 [priqueue.cons]
23.3.5.2.2 [priqueue.members]
23.3.5.2.3 [priqueue.special]
23.3.5.3 [stack]
23.3.5.3.1 [stack.defn]
23.3.5.3.2 [stack.ops]
23.3.5.3.3 [stack.special]
23.3.6 [vector]
23.3.6.1 [vector.cons]
23.3.6.2 [vector.capacity]
23.3.6.3 [vector.data]
23.3.6.4 [vector.modifiers]
23.3.6.5 [vector.special]
23.3.7 [vector.bool]
23.4 [associative]
23.4.1 [map]
23.4.1.1 [map.cons]
23.4.1.2 [map.access]
23.4.1.3 [map.modifiers]
23.4.1.4 [map.ops]
23.4.1.5 [map.special]
23.4.2 [multimap]
23.4.2.1 [multimap.cons]
23.4.2.2 [multimap.modifiers]
23.4.2.3 [multimap.ops]
23.4.2.4 [multimap.special]
23.4.3 [set]
23.4.3.1 [set.cons]
23.4.3.2 [set.special]
23.4.4 [multiset]
23.4.4.1 [multiset.cons]
23.4.4.2 [multiset.special]
23.5 [unord]
23.5.1 [unord.map]
23.5.1.1 [unord.map.cnstr]
23.5.1.2 [unord.map.elem]
23.5.1.3 [unord.map.swap]
23.5.2 [unord.multimap]
23.5.2.1 [unord.multimap.cnstr]
23.5.2.2 [unord.multimap.swap]
23.5.3 [unord.set]
23.5.3.1 [unord.set.cnstr]
23.5.3.2 [unord.set.swap]
23.5.4 [unord.multiset]
23.5.4.1 [unord.multiset.cnstr]
23.5.4.2 [unord.multiset.swap]
24 [iterators]
24.1 [iterators.general]
24.2 [iterator.concepts]
24.2.1 [iterator.iterators]
24.2.2 [input.iterators]
24.2.3 [output.iterators]
24.2.4 [forward.iterators]
24.2.5 [bidirectional.iterators]
24.2.6 [random.access.iterators]
24.2.7 [shuffle.iterators]
24.2.8 [iterator.concepts.range]
24.3 [iterator.syn]
24.4 [iterator.operations]
24.5 [predef.iterators]
24.5.1 [reverse.iterators]
24.5.1.1 [reverse.iterator]
24.5.1.2 [reverse.iter.ops]
24.5.1.2.1 [reverse.iter.cons]
24.5.1.2.2 [reverse.iter.op=]
24.5.1.2.3 [reverse.iter.conv]
24.5.1.2.4 [reverse.iter.op.star]
24.5.1.2.5 [reverse.iter.opref]
24.5.1.2.6 [reverse.iter.op++]
24.5.1.2.7 [reverse.iter.op--]
24.5.1.2.8 [reverse.iter.op+]
24.5.1.2.9 [reverse.iter.op+=]
24.5.1.2.10 [reverse.iter.op-]
24.5.1.2.11 [reverse.iter.op-=]
24.5.1.2.12 [reverse.iter.opindex]
24.5.1.2.13 [reverse.iter.op==]
24.5.1.2.14 [reverse.iter.op<]
24.5.1.2.15 [reverse.iter.op!=]
24.5.1.2.16 [reverse.iter.op>]
24.5.1.2.17 [reverse.iter.op>=]
24.5.1.2.18 [reverse.iter.op<=]
24.5.1.2.19 [reverse.iter.opdiff]
24.5.1.2.20 [reverse.iter.opsum]
24.5.1.3 [reverse.iter.maps]
24.5.2 [move.iterators]
24.5.2.1 [move.iterator]
24.5.2.2 [move.iter.ops]
24.5.2.2.1 [move.iter.op.const]
24.5.2.2.2 [move.iter.op=]
24.5.2.2.3 [move.iter.op.conv]
24.5.2.2.4 [move.iter.op.star]
24.5.2.2.5 [move.iter.op.ref]
24.5.2.2.6 [move.iter.op.incr]
24.5.2.2.7 [move.iter.op.decr]
24.5.2.2.8 [move.iter.op.+]
24.5.2.2.9 [move.iter.op.+=]
24.5.2.2.10 [move.iter.op.-]
24.5.2.2.11 [move.iter.op.-=]
24.5.2.2.12 [move.iter.op.index]
24.5.2.2.13 [move.iter.op.comp]
24.5.2.2.14 [move.iter.nonmember]
24.5.2.2.15 [move.iter.maps]
24.6 [stream.iterators]
24.6.1 [istream.iterator]
24.6.1.1 [istream.iterator.cons]
24.6.1.2 [istream.iterator.ops]
24.6.2 [ostream.iterator]
24.6.2.1 [ostream.iterator.cons.des]
24.6.2.2 [ostream.iterator.ops]
24.6.3 [istreambuf.iterator]
24.6.3.1 [istreambuf.iterator::proxy]
24.6.3.2 [istreambuf.iterator.cons]
24.6.3.3 [istreambuf.iterator::op*]
24.6.3.4 [istreambuf.iterator::op++]
24.6.3.5 [istreambuf.iterator::equal]
24.6.3.6 [istreambuf.iterator::op==]
24.6.3.7 [istreambuf.iterator::op!=]
24.6.4 [ostreambuf.iterator]
24.6.4.1 [ostreambuf.iter.cons]
24.6.4.2 [ostreambuf.iter.ops]
24.7 [insert.iterators]
24.7.1 [back.insert.iterator]
24.7.2 [back.insert.iter.ops]
24.7.2.1 [back.insert.iter.cons]
24.7.2.2 [back.insert.iter.op=]
24.7.2.3 [back.insert.iter.op*]
24.7.2.4 [back.insert.iter.op++]
24.7.2.5 [back.inserter]
24.7.2.6 [back.insert.iter.maps]
24.7.3 [front.insert.iterator]
24.7.4 [front.insert.iter.ops]
24.7.4.1 [front.insert.iter.cons]
24.7.4.2 [front.insert.iter.op=]
24.7.4.3 [front.insert.iter.op*]
24.7.4.4 [front.insert.iter.op++]
24.7.4.5 [front.inserter]
24.7.4.6 [front.insert.iter.maps]
24.7.5 [insert.iterator]
24.7.6 [insert.iter.ops]
24.7.6.1 [insert.iter.cons]
24.7.6.2 [insert.iter.op=]
24.7.6.3 [insert.iter.op*]
24.7.6.4 [insert.iter.op++]
24.7.6.5 [inserter]
24.7.6.6 [insert.iter.maps]
25 [algorithms]
25.1 [algorithms.general]
25.2 [algorithms.syn]
25.3 [alg.nonmodifying]
25.3.1 [alg.all_of]
25.3.2 [alg.any_of]
25.3.3 [alg.none_of]
25.3.4 [alg.foreach]
25.3.5 [alg.find]
25.3.6 [alg.find.end]
25.3.7 [alg.find.first.of]
25.3.8 [alg.adjacent.find]
25.3.9 [alg.count]
25.3.10 [mismatch]
25.3.11 [alg.equal]
25.3.12 [alg.search]
25.4 [alg.modifying.operations]
25.4.1 [alg.copy]
25.4.2 [alg.move]
25.4.3 [alg.swap]
25.4.4 [alg.transform]
25.4.5 [alg.replace]
25.4.6 [alg.fill]
25.4.7 [alg.generate]
25.4.8 [alg.remove]
25.4.9 [alg.unique]
25.4.10 [alg.reverse]
25.4.11 [alg.rotate]
25.4.12 [alg.random.shuffle]
25.4.13 [alg.partitions]
25.5 [alg.sorting]
25.5.1 [alg.sort]
25.5.1.1 [sort]
25.5.1.2 [stable.sort]
25.5.1.3 [partial.sort]
25.5.1.4 [partial.sort.copy]
25.5.1.5 [is.sorted]
25.5.2 [alg.nth.element]
25.5.3 [alg.binary.search]
25.5.3.1 [lower.bound]
25.5.3.2 [upper.bound]
25.5.3.3 [equal.range]
25.5.3.4 [binary.search]
25.5.4 [alg.merge]
25.5.5 [alg.set.operations]
25.5.5.1 [includes]
25.5.5.2 [set.union]
25.5.5.3 [set.intersection]
25.5.5.4 [set.difference]
25.5.5.5 [set.symmetric.difference]
25.5.6 [alg.heap.operations]
25.5.6.1 [push.heap]
25.5.6.2 [pop.heap]
25.5.6.3 [make.heap]
25.5.6.4 [sort.heap]
25.5.6.5 [is.heap]
25.5.7 [alg.min.max]
25.5.8 [alg.lex.comparison]
25.5.9 [alg.permutation.generators]
25.6 [alg.c.library]
26 [numerics]
26.1 [numerics.general]
26.2 [numeric.requirements]
26.3 [cfenv]
26.3.1 [cfenv.syn]
26.3.2 [fenv]
26.4 [complex.numbers]
26.4.1 [complex.syn]
26.4.2 [complex]
26.4.3 [complex.special]
26.4.4 [complex.members]
26.4.5 [complex.member.ops]
26.4.6 [complex.ops]
26.4.7 [complex.value.ops]
26.4.8 [complex.transcendentals]
26.4.9 [cmplx.over]
26.4.10 [ccmplx]
26.4.11 [cmplxh]
26.5 [rand]
26.5.1 [rand.synopsis]
26.5.2 [rand.concept]
26.5.2.1 [rand.concept.seedseq]
26.5.2.2 [rand.concept.urng]
26.5.2.3 [rand.concept.eng]
26.5.2.4 [rand.concept.adapt]
26.5.2.5 [rand.concept.dist]
26.5.3 [rand.eng]
26.5.3.1 [rand.eng.lcong]
26.5.3.2 [rand.eng.mers]
26.5.3.3 [rand.eng.sub]
26.5.4 [rand.adapt]
26.5.4.1 [rand.adapt.disc]
26.5.4.2 [rand.adapt.ibits]
26.5.4.3 [rand.adapt.shuf]
26.5.5 [rand.predef]
26.5.6 [rand.device]
26.5.7 [rand.util]
26.5.7.1 [rand.util.seedseq]
26.5.7.2 [rand.util.canonical]
26.5.8 [rand.dist]
26.5.8.1 [rand.dist.uni]
26.5.8.1.1 [rand.dist.uni.int]
26.5.8.1.2 [rand.dist.uni.real]
26.5.8.2 [rand.dist.bern]
26.5.8.2.1 [rand.dist.bern.bernoulli]
26.5.8.2.2 [rand.dist.bern.bin]
26.5.8.2.3 [rand.dist.bern.geo]
26.5.8.2.4 [rand.dist.bern.negbin]
26.5.8.3 [rand.dist.pois]
26.5.8.3.1 [rand.dist.pois.poisson]
26.5.8.3.2 [rand.dist.pois.exp]
26.5.8.3.3 [rand.dist.pois.gamma]
26.5.8.3.4 [rand.dist.pois.weibull]
26.5.8.3.5 [rand.dist.pois.extreme]
26.5.8.4 [rand.dist.norm]
26.5.8.4.1 [rand.dist.norm.normal]
26.5.8.4.2 [rand.dist.norm.lognormal]
26.5.8.4.3 [rand.dist.norm.chisq]
26.5.8.4.4 [rand.dist.norm.cauchy]
26.5.8.4.5 [rand.dist.norm.f]
26.5.8.4.6 [rand.dist.norm.t]
26.5.8.5 [rand.dist.samp]
26.5.8.5.1 [rand.dist.samp.discrete]
26.5.8.5.2 [rand.dist.samp.pconst]
26.5.8.5.3 [rand.dist.samp.plinear]
26.6 [numarray]
26.6.1 [valarray.syn]
26.6.2 [template.valarray]
26.6.2.1 [valarray.cons]
26.6.2.2 [valarray.assign]
26.6.2.3 [valarray.access]
26.6.2.4 [valarray.sub]
26.6.2.5 [valarray.unary]
26.6.2.6 [valarray.cassign]
26.6.2.7 [valarray.members]
26.6.3 [valarray.nonmembers]
26.6.3.1 [valarray.binary]
26.6.3.2 [valarray.comparison]
26.6.3.3 [valarray.transcend]
26.6.3.4 [valarray.special]
26.6.4 [class.slice]
26.6.4.1 [cons.slice]
26.6.4.2 [slice.access]
26.6.5 [template.slice.array]
26.6.5.1 [slice.arr.assign]
26.6.5.2 [slice.arr.comp.assign]
26.6.5.3 [slice.arr.fill]
26.6.6 [class.gslice]
26.6.6.1 [gslice.cons]
26.6.6.2 [gslice.access]
26.6.7 [template.gslice.array]
26.6.7.1 [gslice.array.assign]
26.6.7.2 [gslice.array.comp.assign]
26.6.7.3 [gslice.array.fill]
26.6.8 [template.mask.array]
26.6.8.1 [mask.array.assign]
26.6.8.2 [mask.array.comp.assign]
26.6.8.3 [mask.array.fill]
26.6.9 [template.indirect.array]
26.6.9.1 [indirect.array.assign]
26.6.9.2 [indirect.array.comp.assign]
26.6.9.3 [indirect.array.fill]
26.6.9.4 [valarray.concepts]
26.7 [numeric.ops]
26.7.1 [accumulate]
26.7.2 [inner.product]
26.7.3 [partial.sum]
26.7.4 [adjacent.difference]
26.7.5 [numeric.iota]
26.8 [c.math]
27 [input.output]
27.1 [input.output.general]
27.2 [iostreams.requirements]
27.2.1 [iostream.limits.imbue]
27.2.2 [iostreams.limits.pos]
27.2.3 [iostreams.threadsafety]
27.3 [iostream.forward]
27.4 [iostream.objects]
27.4.1 [narrow.stream.objects]
27.4.2 [wide.stream.objects]
27.5 [iostreams.base]
27.5.1 [stream.types]
27.5.2 [ios.base]
27.5.2.1 [ios.types]
27.5.2.1.1 [ios::failure]
27.5.2.1.2 [ios::fmtflags]
27.5.2.1.3 [ios::iostate]
27.5.2.1.4 [ios::openmode]
27.5.2.1.5 [ios::seekdir]
27.5.2.1.6 [ios::Init]
27.5.2.2 [fmtflags.state]
27.5.2.3 [ios.base.locales]
27.5.2.4 [ios.members.static]
27.5.2.5 [ios.base.storage]
27.5.2.6 [ios.base.callback]
27.5.2.7 [ios.base.cons]
27.5.3 [fpos]
27.5.3.1 [fpos.members]
27.5.3.2 [fpos.operations]
27.5.4 [ios]
27.5.4.1 [basic.ios.cons]
27.5.4.2 [basic.ios.members]
27.5.4.3 [iostate.flags]
27.5.5 [std.ios.manip]
27.5.5.1 [fmtflags.manip]
27.5.5.2 [adjustfield.manip]
27.5.5.3 [basefield.manip]
27.5.5.4 [floatfield.manip]
27.5.5.5 [error.reporting]
27.6 [stream.buffers]
27.6.1 [streambuf.reqts]
27.6.2 [streambuf]
27.6.2.1 [streambuf.cons]
27.6.2.2 [streambuf.members]
27.6.2.2.1 [streambuf.locales]
27.6.2.2.2 [streambuf.buffer]
27.6.2.2.3 [streambuf.pub.get]
27.6.2.2.4 [streambuf.pub.pback]
27.6.2.2.5 [streambuf.pub.put]
27.6.2.3 [streambuf.protected]
27.6.2.3.1 [streambuf.assign]
27.6.2.3.2 [streambuf.get.area]
27.6.2.3.3 [streambuf.put.area]
27.6.2.4 [streambuf.virtuals]
27.6.2.4.1 [streambuf.virt.locales]
27.6.2.4.2 [streambuf.virt.buffer]
27.6.2.4.3 [streambuf.virt.get]
27.6.2.4.4 [streambuf.virt.pback]
27.6.2.4.5 [streambuf.virt.put]
27.7 [iostream.format]
27.7.1 [input.streams]
27.7.1.1 [istream]
27.7.1.1.1 [istream.cons]
27.7.1.1.2 [istream.assign]
27.7.1.1.3 [istream::sentry]
27.7.1.2 [istream.formatted]
27.7.1.2.1 [istream.formatted.reqmts]
27.7.1.2.2 [istream.formatted.arithmetic]
27.7.1.2.3 [istream::extractors]
27.7.1.3 [istream.unformatted]
27.7.1.4 [istream.manip]
27.7.1.5 [iostreamclass]
27.7.1.5.1 [iostream.cons]
27.7.1.5.2 [iostream.dest]
27.7.1.5.3 [iostream.assign]
27.7.1.6 [istream.rvalue]
27.7.2 [output.streams]
27.7.2.1 [ostream]
27.7.2.2 [ostream.cons]
27.7.2.3 [ostream.assign]
27.7.2.4 [ostream::sentry]
27.7.2.5 [ostream.seeks]
27.7.2.6 [ostream.formatted]
27.7.2.6.1 [ostream.formatted.reqmts]
27.7.2.6.2 [ostream.inserters.arithmetic]
27.7.2.6.3 [ostream.inserters]
27.7.2.6.4 [ostream.inserters.character]
27.7.2.7 [ostream.unformatted]
27.7.2.8 [ostream.manip]
27.7.2.9 [ostream.rvalue]
27.7.3 [std.manip]
27.7.4 [ext.manip]
27.8 [string.streams]
27.8.1 [stringbuf]
27.8.1.1 [stringbuf.cons]
27.8.1.2 [stringbuf.assign]
27.8.1.3 [stringbuf.members]
27.8.1.4 [stringbuf.virtuals]
27.8.2 [istringstream]
27.8.2.1 [istringstream.cons]
27.8.2.2 [istringstream.assign]
27.8.2.3 [istringstream.members]
27.8.3 [ostringstream]
27.8.3.1 [ostringstream.cons]
27.8.3.2 [ostringstream.assign]
27.8.3.3 [ostringstream.members]
27.8.4 [stringstream]
27.8.5 [stringstream.cons]
27.8.5.1 [stringstream.assign]
27.8.6 [stringstream.members]
27.9 [file.streams]
27.9.1 [fstreams]
27.9.1.1 [filebuf]
27.9.1.2 [filebuf.cons]
27.9.1.3 [filebuf.assign]
27.9.1.4 [filebuf.members]
27.9.1.5 [filebuf.virtuals]
27.9.1.6 [ifstream]
27.9.1.7 [ifstream.cons]
27.9.1.8 [ifstream.assign]
27.9.1.9 [ifstream.members]
27.9.1.10 [ofstream]
27.9.1.11 [ofstream.cons]
27.9.1.12 [ofstream.assign]
27.9.1.13 [ofstream.members]
27.9.1.14 [fstream]
27.9.1.15 [fstream.cons]
27.9.1.16 [fstream.assign]
27.9.1.17 [fstream.members]
27.9.2 [c.files]
28 [re]
28.1 [re.general]
28.2 [re.def]
28.2.1 [defns.regex.collating.element]
28.2.2 [defns.regex.finite.state.machine]
28.2.3 [defns.regex.format.specifier]
28.2.4 [defns.regex.matched]
28.2.5 [defns.regex.primary.equivalence.class]
28.2.6 [defns.regex.regular.expression]
28.2.7 [defns.regex.subexpression]
28.3 [re.req]
28.4 [re.syn]
28.5 [re.const]
28.5.1 [re.synopt]
28.5.2 [re.matchflag]
28.5.3 [re.err]
28.6 [re.badexp]
28.7 [re.traits]
28.8 [re.regex]
28.8.1 [re.regex.const]
28.8.2 [re.regex.construct]
28.8.3 [re.regex.assign]
28.8.4 [re.regex.operations]
28.8.5 [re.regex.locale]
28.8.6 [re.regex.swap]
28.8.7 [re.regex.nonmemb]
28.8.7.1 [re.regex.nmswap]
28.9 [re.submatch]
28.9.1 [re.submatch.members]
28.9.2 [re.submatch.op]
28.9.3 [re.submatch.concepts]
28.10 [re.results]
28.10.1 [re.results.const]
28.10.2 [re.results.size]
28.10.3 [re.results.acc]
28.10.4 [re.results.form]
28.10.5 [re.results.all]
28.10.6 [re.results.swap]
28.10.7 [re.results.nonmember]
28.11 [re.alg]
28.11.1 [re.except]
28.11.2 [re.alg.match]
28.11.3 [re.alg.search]
28.11.4 [re.alg.replace]
28.12 [re.iter]
28.12.1 [re.regiter]
28.12.1.1 [re.regiter.cnstr]
28.12.1.2 [re.regiter.comp]
28.12.1.3 [re.regiter.deref]
28.12.1.4 [re.regiter.incr]
28.12.2 [re.tokiter]
28.12.2.1 [re.tokiter.cnstr]
28.12.2.2 [re.tokiter.comp]
28.12.2.3 [re.tokiter.deref]
28.12.2.4 [re.tokiter.incr]
28.13 [re.grammar]
29 [atomics]
29.1 [atomics.general]
29.2 [atomics.syn]
29.3 [atomics.order]
29.4 [atomics.lockfree]
29.5 [atomics.types]
29.5.1 [atomics.types.integral]
29.5.2 [atomics.types.address]
29.5.3 [atomics.types.generic]
29.6 [atomics.types.operations]
29.7 [atomics.flag]
29.8 [atomics.fences]
30 [thread]
30.1 [thread.general]
30.2 [thread.req]
30.2.1 [thread.req.paramname]
30.2.2 [thread.req.exception]
30.2.3 [thread.req.native]
30.2.4 [thread.req.timing]
30.3 [thread.threads]
30.3.1 [thread.thread.class]
30.3.1.1 [thread.thread.id]
30.3.1.2 [thread.thread.constr]
30.3.1.3 [thread.thread.destr]
30.3.1.4 [thread.thread.assign]
30.3.1.5 [thread.thread.member]
30.3.1.6 [thread.thread.static]
30.3.1.7 [thread.thread.algorithm]
30.3.2 [thread.thread.this]
30.4 [thread.mutex]
30.4.1 [thread.mutex.requirements]
30.4.1.1 [thread.mutex.class]
30.4.1.2 [thread.mutex.recursive]
30.4.2 [thread.timedmutex.requirements]
30.4.2.1 [thread.timedmutex.class]
30.4.2.2 [thread.timedmutex.recursive]
30.4.3 [thread.lock]
30.4.3.1 [thread.lock.guard]
30.4.3.2 [thread.lock.unique]
30.4.3.2.1 [thread.lock.unique.cons]
30.4.3.2.2 [thread.lock.unique.locking]
30.4.3.2.3 [thread.lock.unique.mod]
30.4.3.2.4 [thread.lock.unique.obs]
30.4.4 [thread.lock.algorithm]
30.4.5 [thread.once]
30.4.5.1 [thread.once.onceflag]
30.4.5.2 [thread.once.callonce]
30.5 [thread.condition]
30.5.1 [thread.condition.condvar]
30.5.2 [thread.condition.condvarany]
30.6 [futures]
30.6.1 [futures.overview]
30.6.2 [futures.errors]
30.6.3 [futures.future_error]
30.6.4 [futures.promise]
30.6.5 [futures.unique_future]
30.6.6 [future.shared_future]
30.6.7 [futures.task]
A [gram]
A.1 [gram.key]
A.2 [gram.lex]
A.3 [gram.basic]
A.4 [gram.expr]
A.5 [gram.stmt]
A.6 [gram.dcl]
A.7 [gram.decl]
A.8 [gram.class]
A.9 [gram.derived]
A.10 [gram.special]
A.11 [gram.over]
A.12 [gram.temp]
A.13 [gram.except]
A.14 [gram.cpp]
B [implimits]
C [diff]
C.1 [diff.iso]
C.1.1 [diff.lex]
C.1.2 [diff.basic]
C.1.3 [diff.expr]
C.1.4 [diff.stat]
C.1.5 [diff.dcl]
C.1.6 [diff.decl]
C.1.7 [diff.class]
C.1.8 [diff.special]
C.1.9 [diff.cpp]
C.2 [diff.library]
C.2.1 [diff.mods.to.headers]
C.2.2 [diff.mods.to.definitions]
C.2.2.1 [diff.char16]
C.2.2.2 [diff.wchar.t]
C.2.2.3 [diff.header.iso646.h]
C.2.2.4 [diff.null]
C.2.3 [diff.mods.to.declarations]
C.2.4 [diff.mods.to.behavior]
C.2.4.1 [diff.offsetof]
C.2.4.2 [diff.malloc]
D [depr]
D.1 [depr.incr.bool]
D.2 [depr.static]
D.3 [depr.access.dcl]
D.4 [depr.string]
D.5 [depr.c.headers]
D.6 [depr.ios.members]
D.7 [depr.str.strstreams]
D.7.1 [depr.strstreambuf]
D.7.1.1 [depr.strstreambuf.cons]
D.7.1.2 [depr.strstreambuf.members]
D.7.1.3 [depr.strstreambuf.virtuals]
D.7.2 [depr.istrstream]
D.7.2.1 [depr.istrstream.cons]
D.7.2.2 [depr.istrstream.members]
D.7.3 [depr.ostrstream]
D.7.3.1 [depr.ostrstream.cons]
D.7.3.2 [depr.ostrstream.members]
D.7.4 [depr.strstream]
D.7.4.1 [depr.strstream.cons]
D.7.4.2 [depr.strstream.dest]
D.7.4.3 [depr.strstream.oper]
D.8 [depr.lib.binders]
D.8.1 [depr.lib.binder.1st]
D.8.2 [depr.lib.bind.1st]
D.8.3 [depr.lib.binder.2nd]
D.8.4 [depr.lib.bind.2nd]
D.9 [depr.auto.ptr]
D.9.1 [auto.ptr]
D.9.1.1 [auto.ptr.cons]
D.9.1.2 [auto.ptr.members]
D.9.1.3 [auto.ptr.conv]
D.10 [depr.lib.iterator.primitives]
D.10.1 [iterator.traits]
D.10.2 [iterator.basic]
D.10.3 [std.iterator.tags]
D.10.4 [iterator.backward]
E [xref]
TR1 1 [tr.intro]
TR1 1.1 [tr.description]
TR1 1.2 [tr.intro.ext]
TR1 1.3 [tr.intro.namespaces]
TR1 2 [tr.util]
TR1 2.1 [tr.util.refwrap]
TR1 2.1.1 [tr.util.refwrp.synopsis]
TR1 2.1.2 [tr.util.refwrp.refwrp]
TR1 2.1.2.1 [tr.util.refwrp.const]
TR1 2.1.2.2 [tr.util.refwrp.assign]
TR1 2.1.2.3 [tr.util.refwrp.access]
TR1 2.1.2.4 [tr.util.refwrp.invoke]
TR1 2.1.2.5 [tr.util.refwrp.helpers]
TR1 2.2 [tr.util.smartptr]
TR1 2.2.1 [tr.util.smartptr.synopsis]
TR1 2.2.2 [tr.util.smartptr.weakptr]
TR1 2.2.3 [tr.util.smartptr.shared]
TR1 2.2.3.1 [tr.util.smartptr.shared.const]
TR1 2.2.3.2 [tr.util.smartptr.shared.dest]
TR1 2.2.3.3 [tr.util.smartptr.shared.assign]
TR1 2.2.3.4 [tr.util.smartptr.shared.mod]
TR1 2.2.3.5 [tr.util.smartptr.shared.obs]
TR1 2.2.3.6 [tr.util.smartptr.shared.cmp]
TR1 2.2.3.7 [tr.util.smartptr.shared.io]
TR1 2.2.3.8 [tr.util.smartptr.shared.spec]
TR1 2.2.3.9 [tr.util.smartptr.shared.cast]
TR1 2.2.3.10 [tr.util.smartptr.getdeleter]
TR1 2.2.4 [tr.util.smartptr.weak]
TR1 2.2.4.1 [tr.util.smartptr.weak.const]
TR1 2.2.4.2 [tr.util.smartptr.weak.dest]
TR1 2.2.4.3 [tr.util.smartptr.weak.assign]
TR1 2.2.4.4 [tr.util.smartptr.weak.mod]
TR1 2.2.4.5 [tr.util.smartptr.weak.obs]
TR1 2.2.4.6 [tr.util.smartptr.weak.cmp]
TR1 2.2.4.7 [tr.util.smartptr.weak.spec]
TR1 2.2.5 [tr.util.smartptr.enab]
TR1 3 [tr.func]
TR1 3.1 [tr.func.def]
TR1 3.2 [tr.func.syn]
TR1 3.3 [tr.func.require]
TR1 3.4 [tr.func.ret]
TR1 3.5 [tr.func.memfn]
TR1 3.6 [tr.func.bind]
TR1 3.6.1 [tr.func.bind.isbind]
TR1 3.6.2 [tr.func.bind.isplace]
TR1 3.6.3 [tr.func.bind.bind]
TR1 3.6.4 [tr.func.bind.place]
TR1 3.7 [tr.func.wrap]
TR1 3.7.1 [tr.func.wrap.badcall]
TR1 3.7.1.1 [tr.func.wrap.badcall.const]
TR1 3.7.2 [tr.func.wrap.func]
TR1 3.7.2.1 [tr.func.wrap.func.con]
TR1 3.7.2.2 [tr.func.wrap.func.mod]
TR1 3.7.2.3 [tr.func.wrap.func.cap]
TR1 3.7.2.4 [tr.func.wrap.func.inv]
TR1 3.7.2.5 [tr.func.wrap.func.targ]
TR1 3.7.2.6 [tr.func.wrap.func.undef]
TR1 3.7.2.7 [tr.func.wrap.func.nullptr]
TR1 3.7.2.8 [tr.func.wrap.func.alg]
TR1 4 [tr.meta]
TR1 4.1 [tr.meta.rqmts]
TR1 4.2 [tr.meta.type.synop]
TR1 4.3 [tr.meta.help]
TR1 4.4 [tr.meta.requirements]
TR1 4.5 [tr.meta.unary]
TR1 4.5.1 [tr.meta.unary.cat]
TR1 4.5.2 [tr.meta.unary.comp]
TR1 4.5.3 [tr.meta.unary.prop]
TR1 4.6 [tr.meta.rel]
TR1 4.7 [tr.meta.trans]
TR1 4.7.1 [tr.meta.trans.cv]
TR1 4.7.2 [tr.meta.trans.ref]
TR1 4.7.3 [tr.meta.trans.arr]
TR1 4.7.4 [tr.meta.trans.ptr]
TR1 4.8 [tr.meta.trans.other]
TR1 4.9 [tr.meta.req]
TR1 5 [tr.num]
TR1 5.1 [tr.rand]
TR1 5.1.1 [tr.rand.req]
TR1 5.1.2 [tr.rand.synopsis]
TR1 5.1.3 [tr.rand.var]
TR1 5.1.4 [tr.rand.eng]
TR1 5.1.4.1 [tr.rand.eng.lcong]
TR1 5.1.4.2 [tr.rand.eng.mers]
TR1 5.1.4.3 [tr.rand.eng.sub]
TR1 5.1.4.4 [tr.rand.eng.sub1]
TR1 5.1.4.5 [tr.rand.eng.disc]
TR1 5.1.4.6 [tr.rand.eng.xor]
TR1 5.1.5 [tr.rand.predef]
TR1 5.1.6 [tr.rand.device]
TR1 5.1.7 [tr.rand.dist]
TR1 5.1.7.1 [tr.rand.dist.iunif]
TR1 5.1.7.2 [tr.rand.dist.bern]
TR1 5.1.7.3 [tr.rand.dist.geom]
TR1 5.1.7.4 [tr.rand.dist.pois]
TR1 5.1.7.5 [tr.rand.dist.bin]
TR1 5.1.7.6 [tr.rand.dist.runif]
TR1 5.1.7.7 [tr.rand.dist.exp]
TR1 5.1.7.8 [tr.rand.dist.norm]
TR1 5.1.7.9 [tr.rand.dist.gamma]
TR1 5.2 [tr.num.sf]
TR1 5.2.1 [tr.num.sf.cmath]
TR1 5.2.1.1 [tr.num.sf.Lnm]
TR1 5.2.1.2 [tr.num.sf.Plm]
TR1 5.2.1.3 [tr.num.sf.beta]
TR1 5.2.1.4 [tr.num.sf.ellK]
TR1 5.2.1.5 [tr.num.sf.ellEx]
TR1 5.2.1.6 [tr.num.sf.ellPx]
TR1 5.2.1.7 [tr.num.sf.conhyp]
TR1 5.2.1.8 [tr.num.sf.I]
TR1 5.2.1.9 [tr.num.sf.J]
TR1 5.2.1.10 [tr.num.sf.K]
TR1 5.2.1.11 [tr.num.sf.N]
TR1 5.2.1.12 [tr.num.sf.ellF]
TR1 5.2.1.13 [tr.num.sf.ellE]
TR1 5.2.1.14 [tr.num.sf.ellP]
TR1 5.2.1.15 [tr.num.sf.ei]
TR1 5.2.1.16 [tr.num.sf.Hn]
TR1 5.2.1.17 [tr.num.sf.hyper]
TR1 5.2.1.18 [tr.num.sf.Ln]
TR1 5.2.1.19 [tr.num.sf.Pl]
TR1 5.2.1.20 [tr.num.sf.riemannzeta]
TR1 5.2.1.21 [tr.num.sf.j]
TR1 5.2.1.22 [tr.num.sf.Ylm]
TR1 5.2.1.23 [tr.num.sf.n]
TR1 5.2.2 [tr.num.sf.mathh]
TR1 6 [tr.cont]
TR1 6.1 [tr.tuple]
TR1 6.1.1 [tr.tuple.synopsis]
TR1 6.1.2 [tr.tuple.synopsis.pair]
TR1 6.1.3 [tr.tuple.tuple]
TR1 6.1.3.1 [tr.tuple.cnstr]
TR1 6.1.3.2 [tr.tuple.creation]
TR1 6.1.3.3 [tr.tuple.helper]
TR1 6.1.3.4 [tr.tuple.elem]
TR1 6.1.3.5 [tr.tuple.rel]
TR1 6.1.4 [tr.tuple.pairs]
TR1 6.2 [tr.array]
TR1 6.2.1 [tr.array.syn]
TR1 6.2.2 [tr.array.array]
TR1 6.2.2.1 [tr.array.cons]
TR1 6.2.2.2 [tr.array.special]
TR1 6.2.2.3 [tr.array.size]
TR1 6.2.2.4 [tr.array.zero]
TR1 6.2.2.5 [tr.array.tuple]
TR1 6.3 [tr.hash]
TR1 6.3.1 [tr.unord.req]
TR1 6.3.1.1 [tr.unord.req.except]
TR1 6.3.2 [tr.unord.fun.syn]
TR1 6.3.3 [tr.unord.hash]
TR1 6.3.4 [tr.unord.unord]
TR1 6.3.4.1 [tr.unord.syn.set]
TR1 6.3.4.2 [tr.unord.syn.map]
TR1 6.3.4.3 [tr.unord.set]
TR1 6.3.4.3.1 [tr.unord.set.cnstr]
TR1 6.3.4.3.2 [tr.unord.set.swap]
TR1 6.3.4.4 [tr.unord.map]
TR1 6.3.4.4.1 [tr.unord.map.cnstr]
TR1 6.3.4.4.2 [tr.unord.map.elem]
TR1 6.3.4.4.3 [tr.unord.map.swap]
TR1 6.3.4.5 [tr.unord.multiset]
TR1 6.3.4.5.1 [tr.unord.multiset.cnstr]
TR1 6.3.4.5.2 [tr.unord.multiset.swap]
TR1 6.3.4.6 [tr.unord.multimap]
TR1 6.3.4.6.1 [tr.unord.multimap.cnstr]
TR1 6.3.4.6.2 [tr.unord.multimap.swap]
TR1 7 [tr.re]
TR1 7.1 [tr.re.def]
TR1 7.2 [tr.re.req]
TR1 7.3 [tr.re.sum]
TR1 7.4 [tr.re.syn]
TR1 7.5 [tr.re.const]
TR1 7.5.1 [tr.re.synopt]
TR1 7.5.2 [tr.re.matchflag]
TR1 7.5.3 [tr.re.err]
TR1 7.6 [tr.re.badexp]
TR1 7.7 [tr.re.traits]
TR1 7.8 [tr.re.regex]
TR1 7.8.1 [tr.re.regex.const]
TR1 7.8.2 [tr.re.regex.construct]
TR1 7.8.3 [tr.re.regex.assign]
TR1 7.8.4 [tr.re.regex.operations]
TR1 7.8.5 [tr.re.regex.locale]
TR1 7.8.6 [tr.re.regex.swap]
TR1 7.8.7 [tr.re.regex.nonmemb]
TR1 7.8.7.1 [tr.re.regex.nmswap]
TR1 7.9 [tr.re.submatch]
TR1 7.9.1 [tr.re.submatch.members]
TR1 7.9.2 [tr.re.submatch.op]
TR1 7.10 [tr.re.results]
TR1 7.10.1 [tr.re.results.const]
TR1 7.10.2 [tr.re.results.size]
TR1 7.10.3 [tr.re.results.acc]
TR1 7.10.4 [tr.re.results.form]
TR1 7.10.5 [tr.re.results.all]
TR1 7.10.6 [tr.re.results.swap]
TR1 7.11 [tr.re.alg]
TR1 7.11.1 [tr.re.except]
TR1 7.11.2 [tr.re.alg.match]
TR1 7.11.3 [tr.re.alg.search]
TR1 7.11.4 [tr.re.alg.replace]
TR1 7.12 [tr.re.iter]
TR1 7.12.1 [tr.re.regiter]
TR1 7.12.1.1 [tr.re.regiter.cnstr]
TR1 7.12.1.2 [tr.re.regiter.comp]
TR1 7.12.1.3 [tr.re.regiter.deref]
TR1 7.12.1.4 [tr.re.regiter.incr]
TR1 7.12.2 [tr.re.tokiter]
TR1 7.12.2.1 [tr.re.tokiter.cnstr]
TR1 7.12.2.2 [tr.re.tokiter.comp]
TR1 7.12.2.3 [tr.re.tokiter.deref]
TR1 7.12.2.4 [tr.re.tokiter.incr]
TR1 7.13 [tr.re.grammar]
TR1 8 [tr.c99]
TR1 8.1 [tr.c99.cmplx]
TR1 8.1.1 [tr.c99.cmplx.syn]
TR1 8.1.2 [tr.c99.cmplx.acos]
TR1 8.1.3 [tr.c99.cmplx.asin]
TR1 8.1.4 [tr.c99.cmplx.atan]
TR1 8.1.5 [tr.c99.cmplx.acosh]
TR1 8.1.6 [tr.c99.cmplx.asinh]
TR1 8.1.7 [tr.c99.cmplx.atanh]
TR1 8.1.8 [tr.c99.cmplx.fabs]
TR1 8.1.9 [tr.c99.cmplx.over]
TR1 8.2 [tr.c99.ccmplx]
TR1 8.3 [tr.c99.cmplxh]
TR1 8.4 [tr.c99.cctype]
TR1 8.4.1 [tr.c99.cctype.syn]
TR1 8.4.2 [tr.c99.cctype.blank]
TR1 8.5 [tr.c99.ctypeh]
TR1 8.6 [tr.c99.cfenv]
TR1 8.6.1 [tr.c99.cfenv.syn]
TR1 8.6.2 [tr.c99.cfenv.def]
TR1 8.7 [tr.c99.fenv]
TR1 8.8 [tr.c99.cfloat]
TR1 8.9 [tr.c99.floath]
TR1 8.10 [tr.c99.ios]
TR1 8.10.1 [tr.c99.ios.syn]
TR1 8.10.2 [tr.c99.ios.hex]
TR1 8.11 [tr.c99.cinttypes]
TR1 8.11.1 [tr.c99.cinttypes.syn]
TR1 8.11.2 [tr.c99.cinttypes.def]
TR1 8.12 [tr.c99.inttypesh]
TR1 8.13 [tr.c99.climits]
TR1 8.14 [tr.c99.limitsh]
TR1 8.15 [tr.c99.locale]
TR1 8.16 [tr.c99.cmath]
TR1 8.16.1 [tr.c99.cmath.syn]
TR1 8.16.2 [tr.c99.cmath.def]
TR1 8.16.3 [tr.c99.cmath.tmpl]
TR1 8.16.4 [tr.c99.cmath.over]
TR1 8.17 [tr.c99.mathh]
TR1 8.18 [tr.c99.cstdarg]
TR1 8.19 [tr.c99.stdargh]
TR1 8.20 [tr.c99.cbool]
TR1 8.21 [tr.c99.boolh]
TR1 8.22 [tr.c99.cstdint]
TR1 8.22.1 [tr.c99.cstdint.syn]
TR1 8.22.2 [tr.c99.cstdint.def]
TR1 8.23 [tr.c99.stdinth]
TR1 8.24 [tr.c99.cstdio]
TR1 8.24.1 [tr.c99.cstdio.syn]
TR1 8.24.2 [tr.c99.cstdio.def]
TR1 8.24.3 [tr.c99.cstdio.spec]
TR1 8.24.4 [tr.c99.stdioh]
TR1 8.25 [tr.c99.cstdlib]
TR1 8.25.1 [tr.c99.cstdlib.syn]
TR1 8.25.2 [tr.c99.cstdlib.def]
TR1 8.25.3 [tr.c99.cstdlib.abs]
TR1 8.25.4 [tr.c99.cstdlib.div]
TR1 8.26 [tr.c99.stdlibh]
TR1 8.27 [tr.c99.ctgmath]
TR1 8.28 [tr.c99.tgmathh]
TR1 8.29 [tr.c99.ctime]
TR1 8.30 [tr.c99.cwchar]
TR1 8.30.1 [tr.c99.cwchar.syn]
TR1 8.30.2 [tr.c99.cwchar.def]
TR1 8.30.3 [tr.c99.cwchar.spec]
TR1 8.31 [tr.c99.wcharh]
TR1 8.32 [tr.c99.cwctype]
TR1 8.32.1 [tr.c99.cwctype.syn]
TR1 8.32.2 [tr.c99.cwctype.iswblank]
TR1 8.33 [tr.c99.wctypeh]
TR1 A [tr.limits]
TRDecimal 1 [trdec.intro]
TRDecimal 1.1 [trdec.model]
TRDecimal 1.2 [trdec.encodings]
TRDecimal 1.3 [trdec.refs]
TRDecimal 2 [trdec.conventions]
TRDecimal 2.1 [trdec.relation.intro]
TRDecimal 2.2 [trdec.relation.tr1]
TRDecimal 2.3 [trdec.categories]
TRDecimal 2.4 [trdec.namespace]
TRDecimal 3 [trdec.types]
TRDecimal 3.1 [trdec.types.encodings]
TRDecimal 3.2 [trdec.types.types]
TRDecimal 3.2.1 [trdec.types.types.synopsis]
TRDecimal 3.2.2 [trdec.types.types.decimal32]
TRDecimal 3.2.2.1 [trdec.types.types.decimal32.cons]
TRDecimal 3.2.2.2 [trdec.types.types.decimal32.conv.float]
TRDecimal 3.2.2.3 [trdec.types.types.decimal32.conv.from.int]
TRDecimal 3.2.2.4 [trdec.types.types.decimal32.conv.to.int]
TRDecimal 3.2.2.5 [trdec.types.types.decimal32.incr]
TRDecimal 3.2.2.6 [trdec.types.types.decimal32.comp.ass]
TRDecimal 3.2.3 [trdec.types.types.decimal64]
TRDecimal 3.2.3.1 [trdec.types.types.decimal64.cons]
TRDecimal 3.2.3.2 [trdec.types.types.decimal64.float]
TRDecimal 3.2.3.3 [trdec.types.types.decimal64.from.int]
TRDecimal 3.2.3.4 [trdec.types.types.decimal64.to.int]
TRDecimal 3.2.3.5 [trdec.types.types.decimal64.incr]
TRDecimal 3.2.3.6 [trdec.types.types.decimal64.comp.ass]
TRDecimal 3.2.4 [trdec.types.types.decimal128]
TRDecimal 3.2.4.1 [trdec.types.types.decimal128.cons]
TRDecimal 3.2.4.2 [trdec.types.types.decimal128.float]
TRDecimal 3.2.4.3 [trdec.types.types.decimal128.from.int]
TRDecimal 3.2.4.4 [trdec.types.types.decimal128.to.int]
TRDecimal 3.2.4.5 [trdec.types.types.decimal128.incr]
TRDecimal 3.2.4.6 [trdec.types.types.decimal128.comp.ass]
TRDecimal 3.2.5 [trdec.types.types.init]
TRDecimal 3.2.6 [trdec.types.types.conv.float]
TRDecimal 3.2.7 [trdec.types.types.unary]
TRDecimal 3.2.8 [trdec.types.types.binary]
TRDecimal 3.2.9 [trdec.types.types.comp]
TRDecimal 3.2.10 [trdec.types.types.input]
TRDecimal 3.2.11 [trdec.types.types.output]
TRDecimal 3.3 [trdec.types.limits]
TRDecimal 3.4 [trdec.types.cdecfloat]
TRDecimal 3.4.1 [trdec.types.cdecfloat.synopsis]
TRDecimal 3.4.2 [trdec.types.decfloat.h.synopsis]
TRDecimal 3.4.3 [trdec.types.cdecfloat.max.value]
TRDecimal 3.4.4 [trdec.types.cdecfloat.epsilon]
TRDecimal 3.4.5 [trdec.types.cdecfloat.min.normal.value]
TRDecimal 3.4.6 [trdec.types.cdecfloat.min.subnormal.value]
TRDecimal 3.4.7 [trdec.types.cdecfloat.eval.format]
TRDecimal 3.5 [trdec.types.cfenv]
TRDecimal 3.5.1 [trdec.types.cfenv.synopsis]
TRDecimal 3.5.2 [trdec.types.cfenv.round]
TRDecimal 3.5.3 [trdec.types.cfenv.fe_dec_getround]
TRDecimal 3.5.4 [trdec.types.cfenv.fe_dec_setround]
TRDecimal 3.5.5 [trdec.types.cfenv.fenv.h]
TRDecimal 3.6 [trdec.types.cmath]
TRDecimal 3.6.1 [trdec.types.cmath.synopsis]
TRDecimal 3.6.2 [trdec.types.cmath.macros]
TRDecimal 3.6.3 [trdec.types.cmath.eval.format]
TRDecimal 3.6.4 [trdec.types.cmath.samequantum]
TRDecimal 3.6.5 [trdec.types.cmath.quantize]
TRDecimal 3.6.6 [trdec.types.cmath.elementary]
TRDecimal 3.6.6.1 [trdec.types.cmath.elementary.abs]
TRDecimal 3.6.7 [trdec.types.cmath.math.h]
TRDecimal 3.6.7.1 [trdec.types.cmath.math.h.synopsis]
TRDecimal 3.7 [trdec.types.cstdio]
TRDecimal 3.8 [trdec.types.cstdlib]
TRDecimal 3.8.1 [trdec.types.cstdlib.synopsis]
TRDecimal 3.8.2 [trdec.types.cstdlib.strtod]
TRDecimal 3.8.3 [trdec.types.cstdlib.stdlib.h]
TRDecimal 3.9 [trdec.types.cwchar]
TRDecimal 3.9.1 [trdec.types.cwchar.synopsis]
TRDecimal 3.9.2 [trdec.types.cwchar.wcstod]
TRDecimal 3.9.3 [trdec.types.cwchar.wchar.h]
TRDecimal 3.10 [trdec.types.facets]
TRDecimal 3.10.1 [trdec.types.facets.locale]
TRDecimal 3.10.2 [trdec.types.facets.extended_num_get]
TRDecimal 3.10.2.1 [trdec.types.facets.extended_num_get.mem]
TRDecimal 3.10.2.2 [trdec.types.facets.extended_num_get.virt]
TRDecimal 3.10.3 [trdec.types.facets.extended_num_put]
TRDecimal 3.10.3.1 [trdec.types.facets.extended_num_put.mem]
TRDecimal 3.10.3.2 [trdec.types.facets.extended_num_put.virt]
TRDecimal 3.11 [trdec.types.traits]
TRDecimal 3.11.1 [trdec.types.traits.synopsis]
TRDecimal 3.11.2 [trdec.types.traits.is_decimal_floating_point]
TRDecimal 3.12 [trdec.types.hash]
TRDecimal 3.12.1 [trdec.types.hash.synopsis]
TRDecimal 3.12.2 [trdec.types.hash.spec]
TRDecimal 4 [trdec.compat]
TRDecimal 4.1 [trdec.compat.decfloat.h]
TRDecimal 4.2 [trdec.compat.literals]
TRDecimal 4.3 [trdec.compat.conv]
| {
"pile_set_name": "Github"
} |
{ config, lib, ... }:
with lib;
let
cfg = config.hardware.ksm;
in {
imports = [
(mkRenamedOptionModule [ "hardware" "enableKSM" ] [ "hardware" "ksm" "enable" ])
];
options.hardware.ksm = {
enable = mkEnableOption "Kernel Same-Page Merging";
sleep = mkOption {
type = types.nullOr types.int;
default = null;
description = ''
How many milliseconds ksmd should sleep between scans.
Setting it to <literal>null</literal> uses the kernel's default time.
'';
};
};
config = mkIf cfg.enable {
systemd.services.enable-ksm = {
description = "Enable Kernel Same-Page Merging";
wantedBy = [ "multi-user.target" ];
after = [ "systemd-udev-settle.service" ];
script = ''
if [ -e /sys/kernel/mm/ksm ]; then
echo 1 > /sys/kernel/mm/ksm/run
${optionalString (cfg.sleep != null) ''echo ${toString cfg.sleep} > /sys/kernel/mm/ksm/sleep_millisecs''}
fi
'';
};
};
}
| {
"pile_set_name": "Github"
} |
// CodeMirror, copyright (c) by Marijn Haverbeke and others
// Distributed under an MIT license: http://codemirror.net/LICENSE
(function(mod) {
if (typeof exports == "object" && typeof module == "object") // CommonJS
mod(require("../../lib/codemirror"), require("../python/python"), require("../stex/stex"), require("../../addon/mode/overlay"));
else if (typeof define == "function" && define.amd) // AMD
define(["../../lib/codemirror", "../python/python", "../stex/stex", "../../addon/mode/overlay"], mod);
else // Plain browser env
mod(CodeMirror);
})(function(CodeMirror) {
"use strict";
CodeMirror.defineMode('rst', function (config, options) {
var rx_strong = /^\*\*[^\*\s](?:[^\*]*[^\*\s])?\*\*/;
var rx_emphasis = /^\*[^\*\s](?:[^\*]*[^\*\s])?\*/;
var rx_literal = /^``[^`\s](?:[^`]*[^`\s])``/;
var rx_number = /^(?:[\d]+(?:[\.,]\d+)*)/;
var rx_positive = /^(?:\s\+[\d]+(?:[\.,]\d+)*)/;
var rx_negative = /^(?:\s\-[\d]+(?:[\.,]\d+)*)/;
var rx_uri_protocol = "[Hh][Tt][Tt][Pp][Ss]?://";
var rx_uri_domain = "(?:[\\d\\w.-]+)\\.(?:\\w{2,6})";
var rx_uri_path = "(?:/[\\d\\w\\#\\%\\&\\-\\.\\,\\/\\:\\=\\?\\~]+)*";
var rx_uri = new RegExp("^" + rx_uri_protocol + rx_uri_domain + rx_uri_path);
var overlay = {
token: function (stream) {
if (stream.match(rx_strong) && stream.match (/\W+|$/, false))
return 'strong';
if (stream.match(rx_emphasis) && stream.match (/\W+|$/, false))
return 'em';
if (stream.match(rx_literal) && stream.match (/\W+|$/, false))
return 'string-2';
if (stream.match(rx_number))
return 'number';
if (stream.match(rx_positive))
return 'positive';
if (stream.match(rx_negative))
return 'negative';
if (stream.match(rx_uri))
return 'link';
while (stream.next() != null) {
if (stream.match(rx_strong, false)) break;
if (stream.match(rx_emphasis, false)) break;
if (stream.match(rx_literal, false)) break;
if (stream.match(rx_number, false)) break;
if (stream.match(rx_positive, false)) break;
if (stream.match(rx_negative, false)) break;
if (stream.match(rx_uri, false)) break;
}
return null;
}
};
var mode = CodeMirror.getMode(
config, options.backdrop || 'rst-base'
);
return CodeMirror.overlayMode(mode, overlay, true); // combine
}, 'python', 'stex');
///////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////
CodeMirror.defineMode('rst-base', function (config) {
///////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////
function format(string) {
var args = Array.prototype.slice.call(arguments, 1);
return string.replace(/{(\d+)}/g, function (match, n) {
return typeof args[n] != 'undefined' ? args[n] : match;
});
}
///////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////
var mode_python = CodeMirror.getMode(config, 'python');
var mode_stex = CodeMirror.getMode(config, 'stex');
///////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////
var SEPA = "\\s+";
var TAIL = "(?:\\s*|\\W|$)",
rx_TAIL = new RegExp(format('^{0}', TAIL));
var NAME =
"(?:[^\\W\\d_](?:[\\w!\"#$%&'()\\*\\+,\\-\\.\/:;<=>\\?]*[^\\W_])?)",
rx_NAME = new RegExp(format('^{0}', NAME));
var NAME_WWS =
"(?:[^\\W\\d_](?:[\\w\\s!\"#$%&'()\\*\\+,\\-\\.\/:;<=>\\?]*[^\\W_])?)";
var REF_NAME = format('(?:{0}|`{1}`)', NAME, NAME_WWS);
var TEXT1 = "(?:[^\\s\\|](?:[^\\|]*[^\\s\\|])?)";
var TEXT2 = "(?:[^\\`]+)",
rx_TEXT2 = new RegExp(format('^{0}', TEXT2));
var rx_section = new RegExp(
"^([!'#$%&\"()*+,-./:;<=>?@\\[\\\\\\]^_`{|}~])\\1{3,}\\s*$");
var rx_explicit = new RegExp(
format('^\\.\\.{0}', SEPA));
var rx_link = new RegExp(
format('^_{0}:{1}|^__:{1}', REF_NAME, TAIL));
var rx_directive = new RegExp(
format('^{0}::{1}', REF_NAME, TAIL));
var rx_substitution = new RegExp(
format('^\\|{0}\\|{1}{2}::{3}', TEXT1, SEPA, REF_NAME, TAIL));
var rx_footnote = new RegExp(
format('^\\[(?:\\d+|#{0}?|\\*)]{1}', REF_NAME, TAIL));
var rx_citation = new RegExp(
format('^\\[{0}\\]{1}', REF_NAME, TAIL));
var rx_substitution_ref = new RegExp(
format('^\\|{0}\\|', TEXT1));
var rx_footnote_ref = new RegExp(
format('^\\[(?:\\d+|#{0}?|\\*)]_', REF_NAME));
var rx_citation_ref = new RegExp(
format('^\\[{0}\\]_', REF_NAME));
var rx_link_ref1 = new RegExp(
format('^{0}__?', REF_NAME));
var rx_link_ref2 = new RegExp(
format('^`{0}`_', TEXT2));
var rx_role_pre = new RegExp(
format('^:{0}:`{1}`{2}', NAME, TEXT2, TAIL));
var rx_role_suf = new RegExp(
format('^`{1}`:{0}:{2}', NAME, TEXT2, TAIL));
var rx_role = new RegExp(
format('^:{0}:{1}', NAME, TAIL));
var rx_directive_name = new RegExp(format('^{0}', REF_NAME));
var rx_directive_tail = new RegExp(format('^::{0}', TAIL));
var rx_substitution_text = new RegExp(format('^\\|{0}\\|', TEXT1));
var rx_substitution_sepa = new RegExp(format('^{0}', SEPA));
var rx_substitution_name = new RegExp(format('^{0}', REF_NAME));
var rx_substitution_tail = new RegExp(format('^::{0}', TAIL));
var rx_link_head = new RegExp("^_");
var rx_link_name = new RegExp(format('^{0}|_', REF_NAME));
var rx_link_tail = new RegExp(format('^:{0}', TAIL));
var rx_verbatim = new RegExp('^::\\s*$');
var rx_examples = new RegExp('^\\s+(?:>>>|In \\[\\d+\\]:)\\s');
///////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////
function to_normal(stream, state) {
var token = null;
if (stream.sol() && stream.match(rx_examples, false)) {
change(state, to_mode, {
mode: mode_python, local: CodeMirror.startState(mode_python)
});
} else if (stream.sol() && stream.match(rx_explicit)) {
change(state, to_explicit);
token = 'meta';
} else if (stream.sol() && stream.match(rx_section)) {
change(state, to_normal);
token = 'header';
} else if (phase(state) == rx_role_pre ||
stream.match(rx_role_pre, false)) {
switch (stage(state)) {
case 0:
change(state, to_normal, context(rx_role_pre, 1));
stream.match(/^:/);
token = 'meta';
break;
case 1:
change(state, to_normal, context(rx_role_pre, 2));
stream.match(rx_NAME);
token = 'keyword';
if (stream.current().match(/^(?:math|latex)/)) {
state.tmp_stex = true;
}
break;
case 2:
change(state, to_normal, context(rx_role_pre, 3));
stream.match(/^:`/);
token = 'meta';
break;
case 3:
if (state.tmp_stex) {
state.tmp_stex = undefined; state.tmp = {
mode: mode_stex, local: CodeMirror.startState(mode_stex)
};
}
if (state.tmp) {
if (stream.peek() == '`') {
change(state, to_normal, context(rx_role_pre, 4));
state.tmp = undefined;
break;
}
token = state.tmp.mode.token(stream, state.tmp.local);
break;
}
change(state, to_normal, context(rx_role_pre, 4));
stream.match(rx_TEXT2);
token = 'string';
break;
case 4:
change(state, to_normal, context(rx_role_pre, 5));
stream.match(/^`/);
token = 'meta';
break;
case 5:
change(state, to_normal, context(rx_role_pre, 6));
stream.match(rx_TAIL);
break;
default:
change(state, to_normal);
}
} else if (phase(state) == rx_role_suf ||
stream.match(rx_role_suf, false)) {
switch (stage(state)) {
case 0:
change(state, to_normal, context(rx_role_suf, 1));
stream.match(/^`/);
token = 'meta';
break;
case 1:
change(state, to_normal, context(rx_role_suf, 2));
stream.match(rx_TEXT2);
token = 'string';
break;
case 2:
change(state, to_normal, context(rx_role_suf, 3));
stream.match(/^`:/);
token = 'meta';
break;
case 3:
change(state, to_normal, context(rx_role_suf, 4));
stream.match(rx_NAME);
token = 'keyword';
break;
case 4:
change(state, to_normal, context(rx_role_suf, 5));
stream.match(/^:/);
token = 'meta';
break;
case 5:
change(state, to_normal, context(rx_role_suf, 6));
stream.match(rx_TAIL);
break;
default:
change(state, to_normal);
}
} else if (phase(state) == rx_role || stream.match(rx_role, false)) {
switch (stage(state)) {
case 0:
change(state, to_normal, context(rx_role, 1));
stream.match(/^:/);
token = 'meta';
break;
case 1:
change(state, to_normal, context(rx_role, 2));
stream.match(rx_NAME);
token = 'keyword';
break;
case 2:
change(state, to_normal, context(rx_role, 3));
stream.match(/^:/);
token = 'meta';
break;
case 3:
change(state, to_normal, context(rx_role, 4));
stream.match(rx_TAIL);
break;
default:
change(state, to_normal);
}
} else if (phase(state) == rx_substitution_ref ||
stream.match(rx_substitution_ref, false)) {
switch (stage(state)) {
case 0:
change(state, to_normal, context(rx_substitution_ref, 1));
stream.match(rx_substitution_text);
token = 'variable-2';
break;
case 1:
change(state, to_normal, context(rx_substitution_ref, 2));
if (stream.match(/^_?_?/)) token = 'link';
break;
default:
change(state, to_normal);
}
} else if (stream.match(rx_footnote_ref)) {
change(state, to_normal);
token = 'quote';
} else if (stream.match(rx_citation_ref)) {
change(state, to_normal);
token = 'quote';
} else if (stream.match(rx_link_ref1)) {
change(state, to_normal);
if (!stream.peek() || stream.peek().match(/^\W$/)) {
token = 'link';
}
} else if (phase(state) == rx_link_ref2 ||
stream.match(rx_link_ref2, false)) {
switch (stage(state)) {
case 0:
if (!stream.peek() || stream.peek().match(/^\W$/)) {
change(state, to_normal, context(rx_link_ref2, 1));
} else {
stream.match(rx_link_ref2);
}
break;
case 1:
change(state, to_normal, context(rx_link_ref2, 2));
stream.match(/^`/);
token = 'link';
break;
case 2:
change(state, to_normal, context(rx_link_ref2, 3));
stream.match(rx_TEXT2);
break;
case 3:
change(state, to_normal, context(rx_link_ref2, 4));
stream.match(/^`_/);
token = 'link';
break;
default:
change(state, to_normal);
}
} else if (stream.match(rx_verbatim)) {
change(state, to_verbatim);
}
else {
if (stream.next()) change(state, to_normal);
}
return token;
}
///////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////
function to_explicit(stream, state) {
var token = null;
if (phase(state) == rx_substitution ||
stream.match(rx_substitution, false)) {
switch (stage(state)) {
case 0:
change(state, to_explicit, context(rx_substitution, 1));
stream.match(rx_substitution_text);
token = 'variable-2';
break;
case 1:
change(state, to_explicit, context(rx_substitution, 2));
stream.match(rx_substitution_sepa);
break;
case 2:
change(state, to_explicit, context(rx_substitution, 3));
stream.match(rx_substitution_name);
token = 'keyword';
break;
case 3:
change(state, to_explicit, context(rx_substitution, 4));
stream.match(rx_substitution_tail);
token = 'meta';
break;
default:
change(state, to_normal);
}
} else if (phase(state) == rx_directive ||
stream.match(rx_directive, false)) {
switch (stage(state)) {
case 0:
change(state, to_explicit, context(rx_directive, 1));
stream.match(rx_directive_name);
token = 'keyword';
if (stream.current().match(/^(?:math|latex)/))
state.tmp_stex = true;
else if (stream.current().match(/^python/))
state.tmp_py = true;
break;
case 1:
change(state, to_explicit, context(rx_directive, 2));
stream.match(rx_directive_tail);
token = 'meta';
if (stream.match(/^latex\s*$/) || state.tmp_stex) {
state.tmp_stex = undefined; change(state, to_mode, {
mode: mode_stex, local: CodeMirror.startState(mode_stex)
});
}
break;
case 2:
change(state, to_explicit, context(rx_directive, 3));
if (stream.match(/^python\s*$/) || state.tmp_py) {
state.tmp_py = undefined; change(state, to_mode, {
mode: mode_python, local: CodeMirror.startState(mode_python)
});
}
break;
default:
change(state, to_normal);
}
} else if (phase(state) == rx_link || stream.match(rx_link, false)) {
switch (stage(state)) {
case 0:
change(state, to_explicit, context(rx_link, 1));
stream.match(rx_link_head);
stream.match(rx_link_name);
token = 'link';
break;
case 1:
change(state, to_explicit, context(rx_link, 2));
stream.match(rx_link_tail);
token = 'meta';
break;
default:
change(state, to_normal);
}
} else if (stream.match(rx_footnote)) {
change(state, to_normal);
token = 'quote';
} else if (stream.match(rx_citation)) {
change(state, to_normal);
token = 'quote';
}
else {
stream.eatSpace();
if (stream.eol()) {
change(state, to_normal);
} else {
stream.skipToEnd();
change(state, to_comment);
token = 'comment';
}
}
return token;
}
///////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////
function to_comment(stream, state) {
return as_block(stream, state, 'comment');
}
function to_verbatim(stream, state) {
return as_block(stream, state, 'meta');
}
function as_block(stream, state, token) {
if (stream.eol() || stream.eatSpace()) {
stream.skipToEnd();
return token;
} else {
change(state, to_normal);
return null;
}
}
///////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////
function to_mode(stream, state) {
if (state.ctx.mode && state.ctx.local) {
if (stream.sol()) {
if (!stream.eatSpace()) change(state, to_normal);
return null;
}
return state.ctx.mode.token(stream, state.ctx.local);
}
change(state, to_normal);
return null;
}
///////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////
function context(phase, stage, mode, local) {
return {phase: phase, stage: stage, mode: mode, local: local};
}
function change(state, tok, ctx) {
state.tok = tok;
state.ctx = ctx || {};
}
function stage(state) {
return state.ctx.stage || 0;
}
function phase(state) {
return state.ctx.phase;
}
///////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////
return {
startState: function () {
return {tok: to_normal, ctx: context(undefined, 0)};
},
copyState: function (state) {
var ctx = state.ctx, tmp = state.tmp;
if (ctx.local)
ctx = {mode: ctx.mode, local: CodeMirror.copyState(ctx.mode, ctx.local)};
if (tmp)
tmp = {mode: tmp.mode, local: CodeMirror.copyState(tmp.mode, tmp.local)};
return {tok: state.tok, ctx: ctx, tmp: tmp};
},
innerMode: function (state) {
return state.tmp ? {state: state.tmp.local, mode: state.tmp.mode}
: state.ctx.mode ? {state: state.ctx.local, mode: state.ctx.mode}
: null;
},
token: function (stream, state) {
return state.tok(stream, state);
}
};
}, 'python', 'stex');
///////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////
CodeMirror.defineMIME('text/x-rst', 'rst');
///////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////
});
| {
"pile_set_name": "Github"
} |
// cgo -godefs types_aix.go | go run mkpost.go
// Code generated by the command above; see README.md. DO NOT EDIT.
// +build ppc,aix
package unix
const (
SizeofPtr = 0x4
SizeofShort = 0x2
SizeofInt = 0x4
SizeofLong = 0x4
SizeofLongLong = 0x8
PathMax = 0x3ff
)
type (
_C_short int16
_C_int int32
_C_long int32
_C_long_long int64
)
type off64 int64
type off int32
type Mode_t uint32
type Timespec struct {
Sec int32
Nsec int32
}
type Timeval struct {
Sec int32
Usec int32
}
type Timeval32 struct {
Sec int32
Usec int32
}
type Timex struct{}
type Time_t int32
type Tms struct{}
type Utimbuf struct {
Actime int32
Modtime int32
}
type Timezone struct {
Minuteswest int32
Dsttime int32
}
type Rusage struct {
Utime Timeval
Stime Timeval
Maxrss int32
Ixrss int32
Idrss int32
Isrss int32
Minflt int32
Majflt int32
Nswap int32
Inblock int32
Oublock int32
Msgsnd int32
Msgrcv int32
Nsignals int32
Nvcsw int32
Nivcsw int32
}
type Rlimit struct {
Cur uint64
Max uint64
}
type Pid_t int32
type _Gid_t uint32
type dev_t uint32
type Stat_t struct {
Dev uint32
Ino uint32
Mode uint32
Nlink int16
Flag uint16
Uid uint32
Gid uint32
Rdev uint32
Size int32
Atim Timespec
Mtim Timespec
Ctim Timespec
Blksize int32
Blocks int32
Vfstype int32
Vfs uint32
Type uint32
Gen uint32
Reserved [9]uint32
}
type StatxTimestamp struct{}
type Statx_t struct{}
type Dirent struct {
Offset uint32
Ino uint32
Reclen uint16
Namlen uint16
Name [256]uint8
}
type RawSockaddrInet4 struct {
Len uint8
Family uint8
Port uint16
Addr [4]byte /* in_addr */
Zero [8]uint8
}
type RawSockaddrInet6 struct {
Len uint8
Family uint8
Port uint16
Flowinfo uint32
Addr [16]byte /* in6_addr */
Scope_id uint32
}
type RawSockaddrUnix struct {
Len uint8
Family uint8
Path [1023]uint8
}
type RawSockaddrDatalink struct {
Len uint8
Family uint8
Index uint16
Type uint8
Nlen uint8
Alen uint8
Slen uint8
Data [120]uint8
}
type RawSockaddr struct {
Len uint8
Family uint8
Data [14]uint8
}
type RawSockaddrAny struct {
Addr RawSockaddr
Pad [1012]uint8
}
type _Socklen uint32
type Cmsghdr struct {
Len uint32
Level int32
Type int32
}
type ICMPv6Filter struct {
Filt [8]uint32
}
type Iovec struct {
Base *byte
Len uint32
}
type IPMreq struct {
Multiaddr [4]byte /* in_addr */
Interface [4]byte /* in_addr */
}
type IPv6Mreq struct {
Multiaddr [16]byte /* in6_addr */
Interface uint32
}
type IPv6MTUInfo struct {
Addr RawSockaddrInet6
Mtu uint32
}
type Linger struct {
Onoff int32
Linger int32
}
type Msghdr struct {
Name *byte
Namelen uint32
Iov *Iovec
Iovlen int32
Control *byte
Controllen uint32
Flags int32
}
const (
SizeofSockaddrInet4 = 0x10
SizeofSockaddrInet6 = 0x1c
SizeofSockaddrAny = 0x404
SizeofSockaddrUnix = 0x401
SizeofSockaddrDatalink = 0x80
SizeofLinger = 0x8
SizeofIPMreq = 0x8
SizeofIPv6Mreq = 0x14
SizeofIPv6MTUInfo = 0x20
SizeofMsghdr = 0x1c
SizeofCmsghdr = 0xc
SizeofICMPv6Filter = 0x20
)
const (
SizeofIfMsghdr = 0x10
)
type IfMsgHdr struct {
Msglen uint16
Version uint8
Type uint8
Addrs int32
Flags int32
Index uint16
Addrlen uint8
_ [1]byte
}
type FdSet struct {
Bits [2048]int32
}
type Utsname struct {
Sysname [32]byte
Nodename [32]byte
Release [32]byte
Version [32]byte
Machine [32]byte
}
type Ustat_t struct{}
type Sigset_t struct {
Losigs uint32
Hisigs uint32
}
const (
AT_FDCWD = -0x2
AT_REMOVEDIR = 0x1
AT_SYMLINK_NOFOLLOW = 0x1
)
type Termios struct {
Iflag uint32
Oflag uint32
Cflag uint32
Lflag uint32
Cc [16]uint8
}
type Termio struct {
Iflag uint16
Oflag uint16
Cflag uint16
Lflag uint16
Line uint8
Cc [8]uint8
_ [1]byte
}
type Winsize struct {
Row uint16
Col uint16
Xpixel uint16
Ypixel uint16
}
type PollFd struct {
Fd int32
Events uint16
Revents uint16
}
const (
POLLERR = 0x4000
POLLHUP = 0x2000
POLLIN = 0x1
POLLNVAL = 0x8000
POLLOUT = 0x2
POLLPRI = 0x4
POLLRDBAND = 0x20
POLLRDNORM = 0x10
POLLWRBAND = 0x40
POLLWRNORM = 0x2
)
type Flock_t struct {
Type int16
Whence int16
Sysid uint32
Pid int32
Vfs int32
Start int64
Len int64
}
type Fsid_t struct {
Val [2]uint32
}
type Fsid64_t struct {
Val [2]uint64
}
type Statfs_t struct {
Version int32
Type int32
Bsize uint32
Blocks uint32
Bfree uint32
Bavail uint32
Files uint32
Ffree uint32
Fsid Fsid_t
Vfstype int32
Fsize uint32
Vfsnumber int32
Vfsoff int32
Vfslen int32
Vfsvers int32
Fname [32]uint8
Fpack [32]uint8
Name_max int32
}
const RNDGETENTCNT = 0x80045200
| {
"pile_set_name": "Github"
} |
/*
Copyright (c) 1998, 1999 Thai Open Source Software Center Ltd
See the file COPYING for copying permission.
*/
#ifndef XmlRole_INCLUDED
#define XmlRole_INCLUDED 1
#include "xmltok.h"
#ifdef __cplusplus
extern "C" {
#endif
enum {
XML_ROLE_ERROR = -1,
XML_ROLE_NONE = 0,
XML_ROLE_XML_DECL,
XML_ROLE_INSTANCE_START,
XML_ROLE_DOCTYPE_NAME,
XML_ROLE_DOCTYPE_SYSTEM_ID,
XML_ROLE_DOCTYPE_PUBLIC_ID,
XML_ROLE_DOCTYPE_INTERNAL_SUBSET,
XML_ROLE_DOCTYPE_CLOSE,
XML_ROLE_GENERAL_ENTITY_NAME,
XML_ROLE_PARAM_ENTITY_NAME,
XML_ROLE_ENTITY_VALUE,
XML_ROLE_ENTITY_SYSTEM_ID,
XML_ROLE_ENTITY_PUBLIC_ID,
XML_ROLE_ENTITY_COMPLETE,
XML_ROLE_ENTITY_NOTATION_NAME,
XML_ROLE_NOTATION_NAME,
XML_ROLE_NOTATION_SYSTEM_ID,
XML_ROLE_NOTATION_NO_SYSTEM_ID,
XML_ROLE_NOTATION_PUBLIC_ID,
XML_ROLE_ATTRIBUTE_NAME,
XML_ROLE_ATTRIBUTE_TYPE_CDATA,
XML_ROLE_ATTRIBUTE_TYPE_ID,
XML_ROLE_ATTRIBUTE_TYPE_IDREF,
XML_ROLE_ATTRIBUTE_TYPE_IDREFS,
XML_ROLE_ATTRIBUTE_TYPE_ENTITY,
XML_ROLE_ATTRIBUTE_TYPE_ENTITIES,
XML_ROLE_ATTRIBUTE_TYPE_NMTOKEN,
XML_ROLE_ATTRIBUTE_TYPE_NMTOKENS,
XML_ROLE_ATTRIBUTE_ENUM_VALUE,
XML_ROLE_ATTRIBUTE_NOTATION_VALUE,
XML_ROLE_ATTLIST_ELEMENT_NAME,
XML_ROLE_IMPLIED_ATTRIBUTE_VALUE,
XML_ROLE_REQUIRED_ATTRIBUTE_VALUE,
XML_ROLE_DEFAULT_ATTRIBUTE_VALUE,
XML_ROLE_FIXED_ATTRIBUTE_VALUE,
XML_ROLE_ELEMENT_NAME,
XML_ROLE_CONTENT_ANY,
XML_ROLE_CONTENT_EMPTY,
XML_ROLE_CONTENT_PCDATA,
XML_ROLE_GROUP_OPEN,
XML_ROLE_GROUP_CLOSE,
XML_ROLE_GROUP_CLOSE_REP,
XML_ROLE_GROUP_CLOSE_OPT,
XML_ROLE_GROUP_CLOSE_PLUS,
XML_ROLE_GROUP_CHOICE,
XML_ROLE_GROUP_SEQUENCE,
XML_ROLE_CONTENT_ELEMENT,
XML_ROLE_CONTENT_ELEMENT_REP,
XML_ROLE_CONTENT_ELEMENT_OPT,
XML_ROLE_CONTENT_ELEMENT_PLUS,
#ifdef XML_DTD
XML_ROLE_TEXT_DECL,
XML_ROLE_IGNORE_SECT,
XML_ROLE_INNER_PARAM_ENTITY_REF,
#endif /* XML_DTD */
XML_ROLE_PARAM_ENTITY_REF
};
typedef struct prolog_state {
int (*handler)(struct prolog_state *state,
int tok,
const char *ptr,
const char *end,
const ENCODING *enc);
unsigned level;
#ifdef XML_DTD
unsigned includeLevel;
int documentEntity;
#endif /* XML_DTD */
} PROLOG_STATE;
void XmlPrologStateInit(PROLOG_STATE *);
#ifdef XML_DTD
void XmlPrologStateInitExternalEntity(PROLOG_STATE *);
#endif /* XML_DTD */
#define XmlTokenRole(state, tok, ptr, end, enc) \
(((state)->handler)(state, tok, ptr, end, enc))
#ifdef __cplusplus
}
#endif
#endif /* not XmlRole_INCLUDED */
| {
"pile_set_name": "Github"
} |
<?php declare(strict_types=1);
namespace Shopware\Development\Analyze\Test\PHPStan\Rules\Decoratable\_fixtures\DecoratableImplementsInterface;
interface DecoratableInterface
{
}
| {
"pile_set_name": "Github"
} |
"""
The httplib2 algorithms ported for use with requests.
"""
import re
import calendar
import time
from email.utils import parsedate_tz
from pip._vendor.requests.structures import CaseInsensitiveDict
from .cache import DictCache
from .serialize import Serializer
URI = re.compile(r"^(([^:/?#]+):)?(//([^/?#]*))?([^?#]*)(\?([^#]*))?(#(.*))?")
def parse_uri(uri):
"""Parses a URI using the regex given in Appendix B of RFC 3986.
(scheme, authority, path, query, fragment) = parse_uri(uri)
"""
groups = URI.match(uri).groups()
return (groups[1], groups[3], groups[4], groups[6], groups[8])
class CacheController(object):
    """Decides whether a request is served from, stored in, or evicted
    from the cache, following Cache-Control semantics (RFC 7234).
    """

    def __init__(self, cache=None, cache_etags=True, serializer=None):
        """
        :param cache: backing store for cached responses; defaults to an
            in-memory ``DictCache``.
        :param cache_etags: when True, responses carrying an ETag are
            cached even without explicit freshness information.
        :param serializer: converts responses to/from the stored form;
            defaults to ``Serializer()``.
        """
        self.cache = cache or DictCache()
        self.cache_etags = cache_etags
        self.serializer = serializer or Serializer()

    def _urlnorm(self, uri):
        """Normalize the URL to create a safe key for the cache."""
        (scheme, authority, path, query, fragment) = parse_uri(uri)
        if not scheme or not authority:
            raise Exception("Only absolute URIs are allowed. uri = %s" % uri)

        scheme = scheme.lower()
        authority = authority.lower()

        if not path:
            path = "/"

        # Could do syntax based normalization of the URI before
        # computing the digest. See Section 6.2.2 of Std 66.
        request_uri = query and "?".join([path, query]) or path
        # The fragment is dropped: it never reaches the server, so it
        # must not differentiate cache entries.
        defrag_uri = scheme + "://" + authority + request_uri

        return defrag_uri

    def cache_url(self, uri):
        """Return the cache key used for ``uri``."""
        return self._urlnorm(uri)

    def parse_cache_control(self, headers):
        """
        Parse the cache control headers returning a dictionary with values
        for the different directives.

        Directives with an argument map to their (lower-cased, string)
        value; bare directives map to the int ``1``.
        """
        retval = {}

        cc_header = 'cache-control'
        if 'Cache-Control' in headers:
            cc_header = 'Cache-Control'

        if cc_header in headers:
            parts = headers[cc_header].split(',')
            parts_with_args = [
                tuple([x.strip().lower() for x in part.split("=", 1)])
                for part in parts if -1 != part.find("=")
            ]
            parts_wo_args = [
                (name.strip().lower(), 1)
                for name in parts if -1 == name.find("=")
            ]
            retval = dict(parts_with_args + parts_wo_args)

        return retval

    def cached_request(self, request):
        """
        Return a cached response if it exists in the cache, otherwise
        return False.
        """
        cache_url = self.cache_url(request.url)
        cc = self.parse_cache_control(request.headers)

        # Non-caching states: an explicit no-cache, or max-age=0, both
        # force a round trip to the origin server.
        no_cache = 'no-cache' in cc
        if 'max-age' in cc:
            try:
                # BUG FIX: directive values are parsed as *strings*, so
                # the previous ``cc['max-age'] == 0`` was never true and
                # max-age=0 did not disable cache use.
                if int(cc['max-age']) == 0:
                    no_cache = True
            except (ValueError, TypeError):
                pass

        # Bail out if no-cache was set
        if no_cache:
            return False

        # It is in the cache, so lets see if it is going to be
        # fresh enough
        resp = self.serializer.loads(request, self.cache.get(cache_url))

        # Check to see if we have a cached object
        if not resp:
            return False

        headers = CaseInsensitiveDict(resp.headers)

        # BUG FIX: without a Date header the response's age cannot be
        # computed; treat this as a cache miss instead of raising
        # KeyError below.
        if 'date' not in headers:
            return False

        now = time.time()
        date = calendar.timegm(
            parsedate_tz(headers['date'])
        )
        current_age = max(0, now - date)

        # TODO: There is an assumption that the result will be a
        # urllib3 response object. This may not be best since we
        # could probably avoid instantiating or constructing the
        # response until we know we need it.
        resp_cc = self.parse_cache_control(headers)

        # determine freshness
        freshness_lifetime = 0

        # Check the max-age pragma in the cache control header.
        # str() guards against the bare-directive case where
        # parse_cache_control stores the int 1 instead of a string.
        if 'max-age' in resp_cc and str(resp_cc['max-age']).isdigit():
            freshness_lifetime = int(resp_cc['max-age'])

        # If there isn't a max-age, check for an expires header
        elif 'expires' in headers:
            expires = parsedate_tz(headers['expires'])
            if expires is not None:
                expire_time = calendar.timegm(expires) - date
                freshness_lifetime = max(0, expire_time)

        # determine if we are setting freshness limit in the req
        if 'max-age' in cc:
            try:
                freshness_lifetime = int(cc['max-age'])
            except (ValueError, TypeError):
                freshness_lifetime = 0

        if 'min-fresh' in cc:
            try:
                min_fresh = int(cc['min-fresh'])
            except (ValueError, TypeError):
                min_fresh = 0
            # adjust our current age by our min fresh
            current_age += min_fresh

        # see how fresh we actually are
        fresh = (freshness_lifetime > current_age)

        if fresh:
            return resp

        # we're not fresh. If we don't have an Etag, clear it out
        if 'etag' not in headers:
            self.cache.delete(cache_url)

        # return the original handler
        return False

    def conditional_headers(self, request):
        """Return validation headers (If-None-Match / If-Modified-Since)
        derived from the cached response for ``request``, if any.
        """
        cache_url = self.cache_url(request.url)
        resp = self.serializer.loads(request, self.cache.get(cache_url))
        new_headers = {}

        if resp:
            headers = CaseInsensitiveDict(resp.headers)

            if 'etag' in headers:
                new_headers['If-None-Match'] = headers['ETag']

            if 'last-modified' in headers:
                new_headers['If-Modified-Since'] = headers['Last-Modified']

        return new_headers

    def cache_response(self, request, response, body=None):
        """
        Algorithm for caching requests.

        This assumes a requests Response object.
        """
        # From httplib2: Don't cache 206's since we aren't going to
        # handle byte range requests
        if response.status not in [200, 203]:
            return

        response_headers = CaseInsensitiveDict(response.headers)

        cc_req = self.parse_cache_control(request.headers)
        cc = self.parse_cache_control(response_headers)

        cache_url = self.cache_url(request.url)

        # Delete it from the cache if we happen to have it stored there
        no_store = cc.get('no-store') or cc_req.get('no-store')
        if no_store and self.cache.get(cache_url):
            self.cache.delete(cache_url)

        # If we've been given an etag, then keep the response
        if self.cache_etags and 'etag' in response_headers:
            self.cache.set(
                cache_url,
                self.serializer.dumps(request, response, body=body),
            )

        # Add to the cache if the response headers demand it. If there
        # is no date header then we can't do anything about expiring
        # the cache.
        elif 'date' in response_headers:
            # cache when there is a max-age > 0
            if cc and cc.get('max-age'):
                # BUG FIX: tolerate malformed max-age values instead of
                # letting int() raise ValueError.
                try:
                    max_age = int(cc['max-age'])
                except (ValueError, TypeError):
                    max_age = 0
                if max_age > 0:
                    self.cache.set(
                        cache_url,
                        self.serializer.dumps(request, response, body=body),
                    )

            # If the request can expire, it means we should cache it
            # in the meantime.
            elif 'expires' in response_headers:
                if response_headers['expires']:
                    self.cache.set(
                        cache_url,
                        self.serializer.dumps(request, response, body=body),
                    )

    def update_cached_response(self, request, response):
        """On a 304 we will get a new set of headers that we want to
        update our cached value with, assuming we have one.

        This should only ever be called when we've sent an ETag and
        gotten a 304 as the response.
        """
        cache_url = self.cache_url(request.url)

        cached_response = self.serializer.loads(request, self.cache.get(cache_url))

        if not cached_response:
            # we didn't have a cached response
            return response

        # Lets update our headers with the headers from the new request:
        # http://tools.ietf.org/html/draft-ietf-httpbis-p4-conditional-26#section-4.1
        #
        # The server isn't supposed to send headers that would make
        # the cached body invalid. But... just in case, we'll be sure
        # to strip out ones we know that might be problematic due to
        # typical assumptions.
        excluded_headers = [
            "content-length",
        ]

        cached_response.headers.update(
            dict((k, v) for k, v in response.headers.items()
                 if k.lower() not in excluded_headers)
        )

        # we want a 200 b/c we have content via the cache
        cached_response.status = 200

        # update our cache
        self.cache.set(
            cache_url,
            self.serializer.dumps(request, cached_response),
        )

        return cached_response
| {
"pile_set_name": "Github"
} |
/**
*
*/
package com.taobao.top.analysis.node;
import java.util.List;
import com.taobao.top.analysis.config.MasterConfig;
import com.taobao.top.analysis.node.job.Job;
import com.taobao.top.analysis.node.job.JobTask;
import com.taobao.top.analysis.node.job.JobTaskResult;
/**
 * Job export manager. Supports exporting job result reports and
 * loading/exporting intermediate analysis results (persisting the
 * analysis state to support disaster recovery).
 *
 * @author fangweng
 * @Email fangweng@taobao.com
 * 2011-11-28
 *
 */
public interface IJobExporter extends IComponent<MasterConfig>{

    /**
     * Export the reports for a job. Implementations may use multiple
     * threads internally, but the call must block until the export
     * completes.
     *
     * @param job the job whose results should be exported
     * @param needTimeSuffix whether a time suffix should be appended
     * @return the exported report entries
     */
    public List<String> exportReport(Job job,boolean needTimeSuffix);

    /**
     * Export the report for a single task.
     *
     * @param jobTask the task to export
     * @param jobTaskResult the result produced by that task
     * @param needTimeSuffix whether a time suffix should be appended
     * @return the exported report entries
     */
    public List<String> exportReport(JobTask jobTask,JobTaskResult jobTaskResult,boolean needTimeSuffix);

    /**
     * Export the intermediate analysis data of a job's trunk (main
     * result branch) from the analyzer.
     * @param job the job whose trunk data is exported
     */
    public void exportEntryData(Job job);

    /**
     * Load a job's intermediate analysis data back into the trunk.
     * @param job the job whose data is loaded
     */
    public void loadEntryData(Job job);

    /**
     * Load a job's intermediate analysis data into the job's temporary
     * holder (not directly into the trunk).
     * @param job the job whose data is loaded
     */
    public void loadEntryDataToTmp(Job job);

    /**
     * Restore a job by loading its temporary backup data from the
     * given epoch.
     * @param job the job to restore
     * @param epoch identifier of the backup generation to load
     */
    public void loadJobBackupData(Job job,String epoch);
}
| {
"pile_set_name": "Github"
} |
[HIMDBVersion]
2.0
[DATABASE_VERSION]
"2.3"
[SESSION_DETAILS]
""
[INFORMATION]
""
[GENERAL_DATA]
"{287A8023-99B5-49E1-A54E-4DDCA43D7959}MapCtrlECX_MAP_FIND_SYMBOL_LIST" ""
"{287A8023-99B5-49E1-A54E-4DDCA43D7959}MapCtrlViews" "0"
"{313F4FC1-6566-11D5-8BBE-0004E2013C71}CmdLineCtrlBatchFileName" ""
"{313F4FC1-6566-11D5-8BBE-0004E2013C71}CmdLineCtrlBreakpointFlag" "-1 "
"{313F4FC1-6566-11D5-8BBE-0004E2013C71}CmdLineCtrlBreakpointStatus" "-1 "
"{313F4FC1-6566-11D5-8BBE-0004E2013C71}CmdLineCtrlBrowseDirectory" ""
"{313F4FC1-6566-11D5-8BBE-0004E2013C71}CmdLineCtrlLogFileName" ""
"{313F4FC1-6566-11D5-8BBE-0004E2013C71}CmdLineCtrlSplitterPosition" "242"
"{313F4FC1-6566-11D5-8BBE-0004E2013C71}CmdLineCtrlViews" "0"
"{313F4FC1-6566-11D5-8BBE-0004E2013C71}TclTkCtrlLogFileName" ""
"{6C4D5B81-FD67-46A9-A089-EA44DCDE47FD}RAMMonitorManagerCtrlBlockInfoFileDir" ""
"{6C4D5B81-FD67-46A9-A089-EA44DCDE47FD}RAMMonitorManagerCtrlBlockInfoFileName" ""
"{7943C44E-7D44-422A-9140-4CF55C88F7D3}DifferenceCtrlViews" "0"
[LANGUAGE]
"English"
[CONFIG_INFO_VD1]
1
[CONFIG_INFO_VD2]
0
[CONFIG_INFO_VD3]
0
[CONFIG_INFO_VD4]
0
[WINDOW_POSITION_STATE_DATA_VD1]
"Help" "TOOLBAR 0" 59419 1 5 "0.00" 0 0 0 0 0 17 0 "" "0.0"
"{WK_00000001_OUTPUT}" "WINDOW" 59422 0 0 "1.00" 289 560 340 350 200 18 0 "36756|36757|36758|36759|<<separator>>|36746|36747|<<separator>>|39531|<<separator>>|39500|39534|<<separator>>|36687" "0.0"
"{WK_00000002_WORKSPACE}" "WINDOW" 59420 0 0 "1.00" 206 560 340 350 200 18 0 "" "0.0"
"{WK_TB00000001_STANDARD}" "TOOLBAR 0" 59419 0 2 "0.00" 0 0 0 0 0 18 0 "" "0.0"
"{WK_TB00000002_EDITOR}" "TOOLBAR 0" 59419 0 0 "0.00" 0 0 0 0 0 18 0 "" "0.0"
"{WK_TB00000003_BOOKMARKS}" "TOOLBAR 0" 59419 1 1 "0.00" 0 0 0 0 0 17 0 "" "0.0"
"{WK_TB00000004_TEMPLATES}" "TOOLBAR 0" 59419 1 0 "0.00" 0 0 0 0 0 17 0 "" "0.0"
"{WK_TB00000005_SEARCH}" "TOOLBAR 0" 59419 0 1 "0.00" 0 0 0 0 0 18 0 "" "0.0"
"{WK_TB00000007_DEBUG}" "TOOLBAR 0" 59419 2 0 "0.00" 0 0 0 0 0 17 0 "" "0.0"
"{WK_TB00000008_DEBUGRUN}" "TOOLBAR 0" 59419 2 1 "0.00" 0 0 0 0 0 17 0 "" "0.0"
"{WK_TB00000009_VERSIONCONTROL}" "TOOLBAR 0" 59419 1 3 "0.00" 0 0 0 0 0 17 0 "" "0.0"
"{WK_TB00000012_MAP}" "TOOLBAR 0" 59419 1 4 "0.00" 0 0 0 0 0 17 0 "" "0.0"
"{WK_TB00000018_DEFAULTWINDOW}" "TOOLBAR 0" 59419 1 2 "0.00" 0 0 0 0 0 17 0 "" "0.0"
"{WK_TB00000025_HELPSYSTEMTOOL}" "TOOLBAR 0" 59419 2 3 "0.00" 0 0 0 0 0 18 0 "" "0.0"
"{WK_TB00000026_MACRO}" "TOOLBAR 0" 59419 1 6 "0.00" 0 0 0 0 0 17 0 "" "0.0"
"{WK_TB00000028_RTOSDEBUG}" "TOOLBAR 0" 59419 2 2 "0.00" 0 0 0 0 0 17 0 "" "0.0"
"{WK_TB00000029_SYSTEMTOOL}" "TOOLBAR 0" 59419 2 4 "0.00" 0 0 0 0 0 17 0 "" "0.0"
[WINDOW_POSITION_STATE_DATA_VD2]
[WINDOW_POSITION_STATE_DATA_VD3]
[WINDOW_POSITION_STATE_DATA_VD4]
[WINDOW_Z_ORDER]
"C:\E\Dev\FreeRTOS\WorkingCopy\Demo\RX600_RX62N_Renesas\RTOSDemo\RTOSDemo.c"
[TARGET_NAME]
"" "" 1229201492
[STATUSBAR_STATEINFO_VD1]
"MasterShowState" 1
"ApplicationShowState" 1
"DebuggerShowState" 1
[STATUSBAR_STATEINFO_VD2]
"MasterShowState" 1
"ApplicationShowState" 1
"DebuggerShowState" 1
[STATUSBAR_STATEINFO_VD3]
"MasterShowState" 1
"ApplicationShowState" 1
"DebuggerShowState" 1
[STATUSBAR_STATEINFO_VD4]
"MasterShowState" 1
"ApplicationShowState" 1
"DebuggerShowState" 1
[STATUSBAR_DEBUGGER_PANESTATE_VD1]
[STATUSBAR_DEBUGGER_PANESTATE_VD2]
[STATUSBAR_DEBUGGER_PANESTATE_VD3]
[STATUSBAR_DEBUGGER_PANESTATE_VD4]
[DEBUGGER_OPTIONS]
""
[DOWNLOAD_MODULES]
[CONNECT_ON_GO]
"FALSE"
[DOWNLOAD_MODULES_AFTER_BUILD]
"TRUE"
[REMOVE_BREAKPOINTS_ON_DOWNLOAD]
"FALSE"
[DISABLE_MEMORY_ACCESS_PRIOR_TO_COMMAND_FILE_EXECUTION]
"FALSE"
[LIMIT_DISASSEMBLY_MEMORY_ACCESS]
"FALSE"
[DISABLE_MEMORY_ACCESS_DURING_EXECUTION]
"FALSE"
[DEBUGGER_OPTIONS_PROPERTIES]
"1"
[COMMAND_FILES]
[DEFAULT_DEBUG_FORMAT]
""
[FLASH_DETAILS]
"" 0 0 "" 0 "" 0 0 "" 0 0 0 0 0 0 0 "" "" "" "" ""
[BREAKPOINTS]
[END]
| {
"pile_set_name": "Github"
} |
///////////////////////////////////////////////////////////////////////////////
// Copyright (c) 2001, Eric D. Friedman All Rights Reserved.
//
// This library is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; either
// version 2.1 of the License, or (at your option) any later version.
//
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License along with this program; if not, write to the Free Software
// Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
///////////////////////////////////////////////////////////////////////////////
package gnu.trove.iterator;
//////////////////////////////////////////////////
// THIS IS A GENERATED CLASS. DO NOT HAND EDIT! //
//////////////////////////////////////////////////
/**
* Iterator for maps of type int and long.
*
* <p>The iterator semantics for Trove's primitive maps is slightly different
* from those defined in <tt>java.util.Iterator</tt>, but still well within
* the scope of the pattern, as defined by Gamma, et al.</p>
*
* <p>This iterator does <b>not</b> implicitly advance to the next entry when
* the value at the current position is retrieved. Rather, you must explicitly
* ask the iterator to <tt>advance()</tt> and then retrieve either the <tt>key()</tt>,
* the <tt>value()</tt> or both. This is done so that you have the option, but not
* the obligation, to retrieve keys and/or values as your application requires, and
* without introducing wrapper objects that would carry both. As the iteration is
* stateful, access to the key/value parts of the current map entry happens in
* constant time.</p>
*
* <p>In practice, the iterator is akin to a "search finger" that you move from
* position to position. Read or write operations affect the current entry only and
* do not assume responsibility for moving the finger.</p>
*
* <p>Here are some sample scenarios for this class of iterator:</p>
*
* <pre>
* // accessing keys/values through an iterator:
* for ( TIntLongIterator it = map.iterator(); it.hasNext(); ) {
* it.advance();
* if ( satisfiesCondition( it.key() ) {
* doSomethingWithValue( it.value() );
* }
* }
* </pre>
*
* <pre>
* // modifying values in-place through iteration:
* for ( TIntLongIterator it = map.iterator(); it.hasNext(); ) {
* it.advance();
* if ( satisfiesCondition( it.key() ) {
* it.setValue( newValueForKey( it.key() ) );
* }
* }
* </pre>
*
* <pre>
* // deleting entries during iteration:
* for ( TIntLongIterator it = map.iterator(); it.hasNext(); ) {
* it.advance();
* if ( satisfiesCondition( it.key() ) {
* it.remove();
* }
* }
* </pre>
*
* <pre>
* // faster iteration by avoiding hasNext():
* TIntLongIterator iterator = map.iterator();
* for ( int i = map.size(); i-- > 0; ) {
* iterator.advance();
* doSomethingWithKeyAndValue( iterator.key(), iterator.value() );
* }
* </pre>
*/
public interface TIntLongIterator extends TAdvancingIterator {
    // advance() and hasNext() are inherited from TAdvancingIterator;
    // see the usage examples in the interface javadoc above.

    /**
     * Provides access to the key of the mapping at the iterator's position.
     * Note that you must <tt>advance()</tt> the iterator at least once
     * before invoking this method.
     *
     * @return the key of the entry at the iterator's current position.
     */
    public int key();

    /**
     * Provides access to the value of the mapping at the iterator's position.
     * Note that you must <tt>advance()</tt> the iterator at least once
     * before invoking this method.
     *
     * @return the value of the entry at the iterator's current position.
     */
    public long value();

    /**
     * Replace the value of the mapping at the iterator's position with the
     * specified value. Note that you must <tt>advance()</tt> the iterator at
     * least once before invoking this method.
     *
     * @param val the value to set in the current entry
     * @return the old value of the entry.
     */
    public long setValue( long val );
}
| {
"pile_set_name": "Github"
} |
package;
import js.Browser;
using StringTools;
// The keys for reading/writing preset settings in a URL query string
// These settings keys concern the name generator parameters and result filtering
@:enum abstract GeneratorSettingKey(String) from String to String {
	var PRESET_WORD_KEY = "w"; // one custom training-data word (repeated per word)
	var RESULT_WORD_KEY = "r"; // one generated result word (repeated per word)
	var NAME_DATA_PRESET = "name_data_preset";
	var NUMBER_TO_GENERATE = "number_to_generate";
	var LENGTH_RANGE_MIN = "length_range_min";
	var LENGTH_RANGE_MAX = "length_range_max";
	var ORDER = "order";
	var PRIOR = "prior";
	var MAX_WORDS = "max_words";
	var MAX_PROCESSING_TIME = "max_processing_time";
	var STARTS_WITH = "starts_with";
	// NOTE(review): "ends_width" is a typo for "ends_with", but the key is
	// load-bearing — URLs already shared with this spelling would break if
	// it were renamed. Leave as-is unless a migration is added.
	var ENDS_WITH = "ends_width";
	var INCLUDES = "includes";
	var EXCLUDES = "excludes";
	var SIMILAR_TO = "similar_to";
	var REGEX_MATCH = "regex_match";
}
// The data that should be saved into the custom query string
// Note, should really use bitset/flags if more options are added
enum CustomQueryStringOption {
	EVERYTHING; // include settings, training-data words and results
	NO_TRAINING_DATA; // include everything except the training-data words
}
/**
 * Helper functions for sharing the generated words.
 *
 * Settings and results are round-tripped through the page URL's query
 * string using the keys in GeneratorSettingKey.
 */
@:access(Main)
class ShareResults
{
	// True when the current page URL carries no query string at all.
	public static inline function isQueryStringEmpty():Bool {
		var params = Browser.window.location.search.substring(1);
		if (params == null || params == "") {
			return true;
		}
		return false;
	}

	/*
	 * Applies any custom settings encoded in the query string
	 */
	public static function applySettings(m:Main):Void {
		if (isQueryStringEmpty()) {
			return;
		}
		// Strip the leading "?" and split into key=value pairs.
		var params = Browser.window.location.search.substring(1);
		var splitParams = params.split("&");
		var customTrainingData = new Array<String>();
		var sharedResultData = new Array<String>();
		for (param in splitParams) {
			var kv = param.split("=");
			if (kv.length < 2) {
				// Malformed pair (no "="): skip it.
				continue;
			}
			var k = kv[0].urlDecode();
			var v = kv[1].urlDecode();
			// Repeated word keys accumulate; the scalar keys overwrite
			// the corresponding generator parameter on Main.
			switch(k) {
				case GeneratorSettingKey.RESULT_WORD_KEY:
					sharedResultData.push(v);
				case GeneratorSettingKey.PRESET_WORD_KEY:
					customTrainingData.push(v);
				case GeneratorSettingKey.LENGTH_RANGE_MIN:
					m.minLength = Std.parseInt(v);
				case GeneratorSettingKey.LENGTH_RANGE_MAX:
					m.maxLength = Std.parseInt(v);
				case GeneratorSettingKey.ORDER:
					m.order = Std.parseInt(v);
				case GeneratorSettingKey.PRIOR:
					m.prior = Std.parseFloat(v);
				case GeneratorSettingKey.MAX_WORDS:
					m.maxWordsToGenerate = Std.parseInt(v);
				case GeneratorSettingKey.MAX_PROCESSING_TIME:
					m.maxProcessingTime = Std.parseInt(v);
				case GeneratorSettingKey.STARTS_WITH:
					m.startsWith = v;
				case GeneratorSettingKey.ENDS_WITH:
					m.endsWith = v;
				case GeneratorSettingKey.INCLUDES:
					m.includes = v;
				case GeneratorSettingKey.EXCLUDES:
					m.excludes = v;
				case GeneratorSettingKey.SIMILAR_TO:
					m.similar = v;
				case GeneratorSettingKey.REGEX_MATCH:
					m.regexMatch = v;
			}
		}
		if (sharedResultData.length > 0) {
			m.setNames(sharedResultData);
		}
		if (customTrainingData.length > 3) { // Arbitrary minimum, just in case something goes a bit wrong when reading the query string
			Reflect.setField(TrainingData, "custom", customTrainingData);
			m.trainingDataKeys = [ "custom" ];
		}
	}

	/*
	 * Creates a settings query string for the current settings
	 */
	public static function makeCustomQueryString(m:Main, mode:CustomQueryStringOption):String {
		var s:String = Main.WEBSITE_URL;
		// Appends "sep key=value", URL-encoding both parts; skips empty
		// keys/values entirely so the string stays compact.
		var appendKv = function(k:String, v:String, sep = "&") {
			if (k == null || k.length == 0 || v == null || v.length == 0) {
				return;
			}
			s += (sep + k.urlEncode() + "=" + v.urlEncode());
		}
		// First pair starts the query string with "?".
		appendKv(GeneratorSettingKey.LENGTH_RANGE_MIN, Std.string(m.minLength), "?");
		appendKv(GeneratorSettingKey.LENGTH_RANGE_MAX, Std.string(m.maxLength));
		appendKv(GeneratorSettingKey.ORDER, Std.string(m.order));
		appendKv(GeneratorSettingKey.PRIOR, Std.string(m.prior));
		appendKv(GeneratorSettingKey.MAX_WORDS, Std.string(m.maxWordsToGenerate));
		appendKv(GeneratorSettingKey.MAX_PROCESSING_TIME, Std.string(m.maxProcessingTime));
		appendKv(GeneratorSettingKey.STARTS_WITH, m.startsWith);
		appendKv(GeneratorSettingKey.ENDS_WITH, m.endsWith);
		appendKv(GeneratorSettingKey.INCLUDES, m.includes);
		appendKv(GeneratorSettingKey.EXCLUDES, m.excludes);
		appendKv(GeneratorSettingKey.SIMILAR_TO, m.similar);
		appendKv(GeneratorSettingKey.REGEX_MATCH, m.regexMatch);
		if(mode != CustomQueryStringOption.NO_TRAINING_DATA) {
			// Training data is taken from the edit box, one word per
			// space-separated token.
			var data = m.trainingDataTextEdit.value.split(" ");
			if (data.length > 1) {
				for (word in data) {
					if (word != null && word.length != 0) {
						appendKv(GeneratorSettingKey.PRESET_WORD_KEY, word);
					}
				}
			}
		}
		if(m.lastGeneratedNames.length > 0) {
			for (name in m.lastGeneratedNames) {
				if (name != null && name.length != 0) {
					appendKv(GeneratorSettingKey.RESULT_WORD_KEY, name);
				}
			}
		}
		return s;
	}
}
"pile_set_name": "Github"
} |
// Intentionally empty no-op — presumably a minimal compile/link test
// fixture (TODO confirm intent; no callers visible in this file).
void f() {}
// Intentionally empty no-op, same role as f() above — confirm intent.
void g() {}
| {
"pile_set_name": "Github"
} |
import JSONTree from '../../JsonTree'
import FileType from '../../FileType'
import TabSystem from '../../../TabSystem'
import LightningCache from '../../LightningCache'
import { IDisposable } from '../../../Types/disposable'
import { compare } from 'compare-versions'
import ProjectConfig from '../../../Project/Config'
import * as MoLang from 'molang'
/**
 * Builds the scripting environment object exposed to file-type plugins.
 * NOTE(review): `disposables` appears unused in this factory — confirm
 * whether callers rely on the parameter before removing it.
 */
export const ENV = (
	disposables: IDisposable[],
	Node: JSONTree,
	filePath = TabSystem.getCurrentFilePath()
) => ({
	Node,
	// Root of the JSON tree Node belongs to, found by walking the
	// parent chain upward.
	get GlobalNode() {
		let currentNode = Node
		while (currentNode.parent) {
			currentNode = currentNode.parent
		}
		return currentNode
	},
	// File-type descriptor resolved from the captured filePath.
	get FileType() {
		return FileType.get(filePath)
	},
	MoLang: MoLang,
	Version: {
		ProjectTarget: ProjectConfig.getFormatVersionSync(),
		compare,
	},
	File: {
		// True if the file's root description declares `identifier` as
		// either its identifier or its runtime_identifier.
		usesEntity(identifier: string) {
			let globalNode = Node
			while (globalNode.parent) {
				globalNode = globalNode.parent
			}
			return (
				globalNode.get('#;bridge_node_skip;#/description/identifier')
					?.data === identifier ||
				globalNode.get(
					'#;bridge_node_skip;#/description/runtime_identifier'
				)?.data === identifier
			)
		},
	},
	Tab: {
		// Flags the currently open tab as having unsaved changes.
		setUnsaved: () => TabSystem.setCurrentUnsaved(),
	},
	get LightningCache() {
		return LightningCache.getCompiled()
	},
})
| {
"pile_set_name": "Github"
} |
---
title: "COR_TYPE_LAYOUT Structure"
ms.date: "03/30/2017"
api_name:
- "COR_TYPE_LAYOUT"
api_location:
- "mscordbi.dll"
api_type:
- "COM"
f1_keywords:
- "COR_TYPE_LAYOUT"
helpviewer_keywords:
- "COR_TYPE_LAYOUT structure [.NET Framework debugging]"
ms.assetid: 43a7addd-f25a-4049-9907-abec3eb17af2
topic_type:
- "apiref"
---
# COR_TYPE_LAYOUT Structure
Provides information about the layout of an object in memory.
## Syntax
```cpp
typedef struct COR_TYPE_LAYOUT {
COR_TYPEID parentID;
ULONG32 objectSize;
ULONG32 numFields;
ULONG32 boxOffset;
CorElementType type;
} COR_TYPE_LAYOUT;
```
## Members
|Member|Description|
|------------|-----------------|
|`parentID`|The identifier of the parent type of this type. This will be the NULL type id (token1 = 0, token2 = 0) if the type id corresponds to <xref:System.Object?displayProperty=nameWithType>.|
|`objectSize`|The base size of an object of this type. This is the total size for non-variable sized objects.|
|`numFields`|The number of fields included in objects of this type.|
|`boxOffset`|If this type is boxed, the beginning offset of an object's fields. This field is valid only for value types such as primitives and structures.|
|`type`|The CorElementType to which this type belongs.|
## Remarks
If `numFields` is greater than zero, you can call the [ICorDebugProcess5::GetTypeFields](icordebugprocess5-gettypefields-method.md) method to obtain information about the fields in this type. If `type` is `ELEMENT_TYPE_STRING`, `ELEMENT_TYPE_ARRAY`, or `ELEMENT_TYPE_SZARRAY`, the size of objects of this type is variable, and you can pass the [COR_TYPEID](cor-typeid-structure.md) structure to the [ICorDebugProcess5::GetArrayLayout](icordebugprocess5-getarraylayout-method.md) method.
## Requirements
**Platforms:** See [System Requirements](../../get-started/system-requirements.md).
**Header:** CorDebug.idl, CorDebug.h
**Library:** CorGuids.lib
**.NET Framework Versions:** [!INCLUDE[net_current_v45plus](../../../../includes/net-current-v45plus-md.md)]
## See also
- [Debugging Structures](debugging-structures.md)
- [Debugging](index.md)
| {
"pile_set_name": "Github"
} |
See [the package documentation](https://godoc.org/github.com/prometheus/client_golang/prometheus).
| {
"pile_set_name": "Github"
} |
/*===================================================================================
*
* Copyright (c) Userware/OpenSilver.net
*
* This file is part of the OpenSilver Runtime (https://opensilver.net), which is
* licensed under the MIT license: https://opensource.org/licenses/MIT
*
* As stated in the MIT license, "the above copyright notice and this permission
* notice shall be included in all copies or substantial portions of the Software."
*
\*====================================================================================*/
using System;
#if MIGRATION
using System.Windows;
#else
using Windows.UI.Xaml;
#endif
#if MIGRATION
namespace System.Windows.Media.Animation
#else
namespace Windows.UI.Xaml.Media.Animation
#endif
{
#if WORKINPROGRESS
    /// <summary>
    /// Empty placeholder for the IAnimatable interface; compiled only
    /// under WORKINPROGRESS, members to be added later.
    /// </summary>
    public partial interface IAnimatable
    {
    }
#endif
}
| {
"pile_set_name": "Github"
} |
precision highp float;
#include <transform>
#include <particle-common>
#include <cc-local>
in vec3 a_position; // center position
in vec3 a_texCoord; // xy:vertex index,z:frame index
in vec3 a_texCoord1; // size
in vec3 a_texCoord2; // rotation
in vec4 a_color;
#if CC_USE_STRETCHED_BILLBOARD
in vec3 a_color1; // velocity.x, velocity.y, velocity.z, scale
#endif
#if CC_USE_MESH
in vec3 a_texCoord3; // mesh vertices
in vec3 a_normal; // mesh normal
in vec4 a_color1; // mesh color
#endif
// Particle vertex entry point: expands each particle into its final
// clip-space position and fills the `color`/`uv` varyings. Behavior is
// selected at compile time by the CC_USE_* macros (billboard variants,
// world-space simulation, or mesh particles).
vec4 lpvs_main() {
  // Final scale = node scale combined with the per-particle size.
  vec3 compScale = scale.xyz * a_texCoord1;
  vec4 pos = vec4(a_position, 1);
  #if CC_USE_STRETCHED_BILLBOARD
  vec4 velocity = vec4(a_color1.xyz, 0);
  #endif

  #if !CC_USE_WORLD_SPACE
  // simulate in world space. apply cc_matWorld matrix on CPU side.
  pos = cc_matWorld * pos;
  #if CC_USE_STRETCHED_BILLBOARD
  velocity = cc_matWorld * velocity;
  #endif
  #endif

  #if !CC_USE_MESH
  // Quad particle: a_texCoord.xy is the corner index (0/1), recentred
  // to [-0.5, 0.5] around the particle centre.
  vec2 cornerOffset = vec2((a_texCoord.xy - 0.5));
  #if CC_USE_BILLBOARD
  vec3 rotEuler = a_texCoord2;
  #elif CC_USE_STRETCHED_BILLBOARD
  vec3 rotEuler = vec3(0.);
  #else
  vec3 rotEuler = vec3(0., 0., a_texCoord2.z);
  #endif

  // Extra arguments are only passed for the billboard variants that
  // need them (view matrix, camera, velocity stretching).
  computeVertPos(pos, cornerOffset, quaternionFromEuler(rotEuler), compScale
  #if CC_USE_BILLBOARD || CC_USE_VERTICAL_BILLBOARD
    , cc_matViewInv
  #endif
  #if CC_USE_STRETCHED_BILLBOARD
    , cc_cameraPos.xyz
    , velocity
    , frameTile_velLenScale.z
    , frameTile_velLenScale.w
    , a_texCoord.x
  #endif
  );
  color = a_color;
  #else
  // Mesh particle: transform each mesh vertex by the particle's RTS.
  mat4 xformNoScale = matrixFromRT(quaternionFromEuler(a_texCoord2), pos.xyz);
  mat4 xform = matFromRTS(quaternionFromEuler(a_texCoord2), pos.xyz, compScale);
  pos = xform * vec4(a_texCoord3, 1);
  // NOTE(review): `normal` is computed but not used in this shader —
  // confirm whether a lighting path was meant to consume it.
  vec4 normal = xformNoScale * vec4(a_normal, 0);
  color = a_color * a_color1;
  #endif
  // a_texCoord.z selects the animation frame within the sprite sheet.
  uv = computeUV(a_texCoord.z, a_texCoord.xy, frameTile_velLenScale.xy) * mainTiling_Offset.xy + mainTiling_Offset.zw;
  pos = cc_matViewProj * pos;
  return pos;
}
| {
"pile_set_name": "Github"
} |
/* { dg-do run } */
#include <stdlib.h>
#include <openacc.h>
int i;
/* Return nonzero when the host range [p, p+n) is present on the
   accelerator.  With a shared-memory device (ACC_MEM_SHARED) everything
   is trivially present.  */
int
is_mapped (void *p, size_t n)
{
#if ACC_MEM_SHARED
  return 1;
#else
  return acc_is_present (p, n);
#endif
}
/* Exercises the OpenACC data clauses (copyin/copyout/create and their
   present_or_* forms) on two scalars, checking mapping state and the
   direction of host/device transfers for each clause.  */
int main(void)
{
  int j;

  /* copyin: values transferred to the device on entry; host writes
     inside the region stay visible on the host, nothing copied back.  */
  i = -1;
  j = -2;

#pragma acc data copyin (i, j)
  {
    if (!is_mapped (&i, sizeof (i)) || !is_mapped (&j, sizeof (j)))
      abort ();

    if (i != -1 || j != -2)
      abort ();

    i = 2;
    j = 1;

    if (i != 2 || j != 1)
      abort ();
  }

  if (i != 2 || j != 1)
    abort ();

  /* copyout: device values written by the parallel region are copied
     back to the host when the data region ends.  */
  i = -1;
  j = -2;

#pragma acc data copyout (i, j)
  {
    if (!is_mapped (&i, sizeof (i)) || !is_mapped (&j, sizeof (j)))
      abort ();

    if (i != -1 || j != -2)
      abort ();

    i = 2;
    j = 1;

    if (i != 2 || j != 1)
      abort ();

#pragma acc parallel present (i, j)
    {
      i = 4;
      j = 2;
    }
  }

  if (i != 4 || j != 2)
    abort ();

  /* create: device storage allocated, no transfer either way.  */
  i = -1;
  j = -2;

#pragma acc data create (i, j)
  {
    if (!is_mapped (&i, sizeof (i)) || !is_mapped (&j, sizeof (j)))
      abort ();

    if (i != -1 || j != -2)
      abort ();

    i = 2;
    j = 1;

    if (i != 2 || j != 1)
      abort ();
  }

  if (i != 2 || j != 1)
    abort ();

  /* present_or_copyin: nothing mapped yet, so behaves like copyin.  */
  i = -1;
  j = -2;

#pragma acc data present_or_copyin (i, j)
  {
    if (!is_mapped (&i, sizeof (i)) || !is_mapped (&j, sizeof (j)))
      abort ();

    if (i != -1 || j != -2)
      abort ();

    i = 2;
    j = 1;

    if (i != 2 || j != 1)
      abort ();
  }

  if (i != 2 || j != 1)
    abort ();

  /* present_or_copyout: behaves like copyout here; device writes are
     copied back at region end.  */
  i = -1;
  j = -2;

#pragma acc data present_or_copyout (i, j)
  {
    if (!is_mapped (&i, sizeof (i)) || !is_mapped (&j, sizeof (j)))
      abort ();

    if (i != -1 || j != -2)
      abort ();

    i = 2;
    j = 1;

    if (i != 2 || j != 1)
      abort ();

#pragma acc parallel present (i, j)
    {
      i = 4;
      j = 2;
    }
  }

  if (i != 4 || j != 2)
    abort ();

  /* present_or_copy: with separate memories the unmodified device
     copies overwrite the host writes at region end; with shared memory
     the host writes survive.  */
  i = -1;
  j = -2;

#pragma acc data present_or_copy (i, j)
  {
    if (!is_mapped (&i, sizeof (i)) || !is_mapped (&j, sizeof (j)))
      abort ();

    if (i != -1 || j != -2)
      abort ();

    i = 2;
    j = 1;

    if (i != 2 || j != 1)
      abort ();
  }

#if ACC_MEM_SHARED
  if (i != 2 || j != 1)
    abort ();
#else
  if (i != -1 || j != -2)
    abort ();
#endif

  /* present_or_create: allocation only, like create.  */
  i = -1;
  j = -2;

#pragma acc data present_or_create (i, j)
  {
    if (!is_mapped (&i, sizeof (i)) || !is_mapped (&j, sizeof (j)))
      abort ();

    i = 2;
    j = 1;

    if (i != 2 || j != 1)
      abort ();
  }

  if (i != 2 || j != 1)
    abort ();

  /* Nested data region with the present clause sees the outer copyin
     mapping.  */
  i = -1;
  j = -2;

#pragma acc data copyin (i, j)
  {
#pragma acc data present (i, j)
    {
      if (!is_mapped (&i, sizeof (i)) || !is_mapped (&j, sizeof (j)))
        abort ();

      if (i != -1 || j != -2)
        abort ();

      i = 2;
      j = 1;

      if (i != 2 || j != 1)
        abort ();
    }
  }

  if (i != 2 || j != 1)
    abort ();

  /* A data region with no clauses maps nothing.  */
  i = -1;
  j = -2;

#pragma acc data
  {
#if !ACC_MEM_SHARED
    if (is_mapped (&i, sizeof (i)) || is_mapped (&j, sizeof (j)))
      abort ();
#endif

    if (i != -1 || j != -2)
      abort ();

    i = 2;
    j = 1;

    if (i != 2 || j != 1)
      abort ();
  }

  if (i != 2 || j != 1)
    abort ();

  return 0;
}
| {
"pile_set_name": "Github"
} |
# Event 31 - HbdrvIrpTag
###### Version: 1
## Description
None
## Data Dictionary
|Standard Name|Field Name|Type|Description|Sample Value|
|---|---|---|---|---|
|TBD|VolumeOffset|UInt64|None|`None`|
|TBD|Length|UInt32|None|`None`|
|TBD|Read|UInt8|None|`None`|
|TBD|Priority|UInt16|None|`None`|
|TBD|PartialBmpHit|UInt8|None|`None`|
## Tags
* etw_level_Informational
* etw_keywords_StoreDiag
* etw_task_HbdrvIrpTag | {
"pile_set_name": "Github"
} |
#include "burger.h"
/********************************

	Set the lock flag on a given handle and return the data pointer.

	NOTE(review): the cast assumes the handle pointer is also a pointer
	to a MyHandle record, and that dereferencing the handle yields the
	data pointer (i.e. the data pointer is the record's first field) —
	confirm the MyHandle layout in burger.h.

********************************/

void *LockAHandle(void **TheHandle)
{
	((MyHandle *)TheHandle)->Flags |= HANDLELOCK;	/* Mark locked */
	return *TheHandle;		/* Hand back the raw data pointer */
}
| {
"pile_set_name": "Github"
} |
using System.Collections.Generic;
using System.Text.Json;
using System.Text.Json.Serialization;
using Dapper;
using NzbDrone.Common.Extensions;
using NzbDrone.Common.Reflection;
using NzbDrone.Core.Datastore;
using NzbDrone.Core.Datastore.Converters;
using NzbDrone.Core.Messaging.Events;
namespace NzbDrone.Core.ThingiProvider
{
    /// <summary>
    /// Repository for provider definitions whose Settings column stores a
    /// JSON blob. Overrides Query to deserialize each row's settings into
    /// the concrete IProviderConfig type named by its ConfigContract.
    /// </summary>
    public class ProviderRepository<TProviderDefinition> : BasicRepository<TProviderDefinition>, IProviderRepository<TProviderDefinition>
        where TProviderDefinition : ProviderDefinition,
        new()
    {
        // Serializer options shared by all settings (de)serialization.
        protected readonly JsonSerializerOptions _serializerSettings;

        protected ProviderRepository(IMainDatabase database, IEventAggregator eventAggregator)
            : base(database, eventAggregator)
        {
            // Camel-cased, indented JSON with lenient parsing (trailing
            // commas, case-insensitive property names).
            // NOTE(review): IgnoreNullValues is deprecated in newer
            // System.Text.Json versions in favour of
            // DefaultIgnoreCondition — confirm target version before
            // changing.
            var serializerSettings = new JsonSerializerOptions
            {
                AllowTrailingCommas = true,
                IgnoreNullValues = true,
                PropertyNameCaseInsensitive = true,
                DictionaryKeyPolicy = JsonNamingPolicy.CamelCase,
                PropertyNamingPolicy = JsonNamingPolicy.CamelCase,
                WriteIndented = true
            };

            serializerSettings.Converters.Add(new JsonStringEnumConverter(JsonNamingPolicy.CamelCase, true));
            serializerSettings.Converters.Add(new TimeSpanConverter());
            serializerSettings.Converters.Add(new UtcConverter());

            _serializerSettings = serializerSettings;
        }

        /// <summary>
        /// Runs the query and materializes each row, deserializing the
        /// Settings JSON into the type named by the row's ConfigContract.
        /// A null/blank Settings column yields NullConfig.Instance.
        /// </summary>
        protected override List<TProviderDefinition> Query(SqlBuilder builder)
        {
            var type = typeof(TProviderDefinition);
            var sql = builder.Select(type).AddSelectTemplate(type);

            var results = new List<TProviderDefinition>();

            using (var conn = _database.OpenConnection())
            using (var reader = conn.ExecuteReader(sql.RawSql, sql.Parameters))
            {
                var parser = reader.GetRowParser<TProviderDefinition>(typeof(TProviderDefinition));
                var settingsIndex = reader.GetOrdinal(nameof(ProviderDefinition.Settings));

                while (reader.Read())
                {
                    // Read the raw settings JSON before the row parser
                    // consumes the record.
                    var body = reader.IsDBNull(settingsIndex) ? null : reader.GetString(settingsIndex);
                    var item = parser(reader);

                    // Resolve the concrete settings type by name from the
                    // IProviderConfig assembly.
                    var impType = typeof(IProviderConfig).Assembly.FindTypeByName(item.ConfigContract);

                    if (body.IsNullOrWhiteSpace())
                    {
                        item.Settings = NullConfig.Instance;
                    }
                    else
                    {
                        item.Settings = (IProviderConfig)JsonSerializer.Deserialize(body, impType, _serializerSettings);
                    }

                    results.Add(item);
                }
            }

            return results;
        }
    }
}
| {
"pile_set_name": "Github"
} |
{
"name": "guzzle/service",
"description": "Guzzle service component for abstracting RESTful web services",
"homepage": "http://guzzlephp.org/",
"keywords": ["web service", "webservice", "REST", "guzzle"],
"license": "MIT",
"authors": [
{
"name": "Michael Dowling",
"email": "[email protected]",
"homepage": "https://github.com/mtdowling"
}
],
"require": {
"php": ">=5.3.2",
"guzzle/cache": "self.version",
"guzzle/http": "self.version",
"guzzle/inflection": "self.version"
},
"autoload": {
"psr-0": { "Guzzle\\Service": "" }
},
"target-dir": "Guzzle/Service",
"extra": {
"branch-alias": {
"dev-master": "3.7-dev"
}
}
}
| {
"pile_set_name": "Github"
} |
<?xml version="1.0" encoding="utf-8"?>
<Project ToolsVersion="12.0" DefaultTargets="Build" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<Import Project="$(MSBuildExtensionsPath)\$(MSBuildToolsVersion)\Microsoft.Common.props" Condition="Exists('$(MSBuildExtensionsPath)\$(MSBuildToolsVersion)\Microsoft.Common.props')" />
<PropertyGroup>
<Configuration Condition=" '$(Configuration)' == '' ">Debug</Configuration>
<Platform Condition=" '$(Platform)' == '' ">AnyCPU</Platform>
<SchemaVersion>2.0</SchemaVersion>
<ProjectGuid>331e2b2c-eee0-460d-a28c-82f93c58237e</ProjectGuid>
<OutputType>Exe</OutputType>
<RootNamespace>FsAdvent</RootNamespace>
<AssemblyName>StarWars</AssemblyName>
<TargetFrameworkVersion>v4.5</TargetFrameworkVersion>
<AutoGenerateBindingRedirects>true</AutoGenerateBindingRedirects>
<TargetFSharpCoreVersion>4.4.0.0</TargetFSharpCoreVersion>
<Name>FsAdvent</Name>
<TargetFrameworkProfile />
</PropertyGroup>
<PropertyGroup Condition=" '$(Configuration)|$(Platform)' == 'Debug|AnyCPU' ">
<DebugSymbols>true</DebugSymbols>
<DebugType>full</DebugType>
<Optimize>false</Optimize>
<Tailcalls>false</Tailcalls>
<OutputPath>bin\Debug\</OutputPath>
<DefineConstants>DEBUG;TRACE</DefineConstants>
<WarningLevel>3</WarningLevel>
<PlatformTarget>AnyCPU</PlatformTarget>
<DocumentationFile>bin\Debug\FsAdvent.XML</DocumentationFile>
<Prefer32Bit>true</Prefer32Bit>
</PropertyGroup>
<PropertyGroup Condition=" '$(Configuration)|$(Platform)' == 'Release|AnyCPU' ">
<DebugType>pdbonly</DebugType>
<Optimize>true</Optimize>
<Tailcalls>true</Tailcalls>
<OutputPath>bin\Release\</OutputPath>
<DefineConstants>TRACE</DefineConstants>
<WarningLevel>3</WarningLevel>
<PlatformTarget>AnyCPU</PlatformTarget>
<DocumentationFile>bin\Release\FsAdvent.XML</DocumentationFile>
<Prefer32Bit>true</Prefer32Bit>
</PropertyGroup>
<PropertyGroup>
<MinimumVisualStudioVersion Condition="'$(MinimumVisualStudioVersion)' == ''">11</MinimumVisualStudioVersion>
</PropertyGroup>
<Choose>
<When Condition="'$(VisualStudioVersion)' == '11.0'">
<PropertyGroup Condition="Exists('$(MSBuildExtensionsPath32)\..\Microsoft SDKs\F#\3.0\Framework\v4.0\Microsoft.FSharp.Targets')">
<FSharpTargetsPath>$(MSBuildExtensionsPath32)\..\Microsoft SDKs\F#\3.0\Framework\v4.0\Microsoft.FSharp.Targets</FSharpTargetsPath>
</PropertyGroup>
</When>
<Otherwise>
<PropertyGroup Condition="Exists('$(MSBuildExtensionsPath32)\Microsoft\VisualStudio\v$(VisualStudioVersion)\FSharp\Microsoft.FSharp.Targets')">
<FSharpTargetsPath>$(MSBuildExtensionsPath32)\Microsoft\VisualStudio\v$(VisualStudioVersion)\FSharp\Microsoft.FSharp.Targets</FSharpTargetsPath>
</PropertyGroup>
</Otherwise>
</Choose>
<Import Project="$(FSharpTargetsPath)" />
<!-- To modify your build process, add your task inside one of the targets below and uncomment it.
Other similar extension points exist, see Microsoft.Common.targets.
<Target Name="BeforeBuild">
</Target>
<Target Name="AfterBuild">
</Target>
-->
<ItemGroup>
<Content Include="App.config">
<CopyToOutputDirectory>Always</CopyToOutputDirectory>
</Content>
<Content Include="Google.DataTable.Net.Wrapper.XML">
<Paket>True</Paket>
</Content>
<Compile Include="parseScripts.fs" />
<Compile Include="1_getNames.fsx" />
<None Include="2_getMentions.fsx" />
<None Include="3_getInteractions.fsx" />
<None Include="4_timelines.fsx" />
<None Include="5_centrality.fsx" />
</ItemGroup>
<ItemGroup>
<Reference Include="mscorlib" />
<Reference Include="FSharp.Core, Version=$(TargetFSharpCoreVersion), Culture=neutral, PublicKeyToken=b03f5f7f11d50a3a">
<Private>True</Private>
</Reference>
<Reference Include="System" />
<Reference Include="System.Core" />
<Reference Include="System.Numerics" />
<Reference Include="System.Drawing" />
<Reference Include="System.Windows.Forms" />
</ItemGroup>
<Choose>
<When Condition="$(TargetFrameworkIdentifier) == '.NETFramework' And ($(TargetFrameworkVersion) == 'v4.0' Or $(TargetFrameworkVersion) == 'v4.5' Or $(TargetFrameworkVersion) == 'v4.5.1' Or $(TargetFrameworkVersion) == 'v4.5.2' Or $(TargetFrameworkVersion) == 'v4.5.3' Or $(TargetFrameworkVersion) == 'v4.6')">
<ItemGroup>
<Reference Include="Deedle">
<HintPath>packages\Deedle\lib\net40\Deedle.dll</HintPath>
<Private>True</Private>
<Paket>True</Paket>
</Reference>
</ItemGroup>
</When>
</Choose>
<Choose>
<When Condition="$(TargetFrameworkIdentifier) == '.NETFramework' And ($(TargetFrameworkVersion) == 'v4.0' Or $(TargetFrameworkVersion) == 'v4.5' Or $(TargetFrameworkVersion) == 'v4.5.1' Or $(TargetFrameworkVersion) == 'v4.5.2' Or $(TargetFrameworkVersion) == 'v4.5.3' Or $(TargetFrameworkVersion) == 'v4.6')">
<ItemGroup>
<Reference Include="Deedle.RProvider.Plugin">
<HintPath>packages\Deedle.RPlugin\lib\net40\Deedle.RProvider.Plugin.dll</HintPath>
<Private>True</Private>
<Paket>True</Paket>
</Reference>
</ItemGroup>
</When>
</Choose>
<Choose>
<When Condition="$(TargetFrameworkIdentifier) == '.NETFramework' And ($(TargetFrameworkVersion) == 'v4.0' Or $(TargetFrameworkVersion) == 'v4.5' Or $(TargetFrameworkVersion) == 'v4.5.1' Or $(TargetFrameworkVersion) == 'v4.5.2' Or $(TargetFrameworkVersion) == 'v4.5.3' Or $(TargetFrameworkVersion) == 'v4.6')">
<ItemGroup>
<Reference Include="DynamicInterop">
<HintPath>packages\DynamicInterop\lib\net40\DynamicInterop.dll</HintPath>
<Private>True</Private>
<Paket>True</Paket>
</Reference>
</ItemGroup>
</When>
</Choose>
<Choose>
<When Condition="$(TargetFrameworkIdentifier) == '.NETFramework' And ($(TargetFrameworkVersion) == 'v4.0' Or $(TargetFrameworkVersion) == 'v4.5' Or $(TargetFrameworkVersion) == 'v4.5.1' Or $(TargetFrameworkVersion) == 'v4.5.2' Or $(TargetFrameworkVersion) == 'v4.5.3' Or $(TargetFrameworkVersion) == 'v4.6')">
<ItemGroup>
<Reference Include="Foogle.Charts">
<HintPath>packages\Foogle.Charts\lib\net40\Foogle.Charts.dll</HintPath>
<Private>True</Private>
<Paket>True</Paket>
</Reference>
</ItemGroup>
</When>
</Choose>
<Choose>
<When Condition="$(TargetFrameworkIdentifier) == '.NETFramework' And ($(TargetFrameworkVersion) == 'v4.0' Or $(TargetFrameworkVersion) == 'v4.5' Or $(TargetFrameworkVersion) == 'v4.5.1' Or $(TargetFrameworkVersion) == 'v4.5.2' Or $(TargetFrameworkVersion) == 'v4.5.3' Or $(TargetFrameworkVersion) == 'v4.6')">
<ItemGroup>
<Reference Include="FSharp.Charting">
<HintPath>packages\FSharp.Charting\lib\net40\FSharp.Charting.dll</HintPath>
<Private>True</Private>
<Paket>True</Paket>
</Reference>
</ItemGroup>
</When>
</Choose>
<Choose>
<When Condition="$(TargetFrameworkIdentifier) == '.NETFramework' And ($(TargetFrameworkVersion) == 'v4.0' Or $(TargetFrameworkVersion) == 'v4.5' Or $(TargetFrameworkVersion) == 'v4.5.1' Or $(TargetFrameworkVersion) == 'v4.5.2' Or $(TargetFrameworkVersion) == 'v4.5.3' Or $(TargetFrameworkVersion) == 'v4.6')">
<ItemGroup>
<Reference Include="FSharp.Data">
<HintPath>packages\FSharp.Data\lib\net40\FSharp.Data.dll</HintPath>
<Private>True</Private>
<Paket>True</Paket>
</Reference>
<Reference Include="System.Xml.Linq">
<Paket>True</Paket>
</Reference>
</ItemGroup>
</When>
<When Condition="($(TargetFrameworkIdentifier) == '.NETCore') Or ($(TargetFrameworkIdentifier) == 'Silverlight' And $(TargetFrameworkVersion) == 'v5.0') Or ($(TargetFrameworkIdentifier) == 'WindowsPhone' And ($(TargetFrameworkVersion) == 'v8.0' Or $(TargetFrameworkVersion) == 'v8.1')) Or ($(TargetFrameworkIdentifier) == 'MonoAndroid') Or ($(TargetFrameworkIdentifier) == 'MonoTouch') Or ($(TargetFrameworkIdentifier) == 'Xamarin.iOS') Or ($(TargetFrameworkIdentifier) == 'Xamarin.Mac') Or ($(TargetFrameworkProfile) == 'Profile5') Or ($(TargetFrameworkProfile) == 'Profile6') Or ($(TargetFrameworkProfile) == 'Profile7') Or ($(TargetFrameworkProfile) == 'Profile14') Or ($(TargetFrameworkProfile) == 'Profile19') Or ($(TargetFrameworkProfile) == 'Profile24') Or ($(TargetFrameworkProfile) == 'Profile31') Or ($(TargetFrameworkProfile) == 'Profile37') Or ($(TargetFrameworkProfile) == 'Profile42') Or ($(TargetFrameworkProfile) == 'Profile44') Or ($(TargetFrameworkProfile) == 'Profile47') Or ($(TargetFrameworkProfile) == 'Profile49') Or ($(TargetFrameworkProfile) == 'Profile78') Or ($(TargetFrameworkProfile) == 'Profile147') Or ($(TargetFrameworkProfile) == 'Profile158')">
<ItemGroup>
<Reference Include="FSharp.Data">
<HintPath>packages\FSharp.Data\lib\portable-net40+sl5+wp8+win8\FSharp.Data.dll</HintPath>
<Private>True</Private>
<Paket>True</Paket>
</Reference>
</ItemGroup>
</When>
</Choose>
<ItemGroup>
<Reference Include="Google.DataTable.Net.Wrapper">
<HintPath>packages\Google.DataTable.Net.Wrapper\lib\Google.DataTable.Net.Wrapper.dll</HintPath>
<Private>True</Private>
<Paket>True</Paket>
</Reference>
</ItemGroup>
<Choose>
<When Condition="$(TargetFrameworkIdentifier) == '.NETFramework' And ($(TargetFrameworkVersion) == 'v4.0' Or $(TargetFrameworkVersion) == 'v4.5' Or $(TargetFrameworkVersion) == 'v4.5.1' Or $(TargetFrameworkVersion) == 'v4.5.2' Or $(TargetFrameworkVersion) == 'v4.5.3' Or $(TargetFrameworkVersion) == 'v4.6')">
<ItemGroup>
<Reference Include="HttpClient">
<HintPath>packages\Http.fs\lib\net40\HttpClient.dll</HintPath>
<Private>True</Private>
<Paket>True</Paket>
</Reference>
</ItemGroup>
</When>
</Choose>
<Choose>
<When Condition="$(TargetFrameworkIdentifier) == '.NETFramework' And $(TargetFrameworkVersion) == 'v3.5'">
<ItemGroup>
<Reference Include="MathNet.Numerics">
<HintPath>packages\MathNet.Numerics\lib\net35\MathNet.Numerics.dll</HintPath>
<Private>True</Private>
<Paket>True</Paket>
</Reference>
</ItemGroup>
</When>
<When Condition="$(TargetFrameworkIdentifier) == '.NETFramework' And ($(TargetFrameworkVersion) == 'v4.0' Or $(TargetFrameworkVersion) == 'v4.5' Or $(TargetFrameworkVersion) == 'v4.5.1' Or $(TargetFrameworkVersion) == 'v4.5.2' Or $(TargetFrameworkVersion) == 'v4.5.3' Or $(TargetFrameworkVersion) == 'v4.6')">
<ItemGroup>
<Reference Include="MathNet.Numerics">
<HintPath>packages\MathNet.Numerics\lib\net40\MathNet.Numerics.dll</HintPath>
<Private>True</Private>
<Paket>True</Paket>
</Reference>
</ItemGroup>
</When>
<When Condition="($(TargetFrameworkIdentifier) == '.NETCore') Or ($(TargetFrameworkIdentifier) == 'MonoAndroid') Or ($(TargetFrameworkIdentifier) == 'MonoTouch') Or ($(TargetFrameworkIdentifier) == 'Xamarin.iOS') Or ($(TargetFrameworkIdentifier) == 'Xamarin.Mac') Or ($(TargetFrameworkProfile) == 'Profile7') Or ($(TargetFrameworkProfile) == 'Profile44')">
<ItemGroup>
<Reference Include="MathNet.Numerics">
<HintPath>packages\MathNet.Numerics\lib\portable-net45+netcore45+MonoAndroid1+MonoTouch1\MathNet.Numerics.dll</HintPath>
<Private>True</Private>
<Paket>True</Paket>
</Reference>
</ItemGroup>
</When>
<When Condition="($(TargetFrameworkIdentifier) == 'Silverlight' And $(TargetFrameworkVersion) == 'v5.0') Or ($(TargetFrameworkProfile) == 'Profile24') Or ($(TargetFrameworkProfile) == 'Profile47')">
<ItemGroup>
<Reference Include="MathNet.Numerics">
<HintPath>packages\MathNet.Numerics\lib\portable-net45+sl5+netcore45+MonoAndroid1+MonoTouch1\MathNet.Numerics.dll</HintPath>
<Private>True</Private>
<Paket>True</Paket>
</Reference>
</ItemGroup>
</When>
<When Condition="($(TargetFrameworkIdentifier) == 'WindowsPhone' And ($(TargetFrameworkVersion) == 'v8.0' Or $(TargetFrameworkVersion) == 'v8.1')) Or ($(TargetFrameworkProfile) == 'Profile31') Or ($(TargetFrameworkProfile) == 'Profile49') Or ($(TargetFrameworkProfile) == 'Profile78')">
<ItemGroup>
<Reference Include="MathNet.Numerics">
<HintPath>packages\MathNet.Numerics\lib\portable-net45+netcore45+wp8+MonoAndroid1+MonoTouch1\MathNet.Numerics.dll</HintPath>
<Private>True</Private>
<Paket>True</Paket>
</Reference>
</ItemGroup>
</When>
<When Condition="($(TargetFrameworkIdentifier) == 'WindowsPhoneApp') Or ($(TargetFrameworkProfile) == 'Profile32') Or ($(TargetFrameworkProfile) == 'Profile84') Or ($(TargetFrameworkProfile) == 'Profile111') Or ($(TargetFrameworkProfile) == 'Profile151') Or ($(TargetFrameworkProfile) == 'Profile157') Or ($(TargetFrameworkProfile) == 'Profile259')">
<ItemGroup>
<Reference Include="MathNet.Numerics">
<HintPath>packages\MathNet.Numerics\lib\portable-net45+netcore45+wpa81+wp8+MonoAndroid1+MonoTouch1\MathNet.Numerics.dll</HintPath>
<Private>True</Private>
<Paket>True</Paket>
</Reference>
</ItemGroup>
</When>
<When Condition="($(TargetFrameworkProfile) == 'Profile5') Or ($(TargetFrameworkProfile) == 'Profile6') Or ($(TargetFrameworkProfile) == 'Profile14') Or ($(TargetFrameworkProfile) == 'Profile19') Or ($(TargetFrameworkProfile) == 'Profile37') Or ($(TargetFrameworkProfile) == 'Profile42') Or ($(TargetFrameworkProfile) == 'Profile92') Or ($(TargetFrameworkProfile) == 'Profile102') Or ($(TargetFrameworkProfile) == 'Profile136') Or ($(TargetFrameworkProfile) == 'Profile147') Or ($(TargetFrameworkProfile) == 'Profile158') Or ($(TargetFrameworkProfile) == 'Profile225') Or ($(TargetFrameworkProfile) == 'Profile240') Or ($(TargetFrameworkProfile) == 'Profile255') Or ($(TargetFrameworkProfile) == 'Profile328') Or ($(TargetFrameworkProfile) == 'Profile336') Or ($(TargetFrameworkProfile) == 'Profile344')">
<ItemGroup>
<Reference Include="MathNet.Numerics">
<HintPath>packages\MathNet.Numerics\lib\portable-net4+sl5+netcore45+wpa81+wp8+MonoAndroid1+MonoTouch1\MathNet.Numerics.dll</HintPath>
<Private>True</Private>
<Paket>True</Paket>
</Reference>
</ItemGroup>
</When>
</Choose>
<Choose>
<When Condition="$(TargetFrameworkIdentifier) == '.NETFramework' And $(TargetFrameworkVersion) == 'v3.5'">
<ItemGroup>
<Reference Include="MathNet.Numerics.FSharp">
<HintPath>packages\MathNet.Numerics.FSharp\lib\net35\MathNet.Numerics.FSharp.dll</HintPath>
<Private>True</Private>
<Paket>True</Paket>
</Reference>
</ItemGroup>
</When>
<When Condition="$(TargetFrameworkIdentifier) == '.NETFramework' And ($(TargetFrameworkVersion) == 'v4.0' Or $(TargetFrameworkVersion) == 'v4.5' Or $(TargetFrameworkVersion) == 'v4.5.1' Or $(TargetFrameworkVersion) == 'v4.5.2' Or $(TargetFrameworkVersion) == 'v4.5.3' Or $(TargetFrameworkVersion) == 'v4.6')">
<ItemGroup>
<Reference Include="MathNet.Numerics.FSharp">
<HintPath>packages\MathNet.Numerics.FSharp\lib\net40\MathNet.Numerics.FSharp.dll</HintPath>
<Private>True</Private>
<Paket>True</Paket>
</Reference>
</ItemGroup>
</When>
<When Condition="($(TargetFrameworkIdentifier) == '.NETCore') Or ($(TargetFrameworkIdentifier) == 'Silverlight' And $(TargetFrameworkVersion) == 'v5.0') Or ($(TargetFrameworkIdentifier) == 'MonoAndroid') Or ($(TargetFrameworkIdentifier) == 'MonoTouch') Or ($(TargetFrameworkIdentifier) == 'Xamarin.iOS') Or ($(TargetFrameworkIdentifier) == 'Xamarin.Mac') Or ($(TargetFrameworkProfile) == 'Profile7') Or ($(TargetFrameworkProfile) == 'Profile24') Or ($(TargetFrameworkProfile) == 'Profile44') Or ($(TargetFrameworkProfile) == 'Profile47')">
<ItemGroup>
<Reference Include="MathNet.Numerics.FSharp">
<HintPath>packages\MathNet.Numerics.FSharp\lib\portable-net45+sl5+netcore45+MonoAndroid1+MonoTouch1\MathNet.Numerics.FSharp.dll</HintPath>
<Private>True</Private>
<Paket>True</Paket>
</Reference>
</ItemGroup>
</When>
</Choose>
<Choose>
<When Condition="$(TargetFrameworkIdentifier) == '.NETFramework' And $(TargetFrameworkVersion) == 'v3.5'">
<ItemGroup>
<Reference Include="Newtonsoft.Json">
<HintPath>packages\Newtonsoft.Json\lib\net35\Newtonsoft.Json.dll</HintPath>
<Private>True</Private>
<Paket>True</Paket>
</Reference>
</ItemGroup>
</When>
<When Condition="$(TargetFrameworkIdentifier) == '.NETFramework' And ($(TargetFrameworkVersion) == 'v2.0' Or $(TargetFrameworkVersion) == 'v3.0')">
<ItemGroup>
<Reference Include="Newtonsoft.Json">
<HintPath>packages\Newtonsoft.Json\lib\net20\Newtonsoft.Json.dll</HintPath>
<Private>True</Private>
<Paket>True</Paket>
</Reference>
</ItemGroup>
</When>
<When Condition="$(TargetFrameworkIdentifier) == '.NETFramework' And ($(TargetFrameworkVersion) == 'v4.0')">
<ItemGroup>
<Reference Include="Newtonsoft.Json">
<HintPath>packages\Newtonsoft.Json\lib\net40\Newtonsoft.Json.dll</HintPath>
<Private>True</Private>
<Paket>True</Paket>
</Reference>
</ItemGroup>
</When>
<When Condition="$(TargetFrameworkIdentifier) == '.NETFramework' And ($(TargetFrameworkVersion) == 'v4.5' Or $(TargetFrameworkVersion) == 'v4.5.1' Or $(TargetFrameworkVersion) == 'v4.5.2' Or $(TargetFrameworkVersion) == 'v4.5.3' Or $(TargetFrameworkVersion) == 'v4.6')">
<ItemGroup>
<Reference Include="Newtonsoft.Json">
<HintPath>packages\Newtonsoft.Json\lib\net45\Newtonsoft.Json.dll</HintPath>
<Private>True</Private>
<Paket>True</Paket>
</Reference>
</ItemGroup>
</When>
<When Condition="($(TargetFrameworkIdentifier) == 'MonoAndroid') Or ($(TargetFrameworkIdentifier) == 'MonoTouch') Or ($(TargetFrameworkIdentifier) == 'Xamarin.iOS') Or ($(TargetFrameworkIdentifier) == 'Xamarin.Mac') Or ($(TargetFrameworkProfile) == 'Profile7') Or ($(TargetFrameworkProfile) == 'Profile44') Or ($(TargetFrameworkProfile) == 'Profile49') Or ($(TargetFrameworkProfile) == 'Profile78') Or ($(TargetFrameworkProfile) == 'Profile111') Or ($(TargetFrameworkProfile) == 'Profile151') Or ($(TargetFrameworkProfile) == 'Profile259')">
<ItemGroup>
<Reference Include="Newtonsoft.Json">
<HintPath>packages\Newtonsoft.Json\lib\portable-net45+wp80+win8+wpa81+dnxcore50\Newtonsoft.Json.dll</HintPath>
<Private>True</Private>
<Paket>True</Paket>
</Reference>
</ItemGroup>
</When>
<When Condition="($(TargetFrameworkIdentifier) == 'WindowsPhoneApp') Or ($(TargetFrameworkIdentifier) == '.NETCore') Or ($(TargetFrameworkIdentifier) == 'Silverlight' And $(TargetFrameworkVersion) == 'v5.0') Or ($(TargetFrameworkIdentifier) == 'WindowsPhone' And ($(TargetFrameworkVersion) == 'v8.0' Or $(TargetFrameworkVersion) == 'v8.1')) Or ($(TargetFrameworkProfile) == 'Profile5') Or ($(TargetFrameworkProfile) == 'Profile6') Or ($(TargetFrameworkProfile) == 'Profile14') Or ($(TargetFrameworkProfile) == 'Profile19') Or ($(TargetFrameworkProfile) == 'Profile24') Or ($(TargetFrameworkProfile) == 'Profile31') Or ($(TargetFrameworkProfile) == 'Profile32') Or ($(TargetFrameworkProfile) == 'Profile37') Or ($(TargetFrameworkProfile) == 'Profile42') Or ($(TargetFrameworkProfile) == 'Profile47') Or ($(TargetFrameworkProfile) == 'Profile84') Or ($(TargetFrameworkProfile) == 'Profile92') Or ($(TargetFrameworkProfile) == 'Profile102') Or ($(TargetFrameworkProfile) == 'Profile136') Or ($(TargetFrameworkProfile) == 'Profile147') Or ($(TargetFrameworkProfile) == 'Profile157') Or ($(TargetFrameworkProfile) == 'Profile158') Or ($(TargetFrameworkProfile) == 'Profile225') Or ($(TargetFrameworkProfile) == 'Profile240') Or ($(TargetFrameworkProfile) == 'Profile255') Or ($(TargetFrameworkProfile) == 'Profile328') Or ($(TargetFrameworkProfile) == 'Profile336') Or ($(TargetFrameworkProfile) == 'Profile344')">
<ItemGroup>
<Reference Include="Newtonsoft.Json">
<HintPath>packages\Newtonsoft.Json\lib\portable-net40+sl5+wp80+win8+wpa81\Newtonsoft.Json.dll</HintPath>
<Private>True</Private>
<Paket>True</Paket>
</Reference>
</ItemGroup>
</When>
</Choose>
<Choose>
<When Condition="$(TargetFrameworkIdentifier) == '.NETFramework' And ($(TargetFrameworkVersion) == 'v4.0' Or $(TargetFrameworkVersion) == 'v4.5' Or $(TargetFrameworkVersion) == 'v4.5.1' Or $(TargetFrameworkVersion) == 'v4.5.2' Or $(TargetFrameworkVersion) == 'v4.5.3' Or $(TargetFrameworkVersion) == 'v4.6')">
<ItemGroup>
<Reference Include="RDotNet.NativeLibrary">
<HintPath>packages\R.NET.Community\lib\net40\RDotNet.NativeLibrary.dll</HintPath>
<Private>True</Private>
<Paket>True</Paket>
</Reference>
<Reference Include="RDotNet">
<HintPath>packages\R.NET.Community\lib\net40\RDotNet.dll</HintPath>
<Private>True</Private>
<Paket>True</Paket>
</Reference>
</ItemGroup>
</When>
</Choose>
<Choose>
<When Condition="$(TargetFrameworkIdentifier) == '.NETFramework' And ($(TargetFrameworkVersion) == 'v4.0' Or $(TargetFrameworkVersion) == 'v4.5' Or $(TargetFrameworkVersion) == 'v4.5.1' Or $(TargetFrameworkVersion) == 'v4.5.2' Or $(TargetFrameworkVersion) == 'v4.5.3' Or $(TargetFrameworkVersion) == 'v4.6')">
<ItemGroup>
<Reference Include="RDotNet.FSharp">
<HintPath>packages\R.NET.Community.FSharp\lib\net40\RDotNet.FSharp.dll</HintPath>
<Private>True</Private>
<Paket>True</Paket>
</Reference>
</ItemGroup>
</When>
</Choose>
<Choose>
<When Condition="$(TargetFrameworkIdentifier) == '.NETFramework' And ($(TargetFrameworkVersion) == 'v4.0' Or $(TargetFrameworkVersion) == 'v4.5' Or $(TargetFrameworkVersion) == 'v4.5.1' Or $(TargetFrameworkVersion) == 'v4.5.2' Or $(TargetFrameworkVersion) == 'v4.5.3' Or $(TargetFrameworkVersion) == 'v4.6')">
<ItemGroup>
<Reference Include="RProvider.Runtime">
<HintPath>packages\RProvider\lib\net40\RProvider.Runtime.dll</HintPath>
<Private>True</Private>
<Paket>True</Paket>
</Reference>
<Reference Include="RProvider">
<HintPath>packages\RProvider\lib\net40\RProvider.dll</HintPath>
<Private>True</Private>
<Paket>True</Paket>
</Reference>
</ItemGroup>
</When>
</Choose>
<Choose>
<When Condition="$(TargetFrameworkIdentifier) == '.NETFramework' And ($(TargetFrameworkVersion) == 'v4.0' Or $(TargetFrameworkVersion) == 'v4.5' Or $(TargetFrameworkVersion) == 'v4.5.1' Or $(TargetFrameworkVersion) == 'v4.5.2' Or $(TargetFrameworkVersion) == 'v4.5.3' Or $(TargetFrameworkVersion) == 'v4.6')">
<ItemGroup>
<Reference Include="Suave">
<HintPath>packages\Suave\lib\net40\Suave.dll</HintPath>
<Private>True</Private>
<Paket>True</Paket>
</Reference>
</ItemGroup>
</When>
</Choose>
<Choose>
<When Condition="$(TargetFrameworkIdentifier) == '.NETFramework' And $(TargetFrameworkVersion) == 'v3.5'">
<ItemGroup>
<Reference Include="System.Threading">
<HintPath>packages\TaskParallelLibrary\lib\Net35\System.Threading.dll</HintPath>
<Private>True</Private>
<Paket>True</Paket>
</Reference>
</ItemGroup>
</When>
</Choose>
<Choose>
<When Condition="$(TargetFrameworkIdentifier) == '.NETFramework' And ($(TargetFrameworkVersion) == 'v4.5' Or $(TargetFrameworkVersion) == 'v4.5.1' Or $(TargetFrameworkVersion) == 'v4.5.2' Or $(TargetFrameworkVersion) == 'v4.5.3' Or $(TargetFrameworkVersion) == 'v4.6')">
<ItemGroup>
<Reference Include="XPlot.GoogleCharts">
<HintPath>packages\XPlot.GoogleCharts\lib\net45\XPlot.GoogleCharts.dll</HintPath>
<Private>True</Private>
<Paket>True</Paket>
</Reference>
</ItemGroup>
</When>
</Choose>
<Choose>
<When Condition="$(TargetFrameworkIdentifier) == '.NETFramework' And ($(TargetFrameworkVersion) == 'v4.5' Or $(TargetFrameworkVersion) == 'v4.5.1' Or $(TargetFrameworkVersion) == 'v4.5.2' Or $(TargetFrameworkVersion) == 'v4.5.3' Or $(TargetFrameworkVersion) == 'v4.6')">
<ItemGroup>
<Reference Include="XPlot.GoogleCharts.Deedle">
<HintPath>packages\XPlot.GoogleCharts.Deedle\lib\net45\XPlot.GoogleCharts.Deedle.dll</HintPath>
<Private>True</Private>
<Paket>True</Paket>
</Reference>
</ItemGroup>
</When>
</Choose>
<Choose>
<When Condition="$(TargetFrameworkIdentifier) == '.NETFramework' And ($(TargetFrameworkVersion) == 'v4.5' Or $(TargetFrameworkVersion) == 'v4.5.1' Or $(TargetFrameworkVersion) == 'v4.5.2' Or $(TargetFrameworkVersion) == 'v4.5.3' Or $(TargetFrameworkVersion) == 'v4.6')">
<ItemGroup>
<Reference Include="XPlot.Plotly">
<HintPath>packages\XPlot.Plotly\lib\net45\XPlot.Plotly.dll</HintPath>
<Private>True</Private>
<Paket>True</Paket>
</Reference>
</ItemGroup>
</When>
</Choose>
<Choose>
<When Condition="($(TargetFrameworkProfile) == 'Profile5') Or ($(TargetFrameworkProfile) == 'Profile6') Or ($(TargetFrameworkProfile) == 'Profile7') Or ($(TargetFrameworkProfile) == 'Profile14') Or ($(TargetFrameworkProfile) == 'Profile19') Or ($(TargetFrameworkProfile) == 'Profile24') Or ($(TargetFrameworkProfile) == 'Profile31') Or ($(TargetFrameworkProfile) == 'Profile32') Or ($(TargetFrameworkProfile) == 'Profile37') Or ($(TargetFrameworkProfile) == 'Profile42') Or ($(TargetFrameworkProfile) == 'Profile44') Or ($(TargetFrameworkProfile) == 'Profile47') Or ($(TargetFrameworkProfile) == 'Profile49') Or ($(TargetFrameworkProfile) == 'Profile78') Or ($(TargetFrameworkProfile) == 'Profile84') Or ($(TargetFrameworkProfile) == 'Profile92') Or ($(TargetFrameworkProfile) == 'Profile102') Or ($(TargetFrameworkProfile) == 'Profile111') Or ($(TargetFrameworkProfile) == 'Profile136') Or ($(TargetFrameworkProfile) == 'Profile147') Or ($(TargetFrameworkProfile) == 'Profile151') Or ($(TargetFrameworkProfile) == 'Profile157') Or ($(TargetFrameworkProfile) == 'Profile158') Or ($(TargetFrameworkProfile) == 'Profile225') Or ($(TargetFrameworkProfile) == 'Profile240') Or ($(TargetFrameworkProfile) == 'Profile255') Or ($(TargetFrameworkProfile) == 'Profile259') Or ($(TargetFrameworkProfile) == 'Profile328') Or ($(TargetFrameworkProfile) == 'Profile336') Or ($(TargetFrameworkProfile) == 'Profile344')">
<ItemGroup>
<Reference Include="Zlib.Portable">
<HintPath>packages\Zlib.Portable\lib\portable-net4+sl5+wp8+win8+wpa81+MonoTouch+MonoAndroid\Zlib.Portable.dll</HintPath>
<Private>True</Private>
<Paket>True</Paket>
</Reference>
</ItemGroup>
</When>
</Choose>
</Project> | {
"pile_set_name": "Github"
} |
package org.test;
import loon.Session;
import loon.Stage;
public class SessionTest extends Stage {

	@Override
	public void create() {
		// Load the persisted session, show how many times this screen has
		// been visited so far, then bump the counter and persist it again.
		final Session session = Session.load("session_test");
		final int visits = session.getInt("count", 0);
		addLabel("你是第" + visits + "次访问此Screen", 66, 66);
		session.set("count", visits + 1);
		session.save();
		// Navigation back to the screen-selection menu.
		add(MultiScreenTest.getBackButton(this, 0));
	}
}
| {
"pile_set_name": "Github"
} |
; BEGIN ISPPBUILTINS.ISS
//
// Inno Setup Preprocessor 5
//
// Copyright (C) 2001-2004 Alex Yackimoff. All Rights Reserved.
// Portions by Martijn Laan.
// http://ispp.sourceforge.net
//
// Inno Setup (C) 1997-2009 Jordan Russell. All Rights Reserved.
// Portions by Martijn Laan.
//
// $Id: ISPPBuiltins.iss,v 1.3 2010/12/29 15:20:26 mlaan Exp $
//
#if defined(ISPP_INVOKED) && !defined(_BUILTINS_ISS_)
//
#if PREPROCVER < 0x01000000
# error Inno Setup Preprocessor version is outdated
#endif
//
#define _BUILTINS_ISS_
//
// ===========================================================================
//
// Default states for options.
//
//#pragma parseroption -b+ ; short circuit boolean evaluation: on
//#pragma parseroption -m- ; short circuit multiplication evaluation (0 * A will not eval A): off
//#pragma parseroption -p+ ; string literals without escape sequences: on
//#pragma parseroption -u- ; allow undeclared identifiers: off
//#pragma option -c+ ; pass script to the compiler: on
//#pragma option -e- ; emit empty lines to translation: off
//#pragma option -v- ; verbose mode: off
//
// ---------------------------------------------------------------------------
//
// Verbose levels:
// 0 - #include and #file acknowledgements
// 1 - information about any temp files created by #file
// 2 - #insert and #append acknowledgements
// 3 - reserved
// 4 - #dim, #define and #undef acknowledgements
// 5 - reserved
// 6 - conditional inclusion acknowledgements
// 7 - reserved
// 8 - show strings emitted with #emit directive
// 9 - macro and functions successfull call acknowledgements
//10 - Local macro array allocation acknowledgements
//
//#pragma verboselevel 0
//
#ifndef __POPT_P__
# define private CStrings
# pragma parseroption -p+
#endif
//
#pragma spansymbol "\"
//
#define True 1
#define False 0
#define Yes True
#define No False
//
#define MaxInt 0x7FFFFFFFL
#define MinInt 0x80000000L
//
#define NULL
#define void
//
// TypeOf constants
//
#define TYPE_ERROR 0
#define TYPE_NULL 1
#define TYPE_INTEGER 2
#define TYPE_STRING 3
#define TYPE_MACRO 4
#define TYPE_FUNC 5
#define TYPE_ARRAY 6
//
// Helper macro to find out the type of an array element or expression. TypeOf
// standard function only allows identifier as its parameter. Use this macro
// to convert an expression to identifier.
//
#define TypeOf2(any Expr) TypeOf(Expr)
//
// ReadReg constants
//
#define HKEY_CLASSES_ROOT 0x80000000UL
#define HKEY_CURRENT_USER 0x80000001UL
#define HKEY_LOCAL_MACHINE 0x80000002UL
#define HKEY_USERS 0x80000003UL
//
#define HKCR HKEY_CLASSES_ROOT
#define HKCU HKEY_CURRENT_USER
#define HKLM HKEY_LOCAL_MACHINE
#define HKU HKEY_USERS
//
// Exec constants
//
#define SW_HIDE 0
#define SW_SHOWNORMAL 1
#define SW_NORMAL 1
#define SW_SHOWMINIMIZED 2
#define SW_SHOWMAXIMIZED 3
#define SW_MAXIMIZE 3
#define SW_SHOWNOACTIVATE 4
#define SW_SHOW 5
#define SW_MINIMIZE 6
#define SW_SHOWMINNOACTIVE 7
#define SW_SHOWNA 8
#define SW_RESTORE 9
#define SW_SHOWDEFAULT 10
#define SW_MAX 10
//
// Find constants
//
#define FIND_MATCH 0x00
#define FIND_BEGINS 0x01
#define FIND_ENDS 0x02
#define FIND_CONTAINS 0x03
#define FIND_CASESENSITIVE 0x04
#define FIND_SENSITIVE FIND_CASESENSITIVE
#define FIND_AND 0x00
#define FIND_OR 0x08
#define FIND_NOT 0x10
#define FIND_TRIM 0x20
//
// FindFirst constants
//
#define faReadOnly 0x00000001
#define faHidden 0x00000002
#define faSysFile 0x00000004
#define faVolumeID 0x00000008
#define faDirectory 0x00000010
#define faArchive 0x00000020
#define faSymLink 0x00000040
#define faAnyFile 0x0000003F
//
// GetStringFileInfo standard names
//
#define COMPANY_NAME "CompanyName"
#define FILE_DESCRIPTION "FileDescription"
#define FILE_VERSION "FileVersion"
#define INTERNAL_NAME "InternalName"
#define LEGAL_COPYRIGHT "LegalCopyright"
#define ORIGINAL_FILENAME "OriginalFilename"
#define PRODUCT_NAME "ProductName"
#define PRODUCT_VERSION "ProductVersion"
//
// GetStringFileInfo helpers
//
#define GetFileCompany(str FileName) GetStringFileInfo(FileName, COMPANY_NAME)
#define GetFileCopyright(str FileName) GetStringFileInfo(FileName, LEGAL_COPYRIGHT)
#define GetFileDescription(str FileName) GetStringFileInfo(FileName, FILE_DESCRIPTION)
#define GetFileProductVersion(str FileName) GetStringFileInfo(FileName, PRODUCT_VERSION)
#define GetFileVersionString(str FileName) GetStringFileInfo(FileName, FILE_VERSION)
//
// ParseVersion
//
// Macro internally calls GetFileVersion function and parses string returned
// by that function (in form "0.0.0.0"). All four version elements are stored
// in by-reference parameters Major, Minor, Rev, and Build. Macro returns
// string returned by GetFileVersion.
//
// DeleteToFirstPeriod
//
// Helper for ParseVersion: returns the substring of S up to (but not
// including) the first period, and rewrites S in place to hold the
// remainder after that period. NOTE(review): presumably S always
// contains a period when called (GetFileVersion output "a.b.c.d");
// behavior with no period is untested here.
//
#define DeleteToFirstPeriod(str *S) \
Local[1] = Copy(S, 1, (Local[0] = Pos(".", S)) - 1), \
S = Copy(S, Local[0] + 1), \
Local[1]
//
#define ParseVersion(str FileName, *Major, *Minor, *Rev, *Build) \
Local[1] = Local[0] = GetFileVersion(FileName), \
Local[1] == "" ? "" : ( \
Major = Int(DeleteToFirstPeriod(Local[1])), \
Minor = Int(DeleteToFirstPeriod(Local[1])), \
Rev = Int(DeleteToFirstPeriod(Local[1])), \
Build = Int(Local[1]), \
Local[0])
//
// EncodeVer
//
// Encodes given four version elements to a 32 bit integer number (8 bits for
// each element, i.e. elements must be within 0...255 range).
//
#define EncodeVer(int Major, int Minor, int Revision = 0, int Build = -1) \
Major << 24 | (Minor & 0xFF) << 16 | (Revision & 0xFF) << 8 | (Build >= 0 ? Build & 0xFF : 0)
//
// DecodeVer
//
// Decodes given 32 bit integer encoded version to its string representation,
// Digits parameter indicates how many elements to show (if the fourth element
// is 0, it won't be shown anyway).
//
#define DecodeVer(int Ver, int Digits = 3) \
Str(Ver >> 0x18 & 0xFF) + (Digits > 1 ? "." : "") + \
(Digits > 1 ? \
Str(Ver >> 0x10 & 0xFF) + (Digits > 2 ? "." : "") : "") + \
(Digits > 2 ? \
Str(Ver >> 0x08 & 0xFF) + (Digits > 3 && (Local = Ver & 0xFF) ? "." : "") : "") + \
(Digits > 3 && Local ? \
Str(Ver & 0xFF) : "")
//
// FindSection
//
// Returns index of the line following the header of the section. This macro
// is intended to be used with #insert directive.
//
#define FindSection(str Section = "Files") \
Find(0, "[" + Section + "]", FIND_MATCH | FIND_TRIM) + 1
//
// FindSectionEnd
//
// Returns index of the line following last entry of the section. This macro
// is intended to be used with #insert directive.
//
#if VER >= 0x03000000
# define FindNextSection(int Line) \
Find(Line, "[", FIND_BEGINS | FIND_TRIM, "]", FIND_ENDS | FIND_AND)
# define FindSectionEnd(str Section = "Files") \
FindNextSection(FindSection(Section))
#else
# define FindSectionEnd(str Section = "Files") \
FindSection(Section) + EntryCount(Section)
#endif
//
// FindCode
//
// Returns index of the line (of translation) following either [Code] section
// header, or "program" keyword, if any.
//
#define FindCode() \
Local[1] = FindSection("Code"), \
Local[0] = Find(Local[1] - 1, "program", FIND_BEGINS, ";", FIND_ENDS | FIND_AND), \
(Local[0] < 0 ? Local[1] : Local[0] + 1)
//
// ExtractFilePath
//
// Returns directory portion of the given filename without backslash (unless
// it is a root directory). If PathName doesn't contain directory portion,
// the result is an empty string.
//
#define ExtractFilePath(str PathName) \
(Local[0] = \
!(Local[1] = RPos("\", PathName)) ? \
"" : \
Copy(PathName, 1, Local[1] - 1)), \
Local[0] + \
((Local[2] = Len(Local[0])) == 2 && Copy(Local[0], Local[2]) == ":" ? \
"\" : \
"")
#define ExtractFileDir(str PathName) \
RemoveBackslash(ExtractFilePath(PathName))
#define ExtractFileExt(str PathName) \
Local[0] = RPos(".", PathName), \
Copy(PathName, Local[0] + 1)
//
// ExtractFileName
//
// Returns name portion of the given filename. If PathName ends with
// a backslash, the result is an empty string.
//
#define ExtractFileName(str PathName) \
!(Local[0] = RPos("\", PathName)) ? \
PathName : \
Copy(PathName, Local[0] + 1)
//
// ChangeFileExt
//
// Changes extension in FileName with NewExt. NewExt must not contain
// period.
//
#define ChangeFileExt(str FileName, str NewExt) \
!(Local[0] = RPos(".", FileName)) ? \
FileName + "." + NewExt : \
Copy(FileName, 1, Local[0]) + NewExt
//
// AddBackslash
//
// Adds a backslash to the string, if it's not already there.
//
#define AddBackslash(str S) \
Copy(S, Len(S)) == "\" ? S : S + "\"
//
// RemoveBackslash
//
// Removes trailing backslash from the string unless the string points to
// a root directory.
//
#define RemoveBackslash(str S) \
Local[0] = Len(S), \
Local[0] > 0 ? \
Copy(S, Local[0]) == "\" ? \
(Local[0] == 3 && Copy(S, 2, 1) == ":" ? \
S : \
Copy(S, 1, Local[0] - 1)) : \
S : \
""
//
// Delete
//
// Deletes specified number of characters beginning with Index from S. S is
// passed by reference (therefore is modified). Acts like Delete function in
// Delphi (from System unit).
//
#define Delete(str *S, int Index, int Count = MaxInt) \
S = Copy(S, 1, Index - 1) + Copy(S, Index + Count)
//
// Insert
//
// Inserts specified Substr at Index'th character into S. S is passed by
// reference (therefore is modified).
//
#define Insert(str *S, int Index, str Substr) \
Index > Len(S) + 1 ? \
S : \
S = Copy(S, 1, Index - 1) + SubStr + Copy(S, Index)
//
// YesNo, IsDirSet
//
// Returns nonzero value if given string is "yes", "true" or "1". Intended to
// be used with SetupSetting function. This macro replaces YesNo function
// available in previous releases.
//
#define YesNo(str S) \
(S = LowerCase(S)) == "yes" || S == "true" || S == "1"
//
#define IsDirSet(str SetupDirective) \
YesNo(SetupSetting(SetupDirective))
//
//
#define Power(int X, int P = 2) \
!P ? 1 : X * Power(X, P - 1)
//
#define Min(int A, int B, int C = MaxInt) \
A < B ? A < C ? Int(A) : Int(C) : Int(B)
//
#define Max(int A, int B, int C = MinInt) \
A > B ? A > C ? Int(A) : Int(C) : Int(B)
//
#ifdef CStrings
# pragma parseroption -p-
#endif
#endif
; END ISPPBUILTINS.ISS
| {
"pile_set_name": "Github"
} |
/* ----------------------------------------------------------------------------
* ATMEL Microcontroller Software Support
* ----------------------------------------------------------------------------
* Copyright (c) 2009, Atmel Corporation
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* - Redistributions of source code must retain the above copyright notice,
* this list of conditions and the disclaimer below.
*
* Atmel's name may not be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* DISCLAIMER: THIS SOFTWARE IS PROVIDED BY ATMEL "AS IS" AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT ARE
* DISCLAIMED. IN NO EVENT SHALL ATMEL BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
* OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
* EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
* ----------------------------------------------------------------------------
*/
/**
* \file
*
* Implementation of USART driver, transfer data through DMA.
*
*/
#ifndef _USART_DMA_H_
#define _USART_DMA_H_
/*----------------------------------------------------------------------------
 *        Headers
 *----------------------------------------------------------------------------*/
#include "chip.h"
/*----------------------------------------------------------------------------
 *        Definitions
 *----------------------------------------------------------------------------*/
/** An unspecified error has occurred.*/
#define USARTD_ERROR          1
/** USART driver is currently in use.*/
#define USARTD_ERROR_LOCK     2
#ifdef __cplusplus
 extern "C" {
#endif
/*----------------------------------------------------------------------------
 *        Types
 *----------------------------------------------------------------------------*/
/** USART transfer complete callback. */
typedef void (*UsartdCallback)( uint8_t, void* ) ;
/** \brief usart Transfer Request prepared by the application upper layer.
 *
 * This structure is sent to the USART_Send or USART_Rcv to start the transfer.
 * At the end of the transfer, the callback is invoked by the interrupt handler.
 */
typedef struct
{
    /** Pointer to the Buffer. */
    uint8_t *pBuff;
    /** Buff size in bytes. */
    uint8_t BuffSize;
    /** Dma channel num. */
    uint8_t ChNum;
    /** Callback function invoked at the end of transfer. */
    UsartdCallback callback;
    /** Callback arguments. */
    void *pArgument;
    /** flag to indicate the current transfer. */
    volatile uint8_t Done;
} UsartChannel ;
/** Constant structure associated with USART port. This structure prevents
    client applications from accessing the port at the same time. */
typedef struct
{
    /** Pointer to USART Hardware registers */
    Usart* pUsartHw ;
    /** Current Usart Rx channel */
    UsartChannel *pRxChannel ;
    /** Current Usart Tx channel */
    UsartChannel *pTxChannel ;
    /** Pointer to DMA driver */
    sXdmad* pXdmad;
    /** USART Id as defined in the product datasheet */
    uint8_t usartId ;
} UsartDma;
/*----------------------------------------------------------------------------
 *        Exported functions
 *----------------------------------------------------------------------------*/
/* Configure the USART peripheral and bind it to the given DMA driver. */
extern uint32_t USARTD_Configure( UsartDma *pUsartd ,
                                  Usart *pUsartHw ,
                                  uint8_t USARTId,
                                  uint32_t UsartMode,
                                  uint32_t UsartClk,
                                  sXdmad *pXdmad );
/* Attach and enable a DMA channel for transmit / receive, respectively. */
extern uint32_t USARTD_EnableTxChannels( UsartDma *pUsartd, UsartChannel *pTxCh);
extern uint32_t USARTD_EnableRxChannels( UsartDma *pUsartd, UsartChannel *pRxCh);
/* Start a DMA transfer on the currently attached Tx / Rx channel. */
extern uint32_t USARTD_SendData( UsartDma* pUsartd ) ;
extern uint32_t USARTD_RcvData( UsartDma *pUsartd);
#ifdef __cplusplus
}
#endif
#endif /* #ifndef _USART_DMA_H_ */
| {
"pile_set_name": "Github"
} |
import React from 'react';
import PropTypes from 'prop-types';
import { get } from 'lodash';
import Response from './Response';
class Responses extends React.Component {
static propTypes = {
responses: PropTypes.arrayOf(PropTypes.object).isRequired,
};
render() {
const { responses } = this.props;
if (!responses || responses.length === 0) {
return <div />;
}
return (
<div className="Responses">
<style jsx>
{`
.Responses {
width: 100%;
display: inline-block;
}
.innerResponses {
margin: 3rem auto;
max-width: 960px;
}
`}
</style>
<div className="innerResponses">
{responses.map(response => (
<Response key={`${get(response, 'user.id', 0)}-${response.createdAt}`} response={response} />
))}
</div>
</div>
);
}
}
export default Responses;
| {
"pile_set_name": "Github"
} |
<?php
/*
* Spring Signage Ltd - http://www.springsignage.com
* Copyright (C) 2015 Spring Signage Ltd
* (UserType.php)
*/
namespace Xibo\Entity;
use Xibo\Service\LogServiceInterface;
use Xibo\Storage\StorageServiceInterface;
/**
* Class UserType
* @package Xibo\Entity
*
*/
class UserType
{
    use EntityTrait;

    /** @var int Primary key of the user type record. */
    public $userTypeId;

    /** @var string Human-readable name of the user type. */
    public $userType;

    /**
     * Entity constructor.
     * @param StorageServiceInterface $store
     * @param LogServiceInterface $log
     */
    public function __construct($store, $log)
    {
        $this->setCommonDependencies($store, $log);
    }

    /**
     * @return int the entity id (the userTypeId)
     */
    public function getId()
    {
        return $this->userTypeId;
    }

    /**
     * @return int always 1 — user types are owned by the super admin
     */
    public function getOwnerId()
    {
        return 1;
    }
} | {
"pile_set_name": "Github"
} |
package(default_visibility = ["//visibility:public"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_library",
    "go_test",
)

# Library target for k8s.io/apimachinery/pkg/watch.
go_library(
    name = "go_default_library",
    srcs = [
        "doc.go",
        "filter.go",
        "mux.go",
        "streamwatcher.go",
        "until.go",
        "watch.go",
        "zz_generated.deepcopy.go",
    ],
    importpath = "k8s.io/apimachinery/pkg/watch",
    deps = [
        "//vendor/github.com/golang/glog:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/net:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
    ],
)

# External (black-box) tests: compiled in a separate package, so they only
# depend on the library's public API.
go_test(
    name = "go_default_xtest",
    srcs = [
        "filter_test.go",
        "mux_test.go",
        "streamwatcher_test.go",
        "watch_test.go",
    ],
    deps = [
        "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
    ],
)

# Internal (white-box) tests: embedded in the library package itself.
go_test(
    name = "go_default_test",
    srcs = ["until_test.go"],
    embed = [":go_default_library"],
    deps = [
        "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
    ],
)

# Source groups consumed by the repository-wide build aggregation.
filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
)
| {
"pile_set_name": "Github"
} |
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Adapted for dns package usage by Miek Gieben.

package dns

import "sync"
import "time"

// call is an in-flight or completed singleflight.Do call
type call struct {
	wg sync.WaitGroup
	// val, rtt and err hold the outcome of fn; they are written once by
	// the first caller and read by waiters only after wg is done.
	val *Msg
	rtt time.Duration
	err error
	// dups counts callers that piggy-backed on this call.
	dups int
}

// singleflight represents a class of work and forms a namespace in
// which units of work can be executed with duplicate suppression.
type singleflight struct {
	sync.Mutex // protects m

	m map[string]*call // lazily initialized

	dontDeleteForTesting bool // this is only to be used by TestConcurrentExchanges
}

// Do executes and returns the results of the given function, making
// sure that only one execution is in-flight for a given key at a
// time. If a duplicate comes in, the duplicate caller waits for the
// original to complete and receives the same results.
// The return value shared indicates whether v was given to multiple callers.
func (g *singleflight) Do(key string, fn func() (*Msg, time.Duration, error)) (v *Msg, rtt time.Duration, err error, shared bool) {
	g.Lock()
	if g.m == nil {
		g.m = make(map[string]*call)
	}
	if c, ok := g.m[key]; ok {
		// Duplicate caller: note the dup and wait for the original
		// in-flight call to publish its results.
		c.dups++
		g.Unlock()
		c.wg.Wait()
		return c.val, c.rtt, c.err, true
	}
	// First caller for this key: register the call so later callers
	// block on wg instead of starting their own execution.
	c := new(call)
	c.wg.Add(1)
	g.m[key] = c
	g.Unlock()

	c.val, c.rtt, c.err = fn()
	c.wg.Done()
	if !g.dontDeleteForTesting {
		g.Lock()
		delete(g.m, key)
		g.Unlock()
	}
	// NOTE(review): c.dups is read here without holding the mutex while a
	// late duplicate could still increment it — verify this race is benign
	// for the "shared" return value.
	return c.val, c.rtt, c.err, c.dups > 0
}
| {
"pile_set_name": "Github"
} |
# Builds the tremor (integer-only Vorbis decoder) test binary.
TARGET  = tremor-test

SOURCES = \
	main.c \
	bitwise.c \
	block.c \
	codebook.c \
	floor0.c \
	floor1.c \
	framing.c \
	info.c \
	mapping0.c \
	mdct.c \
	registry.c \
	res012.c \
	sharedbook.c \
	synthesis.c \
	vorbisfile.c \
	window.c

# -idirafter . lets bundled headers lose to system ones of the same name.
CFLAGS  = -g -idirafter . -Wall -D_LOW_ACCURACY_
OBJECTS = $(SOURCES:.c=.o)

# These targets are commands, not files: without .PHONY a stray file named
# "clean" or "depend" in the tree would silently disable them.
.PHONY: all depend clean

all : $(TARGET)

$(TARGET): $(OBJECTS)
	$(CC) $(OBJECTS) -o $(TARGET)

.SUFFIXES: .c .o

.c.o:
	$(CC) $(CFLAGS) $(INCLUDE) -c $< -o $@

# Regenerate the auto-dependency file consumed by the -include below.
depend:
	$(CC) -MM $(INCLUDE) $(CFLAGS) $(SOURCES) > dependencies

clean :
	rm -f $(OBJECTS) $(TARGET)

# "-" keeps the first build from failing before "make depend" has run.
-include dependencies
| {
"pile_set_name": "Github"
} |
/*
* Copyright (C) 2017 优客服-多渠道客服系统
* Modifications copyright (C) 2018-2019 Chatopera Inc, <https://www.chatopera.com>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.chatopera.cc.persistence.impl;
import com.chatopera.cc.model.Organ;
import com.chatopera.cc.persistence.interfaces.DataExchangeInterface;
import com.chatopera.cc.persistence.repository.OrganRepository;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import java.io.Serializable;
import java.util.List;
@Service("organdata")
public class OrganDataExchangeImpl implements DataExchangeInterface {

    @Autowired
    private OrganRepository organRes;

    /**
     * Resolves an organ id to its display name within the given tenant.
     * Falls back to the raw id when no matching organ exists.
     */
    public String getDataByIdAndOrgi(String id, String orgi) {
        final Organ organ = organRes.findByIdAndOrgi(id, orgi);
        if (organ != null) {
            return organ.getName();
        }
        return id;
    }

    /** List lookup is not supported for organ data. */
    @Override
    public List<Serializable> getListDataByIdAndOrgi(String id, String creater, String orgi) {
        return null;
    }

    /** No post-processing is required for organ data. */
    public void process(Object data, String orgi) {
    }
}
| {
"pile_set_name": "Github"
} |
#ifndef _ASM_SOCKET_H
#define _ASM_SOCKET_H

#include <asm/sockios.h>

/* For setsockoptions(2) */
#define SOL_SOCKET	1

/* Option names for level SOL_SOCKET. */
#define SO_DEBUG	1
#define SO_REUSEADDR	2
#define SO_TYPE		3
#define SO_ERROR	4
#define SO_DONTROUTE	5
#define SO_BROADCAST	6
#define SO_SNDBUF	7
#define SO_RCVBUF	8
/* Privileged variants that may exceed the sysctl buffer-size limits. */
#define SO_SNDBUFFORCE	32
#define SO_RCVBUFFORCE	33
#define SO_KEEPALIVE	9
#define SO_OOBINLINE	10
#define SO_NO_CHECK	11
#define SO_PRIORITY	12
#define SO_LINGER	13
#define SO_BSDCOMPAT	14
/* To add :#define SO_REUSEPORT 15 */
#define SO_PASSCRED	16
#define SO_PEERCRED	17
#define SO_RCVLOWAT	18
#define SO_SNDLOWAT	19
#define SO_RCVTIMEO	20
#define SO_SNDTIMEO	21

/* Security levels - as per NRL IPv6 - don't actually do anything */
#define SO_SECURITY_AUTHENTICATION		22
#define SO_SECURITY_ENCRYPTION_TRANSPORT	23
#define SO_SECURITY_ENCRYPTION_NETWORK		24

#define SO_BINDTODEVICE	25

/* Socket filtering */
#define SO_ATTACH_FILTER        26
#define SO_DETACH_FILTER        27

#define SO_PEERNAME		28
/* Timestamping of received packets; SCM_* aliases name the cmsg type. */
#define SO_TIMESTAMP		29
#define SCM_TIMESTAMP		SO_TIMESTAMP

#define SO_ACCEPTCONN		30

#define SO_PEERSEC		31
#define SO_PASSSEC		34
#define SO_TIMESTAMPNS		35
#define SCM_TIMESTAMPNS		SO_TIMESTAMPNS

#define SO_MARK			36

#define SO_TIMESTAMPING		37
#define SCM_TIMESTAMPING	SO_TIMESTAMPING

#define SO_PROTOCOL		38
#define SO_DOMAIN		39

#endif /* _ASM_SOCKET_H */
| {
"pile_set_name": "Github"
} |
/*
* drm_irq.c IRQ and vblank support
*
* \author Rickard E. (Rik) Faith <[email protected]>
* \author Gareth Hughes <[email protected]>
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
/*
* Created: Fri Mar 19 14:30:16 1999 by [email protected]
*
* Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <drm/drm_irq.h>
#include <drm/drmP.h>
#include <linux/interrupt.h> /* For task queue support */
#include <linux/vgaarb.h>
#include <linux/export.h>
#include "drm_internal.h"
/**
* DOC: irq helpers
*
* The DRM core provides very simple support helpers to enable IRQ handling on a
* device through the drm_irq_install() and drm_irq_uninstall() functions. This
* only supports devices with a single interrupt on the main device stored in
 * &drm_device.dev and set as the device parameter in drm_dev_alloc().
*
* These IRQ helpers are strictly optional. Drivers which roll their own only
* need to set &drm_device.irq_enabled to signal the DRM core that vblank
* interrupts are working. Since these helpers don't automatically clean up the
* requested interrupt like e.g. devm_request_irq() they're not really
* recommended.
*/
/**
* drm_irq_install - install IRQ handler
* @dev: DRM device
* @irq: IRQ number to install the handler for
*
* Initializes the IRQ related data. Installs the handler, calling the driver
* &drm_driver.irq_preinstall and &drm_driver.irq_postinstall functions before
* and after the installation.
*
* This is the simplified helper interface provided for drivers with no special
* needs. Drivers which need to install interrupt handlers for multiple
* interrupts must instead set &drm_device.irq_enabled to signal the DRM core
* that vblank interrupts are available.
*
* @irq must match the interrupt number that would be passed to request_irq(),
* if called directly instead of using this helper function.
*
* &drm_driver.irq_handler is called to handle the registered interrupt.
*
* Returns:
* Zero on success or a negative error code on failure.
*/
int drm_irq_install(struct drm_device *dev, int irq)
{
	int ret;
	unsigned long sh_flags = 0;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
		return -EINVAL;

	/* IRQ 0 is rejected outright rather than treated as a valid line. */
	if (irq == 0)
		return -EINVAL;

	/* Driver must have been initialized */
	if (!dev->dev_private)
		return -EINVAL;

	if (dev->irq_enabled)
		return -EBUSY;
	/* Mark enabled before request_irq() so the handler may rely on it. */
	dev->irq_enabled = true;

	DRM_DEBUG("irq=%d\n", irq);

	/* Before installing handler */
	if (dev->driver->irq_preinstall)
		dev->driver->irq_preinstall(dev);

	/* Install handler */
	if (drm_core_check_feature(dev, DRIVER_IRQ_SHARED))
		sh_flags = IRQF_SHARED;

	ret = request_irq(irq, dev->driver->irq_handler,
			  sh_flags, dev->driver->name, dev);

	if (ret < 0) {
		dev->irq_enabled = false;
		return ret;
	}

	/* After installing handler */
	if (dev->driver->irq_postinstall)
		ret = dev->driver->irq_postinstall(dev);

	if (ret < 0) {
		/* Roll back: drop the enabled flag and the freshly
		 * requested interrupt; legacy drivers also had a VGA
		 * arbiter client registered that must be removed. */
		dev->irq_enabled = false;
		if (drm_core_check_feature(dev, DRIVER_LEGACY))
			vga_client_register(dev->pdev, NULL, NULL, NULL);
		free_irq(irq, dev);
	} else {
		dev->irq = irq;
	}

	return ret;
}
EXPORT_SYMBOL(drm_irq_install);
/**
* drm_irq_uninstall - uninstall the IRQ handler
* @dev: DRM device
*
* Calls the driver's &drm_driver.irq_uninstall function and unregisters the IRQ
* handler. This should only be called by drivers which used drm_irq_install()
* to set up their interrupt handler. Other drivers must only reset
* &drm_device.irq_enabled to false.
*
* Note that for kernel modesetting drivers it is a bug if this function fails.
* The sanity checks are only to catch buggy user modesetting drivers which call
* the same function through an ioctl.
*
* Returns:
* Zero on success or a negative error code on failure.
*/
int drm_irq_uninstall(struct drm_device *dev)
{
	unsigned long irqflags;
	bool irq_enabled;
	int i;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
		return -EINVAL;

	/* Clear the flag first so concurrent users see IRQs as disabled. */
	irq_enabled = dev->irq_enabled;
	dev->irq_enabled = false;

	/*
	 * Wake up any waiters so they don't hang. This is just to paper over
	 * issues for UMS drivers which aren't in full control of their
	 * vblank/irq handling. KMS drivers must ensure that vblanks are all
	 * disabled when uninstalling the irq handler.
	 */
	if (dev->num_crtcs) {
		spin_lock_irqsave(&dev->vbl_lock, irqflags);
		for (i = 0; i < dev->num_crtcs; i++) {
			struct drm_vblank_crtc *vblank = &dev->vblank[i];

			if (!vblank->enabled)
				continue;

			/* KMS drivers should never reach this point. */
			WARN_ON(drm_core_check_feature(dev, DRIVER_MODESET));

			drm_vblank_disable_and_save(dev, i);
			wake_up(&vblank->queue);
		}
		spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
	}

	/* Nothing was installed in the first place. */
	if (!irq_enabled)
		return -EINVAL;

	DRM_DEBUG("irq=%d\n", dev->irq);

	/* Legacy drivers registered a VGA arbiter client on install. */
	if (drm_core_check_feature(dev, DRIVER_LEGACY))
		vga_client_register(dev->pdev, NULL, NULL, NULL);

	if (dev->driver->irq_uninstall)
		dev->driver->irq_uninstall(dev);

	free_irq(dev->irq, dev);

	return 0;
}
EXPORT_SYMBOL(drm_irq_uninstall);
/* Legacy (UMS) ioctl entry point for installing/removing the IRQ handler. */
int drm_legacy_irq_control(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_control *ctl = data;
	int ret = 0, irq;

	/* if we haven't irq we fallback for compatibility reasons -
	 * this used to be a separate function in drm_dma.h
	 */

	if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
		return 0;
	if (!drm_core_check_feature(dev, DRIVER_LEGACY))
		return 0;
	/* UMS was only ever supported on pci devices. */
	if (WARN_ON(!dev->pdev))
		return -EINVAL;

	switch (ctl->func) {
	case DRM_INST_HANDLER:
		irq = dev->pdev->irq;

		/* Old interface versions must pass the exact PCI irq. */
		if (dev->if_version < DRM_IF_VERSION(1, 2) &&
		    ctl->irq != irq)
			return -EINVAL;
		mutex_lock(&dev->struct_mutex);
		ret = drm_irq_install(dev, irq);
		mutex_unlock(&dev->struct_mutex);

		return ret;
	case DRM_UNINST_HANDLER:
		mutex_lock(&dev->struct_mutex);
		ret = drm_irq_uninstall(dev);
		mutex_unlock(&dev->struct_mutex);

		return ret;
	default:
		return -EINVAL;
	}
}
| {
"pile_set_name": "Github"
} |
#!/bin/sh
set -e
#
# This script is meant for quick & easy install via:
#   'curl -sSL https://get.docker.com/ | sh'
# or:
#   'wget -qO- https://get.docker.com/ | sh'
#
# For test builds (ie. release candidates):
#   'curl -fsSL https://test.docker.com/ | sh'
# or:
#   'wget -qO- https://test.docker.com/ | sh'
#
# For experimental builds:
#   'curl -fsSL https://experimental.docker.com/ | sh'
# or:
#   'wget -qO- https://experimental.docker.com/ | sh'
#
# Docker Maintainers:
#   To update this script on https://get.docker.com,
#   use hack/release.sh during a normal release,
#   or the following one-liner for script hotfixes:
#     aws s3 cp --acl public-read hack/install.sh s3://get.docker.com/index
#

# Default repository endpoints; the url also selects main/test/experimental.
url="https://get.docker.com/"
apt_url="https://apt.dockerproject.org"
yum_url="https://yum.dockerproject.org"
gpg_fingerprint="58118E89F3A912897C070ADBF76221572C52609D"

# Keyservers tried in order until one succeeds.
key_servers="
ha.pool.sks-keyservers.net
pgp.mit.edu
keyserver.ubuntu.com
"

# Parse command-line flags; only --mirror <name> is recognized.
mirror=''
while [ $# -gt 0 ]; do
	case "$1" in
		--mirror)
			mirror="$2"
			shift
			;;
		*)
			echo "Illegal option $1"
			;;
	esac
	shift $(( $# > 0 ? 1 : 0 ))
done

# Redirect package URLs when a known mirror was requested.
case "$mirror" in
	AzureChinaCloud)
		apt_url="https://mirror.azure.cn/docker-engine/apt"
		yum_url="https://mirror.azure.cn/docker-engine/yum"
		;;
esac

# True (exit 0) when the named command exists on PATH.
command_exists() {
	command -v "$@" > /dev/null 2>&1
}
# Print the installed docker version (best effort) and remind the invoking
# user how to run docker without sudo by joining the "docker" group.
echo_docker_as_nonroot() {
	if command_exists docker && [ -e /var/run/docker.sock ]; then
		(
			set -x
			$sh_c 'docker version'
		) || true
	fi
	# Substitute the caller's login unless the script runs as root.
	your_user=your-user
	[ "$user" != 'root' ] && your_user="$user"
	# intentionally mixed spaces and tabs here -- tabs are stripped by "<<-EOF", spaces are kept in the output
	cat <<-EOF
	If you would like to use Docker as a non-root user, you should now consider
	adding your user to the "docker" group with something like:

	  sudo usermod -aG docker $your_user

	Remember that you will have to log out and back in for this to take effect!
	EOF
}
# Check if this is a forked Linux distro
check_forked() {
# Check for lsb_release command existence, it usually exists in forked distros
if command_exists lsb_release; then
# Check if the `-u` option is supported
set +e
lsb_release -a -u > /dev/null 2>&1
lsb_release_exit_code=$?
set -e
# Check if the command has exited successfully, it means we're in a forked distro
if [ "$lsb_release_exit_code" = "0" ]; then
# Print info about current distro
cat <<-EOF
You're using '$lsb_dist' version '$dist_version'.
EOF
# Get the upstream release info
lsb_dist=$(lsb_release -a -u 2>&1 | tr '[:upper:]' '[:lower:]' | grep -E 'id' | cut -d ':' -f 2 | tr -d '[[:space:]]')
dist_version=$(lsb_release -a -u 2>&1 | tr '[:upper:]' '[:lower:]' | grep -E 'codename' | cut -d ':' -f 2 | tr -d '[[:space:]]')
# Print info about upstream distro
cat <<-EOF
Upstream release is '$lsb_dist' version '$dist_version'.
EOF
else
if [ -r /etc/debian_version ] && [ "$lsb_dist" != "ubuntu" ] && [ "$lsb_dist" != "raspbian" ]; then
# We're Debian and don't even know it!
lsb_dist=debian
dist_version="$(cat /etc/debian_version | sed 's/\/.*//' | sed 's/\..*//')"
case "$dist_version" in
8|'Kali Linux 2')
dist_version="jessie"
;;
7)
dist_version="wheezy"
;;
esac
fi
fi
fi
}
# Fetch a GPG key (trying each configured keyserver) into a throwaway
# keyring, export it as ASCII armor and import it into the rpm database.
rpm_import_repository_key() {
	local key=$1; shift
	local tmpdir=$(mktemp -d)
	# NOTE(review): chmod 600 on a *directory* removes the execute bit —
	# verify gpg can still use this homedir; 700 would be the usual mode.
	chmod 600 "$tmpdir"
	for key_server in $key_servers ; do
		gpg --homedir "$tmpdir" --keyserver "$key_server" --recv-keys "$key" && break
	done
	gpg --homedir "$tmpdir" -k "$key" >/dev/null
	gpg --homedir "$tmpdir" --export --armor "$key" > "$tmpdir"/repo.key
	rpm --import "$tmpdir"/repo.key
	rm -rf "$tmpdir"
}
# Split a semver string "MAJOR.MINOR.PATCH[-suffix]" into the globals
# major, minor and patch using only POSIX parameter expansion.
semverParse() {
	major="${1%%.*}"
	minor="${1#$major.}"
	minor="${minor%%.*}"
	patch="${1#$major.$minor.}"
	# Trim anything after the patch number (pre-release/build suffix).
	patch="${patch%%[-.]*}"
}
do_install() {
architecture=$(uname -m)
case $architecture in
# officially supported
amd64|x86_64)
;;
# unofficially supported with available repositories
armv6l|armv7l)
;;
# unofficially supported without available repositories
aarch64|arm64|ppc64le|s390x)
cat 1>&2 <<-EOF
Error: Docker doesn't officially support $architecture and no Docker $architecture repository exists.
EOF
exit 1
;;
# not supported
*)
cat >&2 <<-EOF
Error: $architecture is not a recognized platform.
EOF
exit 1
;;
esac
if command_exists docker; then
version="$(docker -v | cut -d ' ' -f3 | cut -d ',' -f1)"
MAJOR_W=1
MINOR_W=10
semverParse $version
shouldWarn=0
if [ $major -lt $MAJOR_W ]; then
shouldWarn=1
fi
if [ $major -le $MAJOR_W ] && [ $minor -lt $MINOR_W ]; then
shouldWarn=1
fi
cat >&2 <<-'EOF'
Warning: the "docker" command appears to already exist on this system.
If you already have Docker installed, this script can cause trouble, which is
why we're displaying this warning and provide the opportunity to cancel the
installation.
If you installed the current Docker package using this script and are using it
EOF
if [ $shouldWarn -eq 1 ]; then
cat >&2 <<-'EOF'
again to update Docker, we urge you to migrate your image store before upgrading
to v1.10+.
You can find instructions for this here:
https://github.com/docker/docker/wiki/Engine-v1.10.0-content-addressability-migration
EOF
else
cat >&2 <<-'EOF'
again to update Docker, you can safely ignore this message.
EOF
fi
cat >&2 <<-'EOF'
You may press Ctrl+C now to abort this script.
EOF
( set -x; sleep 20 )
fi
user="$(id -un 2>/dev/null || true)"
sh_c='sh -c'
if [ "$user" != 'root' ]; then
if command_exists sudo; then
sh_c='sudo -E sh -c'
elif command_exists su; then
sh_c='su -c'
else
cat >&2 <<-'EOF'
Error: this installer needs the ability to run commands as root.
We are unable to find either "sudo" or "su" available to make this happen.
EOF
exit 1
fi
fi
curl=''
if command_exists curl; then
curl='curl -sSL'
elif command_exists wget; then
curl='wget -qO-'
elif command_exists busybox && busybox --list-modules | grep -q wget; then
curl='busybox wget -qO-'
fi
# check to see which repo they are trying to install from
if [ -z "$repo" ]; then
repo='main'
if [ "https://test.docker.com/" = "$url" ]; then
repo='testing'
elif [ "https://experimental.docker.com/" = "$url" ]; then
repo='experimental'
fi
fi
# perform some very rudimentary platform detection
lsb_dist=''
dist_version=''
if command_exists lsb_release; then
lsb_dist="$(lsb_release -si)"
fi
if [ -z "$lsb_dist" ] && [ -r /etc/lsb-release ]; then
lsb_dist="$(. /etc/lsb-release && echo "$DISTRIB_ID")"
fi
if [ -z "$lsb_dist" ] && [ -r /etc/debian_version ]; then
lsb_dist='debian'
fi
if [ -z "$lsb_dist" ] && [ -r /etc/fedora-release ]; then
lsb_dist='fedora'
fi
if [ -z "$lsb_dist" ] && [ -r /etc/oracle-release ]; then
lsb_dist='oracleserver'
fi
if [ -z "$lsb_dist" ] && [ -r /etc/centos-release ]; then
lsb_dist='centos'
fi
if [ -z "$lsb_dist" ] && [ -r /etc/redhat-release ]; then
lsb_dist='redhat'
fi
if [ -z "$lsb_dist" ] && [ -r /etc/photon-release ]; then
lsb_dist='photon'
fi
if [ -z "$lsb_dist" ] && [ -r /etc/os-release ]; then
lsb_dist="$(. /etc/os-release && echo "$ID")"
fi
lsb_dist="$(echo "$lsb_dist" | tr '[:upper:]' '[:lower:]')"
# Special case redhatenterpriseserver
if [ "${lsb_dist}" = "redhatenterpriseserver" ]; then
# Set it to redhat, it will be changed to centos below anyways
lsb_dist='redhat'
fi
case "$lsb_dist" in
ubuntu)
if command_exists lsb_release; then
dist_version="$(lsb_release --codename | cut -f2)"
fi
if [ -z "$dist_version" ] && [ -r /etc/lsb-release ]; then
dist_version="$(. /etc/lsb-release && echo "$DISTRIB_CODENAME")"
fi
;;
debian|raspbian)
dist_version="$(cat /etc/debian_version | sed 's/\/.*//' | sed 's/\..*//')"
case "$dist_version" in
8)
dist_version="jessie"
;;
7)
dist_version="wheezy"
;;
esac
;;
oracleserver)
# need to switch lsb_dist to match yum repo URL
lsb_dist="oraclelinux"
dist_version="$(rpm -q --whatprovides redhat-release --queryformat "%{VERSION}\n" | sed 's/\/.*//' | sed 's/\..*//' | sed 's/Server*//')"
;;
fedora|centos|redhat)
dist_version="$(rpm -q --whatprovides ${lsb_dist}-release --queryformat "%{VERSION}\n" | sed 's/\/.*//' | sed 's/\..*//' | sed 's/Server*//' | sort | tail -1)"
;;
"vmware photon")
lsb_dist="photon"
dist_version="$(. /etc/os-release && echo "$VERSION_ID")"
;;
*)
if command_exists lsb_release; then
dist_version="$(lsb_release --codename | cut -f2)"
fi
if [ -z "$dist_version" ] && [ -r /etc/os-release ]; then
dist_version="$(. /etc/os-release && echo "$VERSION_ID")"
fi
;;
esac
# Check if this is a forked Linux distro
check_forked
# Run setup for each distro accordingly
case "$lsb_dist" in
amzn)
(
set -x
$sh_c 'sleep 3; yum -y -q install docker'
)
echo_docker_as_nonroot
exit 0
;;
'opensuse project'|opensuse)
echo 'Going to perform the following operations:'
if [ "$repo" != 'main' ]; then
echo ' * add repository obs://Virtualization:containers'
fi
echo ' * install Docker'
$sh_c 'echo "Press CTRL-C to abort"; sleep 3'
if [ "$repo" != 'main' ]; then
# install experimental packages from OBS://Virtualization:containers
(
set -x
zypper -n ar -f obs://Virtualization:containers Virtualization:containers
rpm_import_repository_key 55A0B34D49501BB7CA474F5AA193FBB572174FC2
)
fi
(
set -x
zypper -n install docker
)
echo_docker_as_nonroot
exit 0
;;
'suse linux'|sle[sd])
echo 'Going to perform the following operations:'
if [ "$repo" != 'main' ]; then
echo ' * add repository obs://Virtualization:containers'
echo ' * install experimental Docker using packages NOT supported by SUSE'
else
echo ' * add the "Containers" module'
echo ' * install Docker using packages supported by SUSE'
fi
$sh_c 'echo "Press CTRL-C to abort"; sleep 3'
if [ "$repo" != 'main' ]; then
# install experimental packages from OBS://Virtualization:containers
echo >&2 'Warning: installing experimental packages from OBS, these packages are NOT supported by SUSE'
(
set -x
zypper -n ar -f obs://Virtualization:containers/SLE_12 Virtualization:containers
rpm_import_repository_key 55A0B34D49501BB7CA474F5AA193FBB572174FC2
)
else
# Add the containers module
# Note well-1: the SLE machine must already be registered against SUSE Customer Center
# Note well-2: the `-r ""` is required to workaround a known issue of SUSEConnect
(
set -x
SUSEConnect -p sle-module-containers/12/x86_64 -r ""
)
fi
(
set -x
zypper -n install docker
)
echo_docker_as_nonroot
exit 0
;;
ubuntu|debian|raspbian)
export DEBIAN_FRONTEND=noninteractive
did_apt_get_update=
apt_get_update() {
if [ -z "$did_apt_get_update" ]; then
( set -x; $sh_c 'sleep 3; apt-get update' )
did_apt_get_update=1
fi
}
if [ "$lsb_dist" != "raspbian" ]; then
# aufs is preferred over devicemapper; try to ensure the driver is available.
if ! grep -q aufs /proc/filesystems && ! $sh_c 'modprobe aufs'; then
if uname -r | grep -q -- '-generic' && dpkg -l 'linux-image-*-generic' | grep -qE '^ii|^hi' 2>/dev/null; then
kern_extras="linux-image-extra-$(uname -r) linux-image-extra-virtual"
apt_get_update
( set -x; $sh_c 'sleep 3; apt-get install -y -q '"$kern_extras" ) || true
if ! grep -q aufs /proc/filesystems && ! $sh_c 'modprobe aufs'; then
echo >&2 'Warning: tried to install '"$kern_extras"' (for AUFS)'
echo >&2 ' but we still have no AUFS. Docker may not work. Proceeding anyways!'
( set -x; sleep 10 )
fi
else
echo >&2 'Warning: current kernel is not supported by the linux-image-extra-virtual'
echo >&2 ' package. We have no AUFS support. Consider installing the packages'
echo >&2 ' linux-image-virtual kernel and linux-image-extra-virtual for AUFS support.'
( set -x; sleep 10 )
fi
fi
fi
# install apparmor utils if they're missing and apparmor is enabled in the kernel
# otherwise Docker will fail to start
if [ "$(cat /sys/module/apparmor/parameters/enabled 2>/dev/null)" = 'Y' ]; then
if command -v apparmor_parser >/dev/null 2>&1; then
echo 'apparmor is enabled in the kernel and apparmor utils were already installed'
else
echo 'apparmor is enabled in the kernel, but apparmor_parser is missing. Trying to install it..'
apt_get_update
( set -x; $sh_c 'sleep 3; apt-get install -y -q apparmor' )
fi
fi
if [ ! -e /usr/lib/apt/methods/https ]; then
apt_get_update
( set -x; $sh_c 'sleep 3; apt-get install -y -q apt-transport-https ca-certificates' )
fi
if [ -z "$curl" ]; then
apt_get_update
( set -x; $sh_c 'sleep 3; apt-get install -y -q curl ca-certificates' )
curl='curl -sSL'
fi
if [ ! -e /usr/bin/gpg ]; then
apt_get_update
( set -x; $sh_c 'sleep 3; apt-get install -y -q gnupg2 || apt-get install -y -q gnupg' )
fi
(
set -x
for key_server in $key_servers ; do
$sh_c "apt-key adv --keyserver hkp://${key_server}:80 --recv-keys ${gpg_fingerprint}" && break
done
$sh_c "apt-key adv -k ${gpg_fingerprint} >/dev/null"
$sh_c "mkdir -p /etc/apt/sources.list.d"
$sh_c "echo deb \[arch=$(dpkg --print-architecture)\] ${apt_url}/repo ${lsb_dist}-${dist_version} ${repo} > /etc/apt/sources.list.d/docker.list"
$sh_c 'sleep 3; apt-get update; apt-get install -y -q docker-engine'
)
echo_docker_as_nonroot
exit 0
;;
fedora|centos|redhat|oraclelinux|photon)
if [ "${lsb_dist}" = "redhat" ]; then
# we use the centos repository for both redhat and centos releases
lsb_dist='centos'
fi
$sh_c "cat >/etc/yum.repos.d/docker-${repo}.repo" <<-EOF
[docker-${repo}-repo]
name=Docker ${repo} Repository
baseurl=${yum_url}/repo/${repo}/${lsb_dist}/${dist_version}
enabled=1
gpgcheck=1
gpgkey=${yum_url}/gpg
EOF
if [ "$lsb_dist" = "fedora" ] && [ "$dist_version" -ge "22" ]; then
(
set -x
$sh_c 'sleep 3; dnf -y -q install docker-engine'
)
elif [ "$lsb_dist" = "photon" ]; then
(
set -x
$sh_c 'sleep 3; tdnf -y install docker-engine'
)
else
(
set -x
$sh_c 'sleep 3; yum -y -q install docker-engine'
)
fi
echo_docker_as_nonroot
exit 0
;;
gentoo)
if [ "$url" = "https://test.docker.com/" ]; then
# intentionally mixed spaces and tabs here -- tabs are stripped by "<<-'EOF'", spaces are kept in the output
cat >&2 <<-'EOF'
You appear to be trying to install the latest nightly build in Gentoo.'
The portage tree should contain the latest stable release of Docker, but'
if you want something more recent, you can always use the live ebuild'
provided in the "docker" overlay available via layman. For more'
instructions, please see the following URL:'
https://github.com/tianon/docker-overlay#using-this-overlay'
After adding the "docker" overlay, you should be able to:'
emerge -av =app-emulation/docker-9999'
EOF
exit 1
fi
(
set -x
$sh_c 'sleep 3; emerge app-emulation/docker'
)
exit 0
;;
esac
# intentionally mixed spaces and tabs here -- tabs are stripped by "<<-'EOF'", spaces are kept in the output
cat >&2 <<-'EOF'
Either your platform is not easily detectable, is not supported by this
installer script (yet - PRs welcome! [hack/install.sh]), or does not yet have
a package for Docker. Please visit the following URL for more detailed
installation instructions:
https://docs.docker.com/engine/installation/
EOF
exit 1
}
# wrapped up in a function so that we have some protection against only getting
# half the file during "curl | sh"
do_install
| {
"pile_set_name": "Github"
} |
<?xml version="1.0" encoding="utf-8"?>
<android.support.constraint.ConstraintLayout xmlns:android="http://schemas.android.com/apk/res/android"
xmlns:app="http://schemas.android.com/apk/res-auto"
android:layout_width="match_parent"
android:layout_height="wrap_content">
<android.support.v7.widget.CardView
android:id="@+id/action_new_backup"
android:layout_width="0dp"
android:layout_height="wrap_content"
android:layout_marginEnd="8dp"
android:layout_marginStart="8dp"
android:layout_marginTop="16dp"
android:foreground="?selectableItemBackground"
app:cardCornerRadius="8dp"
app:layout_constraintEnd_toStartOf="@+id/action_restore_backup"
app:layout_constraintHorizontal_bias="0.5"
app:layout_constraintStart_toStartOf="parent"
app:layout_constraintTop_toTopOf="parent">
<TextView
android:layout_width="match_parent"
android:layout_height="60dp"
android:layout_gravity="center"
android:drawablePadding="16dp"
android:drawableStart="@drawable/ic_backup"
android:gravity="center_vertical"
android:padding="16dp"
android:text="@string/new_backup"
android:textAppearance="@style/TextAppearance.AppCompat.Body1" />
</android.support.v7.widget.CardView>
<android.support.v7.widget.CardView
android:id="@+id/action_restore_backup"
android:layout_width="0dp"
android:layout_height="wrap_content"
android:layout_marginEnd="8dp"
android:layout_marginStart="8dp"
android:layout_marginTop="16dp"
android:foreground="?selectableItemBackground"
app:cardCornerRadius="8dp"
app:layout_constraintEnd_toEndOf="parent"
app:layout_constraintHorizontal_bias="0.5"
app:layout_constraintStart_toEndOf="@+id/action_new_backup"
app:layout_constraintTop_toTopOf="parent">
<TextView
android:layout_width="match_parent"
android:layout_height="60dp"
android:layout_gravity="center"
android:drawablePadding="16dp"
android:drawableStart="@drawable/ic_restore"
android:gravity="center_vertical"
android:padding="16dp"
android:text="@string/restore_backup"
android:textAppearance="@style/TextAppearance.AppCompat.Body1" />
</android.support.v7.widget.CardView>
<TextView
android:id="@+id/local_backup_title"
android:layout_width="match_parent"
android:layout_height="wrap_content"
android:paddingEnd="?android:attr/listPreferredItemPaddingEnd"
android:paddingStart="?android:attr/listPreferredItemPaddingStart"
android:paddingTop="16dip"
android:paddingBottom="8dp"
android:text="@string/local_backups"
android:textAppearance="@android:style/TextAppearance.Material.Body2"
android:textColor="?android:attr/colorAccent"
app:layout_constraintTop_toBottomOf="@+id/action_new_backup" />
</android.support.constraint.ConstraintLayout>
| {
"pile_set_name": "Github"
} |
#include <algorithm>
#include <cmath>
#include <vector>
#include "caffe/layer.hpp"
#include "caffe/layers/lstm_layer.hpp"
namespace caffe {
template <typename Dtype>
__device__ Dtype sigmoid(const Dtype x) {
  // Logistic function: 1 / (1 + e^-x).
  const Dtype one = Dtype(1);
  return one / (one + exp(-x));
}
template <typename Dtype>
__device__ Dtype tanh(const Dtype x) {
  // tanh expressed through the sigmoid above: tanh(x) = 2*sigmoid(2x) - 1.
  const Dtype s = sigmoid(Dtype(2) * x);
  return Dtype(2) * s - Dtype(1);
}
template <typename Dtype>
__global__ void LSTMActsForward(const int nthreads, const int dim,
    const Dtype* X, Dtype* X_acts) {
  // Applies the gate nonlinearities elementwise to the pre-activations X.
  // Each row of X holds 4*dim values: the first 3*dim entries (the i, f, o
  // gates per the forward kernel's layout) get a sigmoid; the last dim
  // entries (the candidate input g) get a tanh.
  CUDA_KERNEL_LOOP(index, nthreads) {
    const int x_dim = 4 * dim;
    const int d = index % x_dim;  // position within one row of 4*dim gates
    if (d < 3 * dim) {
      X_acts[index] = sigmoid(X[index]);
    } else {
      X_acts[index] = tanh(X[index]);
    }
  }
}
template <typename Dtype>
__global__ void LSTMUnitForward(const int nthreads, const int dim,
    const Dtype* C_prev, const Dtype* X, const Dtype* cont,
    Dtype* C, Dtype* H) {
  // LSTM cell update. X holds the activated gates [i, f, o, g] for each of
  // the n batch items; cont[n] is a per-item continuation indicator that
  // gates the carry-over of the previous cell state:
  //   c = cont * f * c_prev + i * g
  //   h = o * tanh(c)
  CUDA_KERNEL_LOOP(index, nthreads) {
    const int n = index / dim;  // batch item
    const int d = index % dim;  // unit within the hidden state
    const Dtype* X_offset = X + 4 * dim * n;
    const Dtype i = X_offset[d];
    const Dtype f = X_offset[1 * dim + d];
    const Dtype o = X_offset[2 * dim + d];
    const Dtype g = X_offset[3 * dim + d];
    const Dtype c_prev = C_prev[index];
    const Dtype c = cont[n] * f * c_prev + i * g;
    C[index] = c;
    const Dtype tanh_c = tanh(c);
    H[index] = o * tanh_c;
  }
}
template <typename Dtype>
void LSTMUnitLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  // Forward pass in two kernels:
  //   1) LSTMActsForward: apply sigmoid/tanh to the raw gate inputs X,
  //      storing the result in the intermediate blob X_acts_.
  //   2) LSTMUnitForward: combine activated gates with C_prev and cont
  //      to produce the new cell state C (top[0]) and hidden state H (top[1]).
  const int count = top[1]->count();
  const Dtype* C_prev = bottom[0]->gpu_data();  // previous cell state
  const Dtype* X = bottom[1]->gpu_data();       // raw gate pre-activations
  const Dtype* cont = bottom[2]->gpu_data();    // sequence-continuation flags
  Dtype* X_acts = X_acts_.mutable_gpu_data();
  Dtype* C = top[0]->mutable_gpu_data();
  Dtype* H = top[1]->mutable_gpu_data();
  const int X_count = bottom[1]->count();
  // NOLINT_NEXT_LINE(whitespace/operators)
  LSTMActsForward<Dtype><<<CAFFE_GET_BLOCKS(X_count), CAFFE_CUDA_NUM_THREADS>>>(
      X_count, hidden_dim_, X, X_acts);
  CUDA_POST_KERNEL_CHECK;
  // NOLINT_NEXT_LINE(whitespace/operators)
  LSTMUnitForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
      count, hidden_dim_, C_prev, X_acts, cont, C, H);
  CUDA_POST_KERNEL_CHECK;
}
template <typename Dtype>
__global__ void LSTMUnitBackward(const int nthreads, const int dim,
    const Dtype* C_prev, const Dtype* X, const Dtype* C, const Dtype* H,
    const Dtype* cont, const Dtype* C_diff, const Dtype* H_diff,
    Dtype* C_prev_diff, Dtype* X_diff) {
  // Backward pass of the cell update in LSTMUnitForward. X here holds the
  // already-activated gate values; X_diff receives gradients w.r.t. those
  // activated values, which LSTMActsBackward later chains through the
  // activation derivatives.
  CUDA_KERNEL_LOOP(index, nthreads) {
    const int n = index / dim;  // batch item
    const int d = index % dim;  // unit within the hidden state
    const Dtype* X_offset = X + 4 * dim * n;
    const Dtype i = X_offset[d];
    const Dtype f = X_offset[1 * dim + d];
    const Dtype o = X_offset[2 * dim + d];
    const Dtype g = X_offset[3 * dim + d];
    const Dtype c_prev = C_prev[index];
    const Dtype c = C[index];
    const Dtype tanh_c = tanh(c);
    Dtype* c_prev_diff = C_prev_diff + index;
    Dtype* X_diff_offset = X_diff + 4 * dim * n;
    Dtype* i_diff = X_diff_offset + d;
    Dtype* f_diff = X_diff_offset + 1 * dim + d;
    Dtype* o_diff = X_diff_offset + 2 * dim + d;
    Dtype* g_diff = X_diff_offset + 3 * dim + d;
    // Total gradient w.r.t. c: the direct gradient on C plus H's
    // contribution through h = o * tanh(c), using tanh'(c) = 1 - tanh(c)^2.
    const Dtype c_term_diff =
        C_diff[index] + H_diff[index] * o * (1 - tanh_c * tanh_c);
    const Dtype cont_n = cont[n];
    // Derivatives of c = cont*f*c_prev + i*g and h = o*tanh(c):
    *c_prev_diff = cont_n * c_term_diff * f;
    *i_diff = c_term_diff * g;
    *f_diff = cont_n * c_term_diff * c_prev;
    *o_diff = H_diff[index] * tanh_c;
    *g_diff = c_term_diff * i;
  }
}
template <typename Dtype>
__global__ void LSTMActsBackward(const int nthreads, const int dim,
    const Dtype* X_acts, const Dtype* X_acts_diff, Dtype* X_diff) {
  // Chains the gate gradients through the activation functions, mirroring
  // LSTMActsForward's layout: sigmoid for the first 3*dim entries of each
  // row (s' = s*(1-s)), tanh for the last dim entries (t' = 1-t^2).
  CUDA_KERNEL_LOOP(index, nthreads) {
    const int x_dim = 4 * dim;
    const int d = index % x_dim;
    const Dtype X_act = X_acts[index];
    if (d < 3 * dim) {
      X_diff[index] = X_acts_diff[index] * X_act * (Dtype(1) - X_act);
    } else {
      X_diff[index] = X_acts_diff[index] * (Dtype(1) - X_act * X_act);
    }
  }
}
template <typename Dtype>
void LSTMUnitLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down,
    const vector<Blob<Dtype>*>& bottom) {
  // The continuation indicators (bottom[2]) are inputs, not learned values.
  CHECK(!propagate_down[2]) << "Cannot backpropagate to sequence indicators.";
  if (!propagate_down[0] && !propagate_down[1]) { return; }
  const int count = top[1]->count();
  const Dtype* C_prev = bottom[0]->gpu_data();
  const Dtype* X_acts = X_acts_.gpu_data();  // activated gates from forward
  const Dtype* cont = bottom[2]->gpu_data();
  const Dtype* C = top[0]->gpu_data();
  const Dtype* H = top[1]->gpu_data();
  const Dtype* C_diff = top[0]->gpu_diff();
  const Dtype* H_diff = top[1]->gpu_diff();
  Dtype* C_prev_diff = bottom[0]->mutable_gpu_diff();
  Dtype* X_acts_diff = X_acts_.mutable_gpu_diff();
  // Step 1: gradients w.r.t. C_prev and the activated gates.
  LSTMUnitBackward<Dtype>  // NOLINT_NEXT_LINE(whitespace/operators)
      <<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(count, hidden_dim_,
      C_prev, X_acts, C, H, cont, C_diff, H_diff, C_prev_diff, X_acts_diff);
  CUDA_POST_KERNEL_CHECK;
  const int X_count = bottom[1]->count();
  Dtype* X_diff = bottom[1]->mutable_gpu_diff();
  // Step 2: chain through the sigmoid/tanh activations back to the raw X.
  LSTMActsBackward<Dtype>  // NOLINT_NEXT_LINE(whitespace/operators)
      <<<CAFFE_GET_BLOCKS(X_count), CAFFE_CUDA_NUM_THREADS>>>(
      X_count, hidden_dim_, X_acts, X_acts_diff, X_diff);
  CUDA_POST_KERNEL_CHECK;
}
INSTANTIATE_LAYER_GPU_FUNCS(LSTMUnitLayer);
} // namespace caffe
| {
"pile_set_name": "Github"
} |
import numpy as np
import torch
import torch.sparse
import torch.nn as nn
import torch.nn.functional as F
from utils import *
class AttentionPooling(nn.Module):
    '''
    Graph pooling layer implementing top-k and threshold-based pooling.

    Node attention coefficients (alpha) are either predicted by a trainable
    projection ('unsup'/'sup' pool types) or taken from ground truth ('gt').
    Nodes are then kept either by top-k ratio or by an attention threshold,
    and the adjacency matrix / ground-truth attention are updated to match.
    '''

    def __init__(self,
                 in_features,  # feature dimensionality in the current graph layer
                 in_features_prev,  # feature dimensionality in the previous graph layer
                 pool_type,  # e.g. (..., 'sup'|'unsup'|'gt', 'topk'|other, ratio-or-threshold)
                 pool_arch,  # e.g. ('fc'|'gnn', 'prev'|other, hidden sizes...)
                 large_graph,  # if True, rescale pooled features by the node count
                 attn_gnn=None,  # constructor for the GNN attention model ('gnn' arch only)
                 kl_weight=None,  # weight of the KL attention loss ('sup' mode)
                 drop_nodes=True,  # if True, physically remove pooled-out nodes/edges
                 init='normal',  # projection weight init: 'normal' or 'uniform'
                 scale=None,  # init scale; None keeps the default PyTorch init
                 debug=False):
        super(AttentionPooling, self).__init__()
        self.pool_type = pool_type
        self.pool_arch = pool_arch
        self.large_graph = large_graph
        self.kl_weight = kl_weight
        self.proj = None
        self.drop_nodes = drop_nodes
        # pool_type[2] selects top-k pooling vs threshold pooling
        self.is_topk = self.pool_type[2].lower() == 'topk'
        self.scale = scale
        self.init = init
        self.debug = debug
        # bound on attention logits before exp() to avoid numeric overflow
        self.clamp_value = 60
        # torch version string; used in forward() to pick the kl_div signature
        self.torch = torch.__version__
        if self.is_topk:
            self.topk_ratio = float(self.pool_type[3])  # r
            assert self.topk_ratio > 0 and self.topk_ratio <= 1, ('invalid top-k ratio', self.topk_ratio, self.pool_type)
        else:
            self.threshold = float(self.pool_type[3])  # \tilde{alpha}
            assert self.threshold >= 0 and self.threshold <= 1, ('invalid pooling threshold', self.threshold, self.pool_type)
        if self.pool_type[1] in ['unsup', 'sup']:
            # Trainable attention: build the projection that predicts alpha.
            assert self.pool_arch not in [None, 'None'], self.pool_arch
            n_in = in_features_prev if self.pool_arch[1] == 'prev' else in_features
            if self.pool_arch[0] == 'fc':
                # Reference "optimal" projection vector (0, 1, 0, ..., 0) used
                # only to report cosine similarity of the initialization below.
                p_optimal = torch.from_numpy(np.pad(np.array([0, 1]), (0, n_in - 2), 'constant')).float().view(1, n_in)
                if len(self.pool_arch) == 2:
                    # single layer projection
                    self.proj = nn.Linear(n_in, 1, bias=False)
                    p = self.proj.weight.data
                    if scale is not None:
                        if init == 'normal':
                            p = torch.randn(n_in)  # std=1, seed 9753 for optimal initialization
                        elif init == 'uniform':
                            p = torch.rand(n_in) * 2 - 1  # [-1,1]
                        else:
                            raise NotImplementedError(init)
                        p *= scale  # multiply std for normal or change range for uniform
                    else:
                        print('Default PyTorch init is used for layer %s, std=%.3f' % (str(p.shape), p.std()))
                    self.proj.weight.data = p.view_as(self.proj.weight.data)
                    p = self.proj.weight.data.view(1, n_in)
                else:
                    # multi-layer projection: Linear+ReLU stacks ending in a
                    # single-output Linear; only the first layer is re-initialized
                    filters = list(map(int, self.pool_arch[2:]))
                    self.proj = []
                    for layer in range(len(filters)):
                        self.proj.append(nn.Linear(in_features=n_in if layer == 0 else filters[layer - 1],
                                                   out_features=filters[layer]))
                        if layer == 0:
                            p = self.proj[0].weight.data
                            if scale is not None:
                                if init == 'normal':
                                    p = torch.randn(filters[layer], n_in)
                                elif init == 'uniform':
                                    p = torch.rand(filters[layer], n_in) * 2 - 1  # [-1,1]
                                else:
                                    raise NotImplementedError(init)
                                p *= scale  # multiply std for normal or change range for uniform
                            else:
                                print('Default PyTorch init is used for layer %s, std=%.3f' % (str(p.shape), p.std()))
                            self.proj[0].weight.data = p.view_as(self.proj[0].weight.data)
                            p = self.proj[0].weight.data.view(-1, n_in)
                        self.proj.append(nn.ReLU(True))
                    self.proj.append(nn.Linear(filters[-1], 1))
                    self.proj = nn.Sequential(*self.proj)
                # Compute cosine similarity with the optimal vector and print values
                # ignore the last dimension, because it does not receive gradients during training
                # n_in=4 for colors-3 because some of our test subsets have 4 dimensional features
                cos_sim = self.cosine_sim(p[:, :-1], p_optimal[:, :-1])
                if p.shape[0] == 1:
                    print('p values', p[0].data.cpu().numpy())
                    print('cos_sim', cos_sim.item())
                else:
                    for fn in [torch.max, torch.min, torch.mean, torch.std]:
                        print('cos_sim', fn(cos_sim).item())
            elif self.pool_arch[0] == 'gnn':
                self.proj = attn_gnn(n_in)
            else:
                raise ValueError('invalid pooling layer architecture', self.pool_arch)
        elif self.pool_type[1] == 'gt':
            # Ground-truth attention: nothing trainable; warn about an odd threshold.
            if not self.is_topk and self.threshold > 0:
                print('For ground truth attention threshold should be 0, but it is %f' % self.threshold)
        else:
            raise NotImplementedError(self.pool_type[1])

    def __repr__(self):
        # Human-readable summary of the layer configuration.
        return 'AttentionPooling(pool_type={}, pool_arch={}, topk={}, kl_weight={}, init={}, scale={}, proj={})'.format(
            self.pool_type,
            self.pool_arch,
            self.is_topk,
            self.kl_weight,
            self.init,
            self.scale,
            self.proj)

    def cosine_sim(self, a, b):
        # Pairwise cosine similarity between the rows of a and the rows of b.
        return torch.mm(a, b.t()) / (torch.norm(a, dim=1, keepdim=True) * torch.norm(b, dim=1, keepdim=True))

    def mask_out(self, x, mask):
        # Zero out entries of x wherever mask is 0 (mask shape defines the view).
        return x.view_as(mask) * mask

    def drop_nodes_edges(self, x, A, mask):
        # Physically remove masked-out nodes: keep the top N_nodes_max mask
        # entries per graph and gather the corresponding rows/columns of A.
        # Returns (x, A, mask, per-graph node counts, gather indices or None).
        N_nodes = torch.sum(mask, dim=1).long()  # B
        N_nodes_max = N_nodes.max()
        idx = None
        if N_nodes_max > 0:
            B, N, C = x.shape
            # Drop nodes
            mask, idx = torch.topk(mask.byte(), N_nodes_max, dim=1, largest=True, sorted=False)
            x = torch.gather(x, dim=1, index=idx.unsqueeze(2).expand(-1, -1, C))
            # Drop edges (rows, then columns, of the adjacency matrix)
            A = torch.gather(A, dim=1, index=idx.unsqueeze(2).expand(-1, -1, N))
            A = torch.gather(A, dim=2, index=idx.unsqueeze(1).expand(-1, N_nodes_max, -1))
        return x, A, mask, N_nodes, idx

    def forward(self, data):
        # data is a list [x, A, mask, ..., params_dict, ...]; returns the same
        # structure with x, A, mask replaced by their pooled versions.
        KL_loss = None
        x, A, mask, _, params_dict = data[:5]
        mask_float = mask.float()
        N_nodes_float = params_dict['N_nodes'].float()
        B, N, C = x.shape
        A = A.view(B, N, N)
        # Ground-truth (or weakly labeled) attention, if provided.
        alpha_gt = None
        if 'node_attn' in params_dict:
            if not isinstance(params_dict['node_attn'], list):
                params_dict['node_attn'] = [params_dict['node_attn']]
            alpha_gt = params_dict['node_attn'][-1].view(B, N)
        if 'node_attn_eval' in params_dict:
            if not isinstance(params_dict['node_attn_eval'], list):
                params_dict['node_attn_eval'] = [params_dict['node_attn_eval']]
        if (self.pool_type[1] == 'gt' or (self.pool_type[1] == 'sup' and self.training)) and alpha_gt is None:
            raise ValueError('ground truth node attention values node_attn required for %s' % self.pool_type)
        if self.pool_type[1] in ['unsup', 'sup']:
            # Predict attention coefficients with the projection built in __init__.
            attn_input = data[-1] if self.pool_arch[1] == 'prev' else x.clone()
            if self.pool_arch[0] == 'fc':
                alpha_pre = self.proj(attn_input).view(B, N)
            else:
                # to support python2
                input = [attn_input]
                input.extend(data[1:])
                alpha_pre = self.proj(input)[0].view(B, N)
            # softmax with masking out dummy nodes
            alpha_pre = torch.clamp(alpha_pre, -self.clamp_value, self.clamp_value)
            alpha = normalize_batch(self.mask_out(torch.exp(alpha_pre), mask_float).view(B, N))
            if self.pool_type[1] == 'sup' and self.training:
                # Supervised attention: KL divergence to the ground truth
                # (signature of F.kl_div differs between torch 1.x and older).
                if self.torch.find('1.') == 0:
                    KL_loss_per_node = self.mask_out(F.kl_div(torch.log(alpha + 1e-14), alpha_gt, reduction='none'),
                                                     mask_float.view(B, N))
                else:
                    KL_loss_per_node = self.mask_out(F.kl_div(torch.log(alpha + 1e-14), alpha_gt, reduce=False),
                                                     mask_float.view(B, N))
                KL_loss = self.kl_weight * torch.mean(KL_loss_per_node.sum(dim=1) / (N_nodes_float + 1e-7))  # mean over nodes, then mean over batches
        else:
            alpha = alpha_gt
        # Weight node features by their attention coefficients.
        x = x * alpha.view(B, N, 1)
        if self.large_graph:
            # For large graphs during training, all alpha values can be very small hindering training
            x = x * N_nodes_float.view(B, 1, 1)
        if self.is_topk:
            # Keep the topk_ratio fraction of nodes with the largest alpha.
            N_remove = torch.round(N_nodes_float * (1 - self.topk_ratio)).long()  # number of nodes to be removed for each graph
            idx = torch.sort(alpha, dim=1, descending=False)[1]  # indices of alpha in ascending order
            mask = mask.clone().view(B, N)
            for b in range(B):
                idx_b = idx[b, mask[b, idx[b]]]  # take indices of non-dummy nodes for current data example
                mask[b, idx_b[:N_remove[b]]] = 0
        else:
            # Threshold pooling: keep nodes whose alpha exceeds the threshold.
            mask = (mask & (alpha.view_as(mask) > self.threshold)).view(B, N)
        if self.drop_nodes:
            x, A, mask, N_nodes_pooled, idx = self.drop_nodes_edges(x, A, mask)
            if idx is not None and 'node_attn' in params_dict:
                # update ground truth (or weakly labeled) attention for a reduced graph
                params_dict['node_attn'].append(normalize_batch(self.mask_out(torch.gather(alpha_gt, dim=1, index=idx), mask.float())))
            if idx is not None and 'node_attn_eval' in params_dict:
                # update ground truth (or weakly labeled) attention for a reduced graph
                params_dict['node_attn_eval'].append(normalize_batch(self.mask_out(torch.gather(params_dict['node_attn_eval'][-1], dim=1, index=idx), mask.float())))
        else:
            # Nodes are only masked, not removed.
            N_nodes_pooled = torch.sum(mask, dim=1).long()  # B
            if 'node_attn' in params_dict:
                params_dict['node_attn'].append((self.mask_out(params_dict['node_attn'][-1], mask.float())))
            if 'node_attn_eval' in params_dict:
                params_dict['node_attn_eval'].append((self.mask_out(params_dict['node_attn_eval'][-1], mask.float())))
        params_dict['N_nodes'] = N_nodes_pooled
        # Zero adjacency entries touching any dropped node.
        mask_matrix = mask.unsqueeze(2) & mask.unsqueeze(1)
        A = A * mask_matrix.float()  # or A[~mask_matrix] = 0
        # Add additional losses regularizing the model
        if KL_loss is not None:
            if 'reg' not in params_dict:
                params_dict['reg'] = []
            params_dict['reg'].append(KL_loss)
        # Keep attention coefficients for evaluation
        for key, value in zip(['alpha', 'mask'], [alpha, mask]):
            if key not in params_dict:
                params_dict[key] = []
            params_dict[key].append(value.detach())
        if self.debug and alpha_gt is not None:
            # Diagnostics: how much attention mass lands on nodes that the
            # ground truth says should be kept (alpha_gt > 0) vs dropped.
            idx_correct_pool = (alpha_gt > 0)
            idx_correct_drop = (alpha_gt == 0)
            alpha_correct_pool = alpha[idx_correct_pool].sum() / N_nodes_float.sum()
            alpha_correct_drop = alpha[idx_correct_drop].sum() / N_nodes_float.sum()
            ratio_avg = (N_nodes_pooled.float() / N_nodes_float).mean()
            for key, values in zip(['alpha_correct_pool_debug', 'alpha_correct_drop_debug', 'ratio_avg_debug'],
                                   [alpha_correct_pool, alpha_correct_drop, ratio_avg]):
                if key not in params_dict:
                    params_dict[key] = []
                params_dict[key].append(values.detach())
        output = [x, A, mask]
        output.extend(data[3:])
        return output
| {
"pile_set_name": "Github"
} |
#!/bin/sh -e
# Minimal console-only Vim build/install script (exits on first error).
# All interpreter bindings, GUI, X11, input methods, and NLS are disabled
# to keep the package small; ex and view are installed as vim-ex/vim-view
# so they do not clash with other providers of those names.
./configure \
	--prefix=/usr \
	--enable-cscope \
	--enable-multibyte \
	--with-ex-name=vim-ex \
	--with-view-name=vim-view \
	--disable-perlinterp \
	--disable-pythoninterp \
	--disable-rubyinterp \
	--disable-netbeans \
	--disable-gpm \
	--disable-hangulinput \
	--disable-xim \
	--disable-gui \
	--disable-nls \
	--without-x
# Build, then stage into the destination root passed as the first argument.
make
make DESTDIR="$1" install
| {
"pile_set_name": "Github"
} |
/*
* Licensed to Elasticsearch B.V. under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch B.V. licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import { rgbToHex } from './rgb_to_hex';
// Tests for rgbToHex. Note: per these expectations the alpha channel of
// rgba() input is validated but dropped from the output, so every valid
// rgba(12,34,56,*) case maps to the same '#0c2238'.
describe('rgbToHex ', () => {
  describe('validation', () => {
    // Malformed strings (non-numeric parts, missing closing paren,
    // out-of-range alpha) must yield an empty string rather than throw.
    it('should return an empty string for malformed input', () => {
      expect(rgbToHex('fred')).toEqual('');
      expect(rgbToHex('rgb(fred')).toEqual('');
      expect(rgbToHex('rgb(fred, bob, banana')).toEqual('');
      expect(rgbToHex('rgb(0, 3, 5')).toEqual('');
      expect(rgbToHex('rgba(0, 3, 5')).toEqual('');
      expect(rgbToHex('rgba(0, 3, 5, 99)')).toEqual('');
    });
  });

  describe('rgb()', () => {
    it('should handle rgb() without whitespace', () => {
      expect(rgbToHex('rgb(12,34,56)')).toEqual('#0c2238');
    });
    it('should handle rgb() with whitespace', () => {
      expect(rgbToHex('rgb ( 12 , 34 , 56 )')).toEqual('#0c2238');
    });
  });

  describe('rgba()', () => {
    it('should handle no whitespace', () => {
      expect(rgbToHex('rgba(12,34,56,0.4)')).toEqual('#0c2238');
    });
    it('should handle whitespace', () => {
      expect(rgbToHex('rgba ( 12 , 34 , 56 , 0.4 )')).toEqual('#0c2238');
    });
    it('should handle integer maximum alpha', () => {
      expect(rgbToHex('rgba(12,34,56,1)')).toEqual('#0c2238');
    });
    it('should handle decimal maximum alpha', () => {
      expect(rgbToHex('rgba(12,34,56,1.00000)')).toEqual('#0c2238');
    });
    it('should handle integer zero alpha', () => {
      expect(rgbToHex('rgba(12,34,56,0)')).toEqual('#0c2238');
    });
    it('should handle decimal zero alpha', () => {
      expect(rgbToHex('rgba(12,34,56,0.0000)')).toEqual('#0c2238');
    });
  });
});
| {
"pile_set_name": "Github"
} |
fileFormatVersion: 2
guid: 782df8531fabddc4ea7773901c4b4e6b
NativeFormatImporter:
externalObjects: {}
mainObjectFileID: 2100000
userData:
assetBundleName:
assetBundleVariant:
| {
"pile_set_name": "Github"
} |
import Vue from 'vue';
let scrollBarWidth;
// Measure the width of the native scrollbar and memoize the result.
export default function() {
  // No DOM on the server: report a zero-width scrollbar.
  if (Vue.prototype.$isServer) return 0;
  // Already measured once — reuse the cached value.
  if (scrollBarWidth !== undefined) return scrollBarWidth;

  // Off-screen probe element used for the measurement.
  const probe = document.createElement('div');
  probe.className = 'el-scrollbar__wrap';
  probe.style.visibility = 'hidden';
  probe.style.width = '100px';
  probe.style.position = 'absolute';
  probe.style.top = '-9999px';
  document.body.appendChild(probe);

  // Width without a scrollbar, then force one to appear.
  const widthWithoutBar = probe.offsetWidth;
  probe.style.overflow = 'scroll';

  // Inner element's width shrinks by exactly the scrollbar width.
  const content = document.createElement('div');
  content.style.width = '100%';
  probe.appendChild(content);
  const widthWithBar = content.offsetWidth;

  probe.parentNode.removeChild(probe);
  scrollBarWidth = widthWithoutBar - widthWithBar;
  return scrollBarWidth;
};
| {
"pile_set_name": "Github"
} |
<?xml version="1.0" encoding="utf-8"?>
<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<ItemGroup>
<Filter Include="Resources">
<UniqueIdentifier>5c145889-00bb-46c0-b5f2-6a3c79eb517c</UniqueIdentifier>
<Extensions>rc;ico;cur;bmp;dlg;rc2;rct;bin;rgs;gif;jpg;jpeg;jpe;resx;tiff;tif;png;wav;mfcribbon-ms</Extensions>
</Filter>
</ItemGroup>
<ItemGroup>
<ClCompile Include="pch.cpp" />
<ClCompile Include="Class1.cpp" />
</ItemGroup>
<ItemGroup>
<ClInclude Include="pch.h" />
<ClInclude Include="Class1.h" />
</ItemGroup>
</Project> | {
"pile_set_name": "Github"
} |
define(["dojo/_base/lang", "./_base"], function(lang, validate){
validate.isValidIsbn = function(/* String */value) {
// summary:
// Validate ISBN-10 or ISBN-13 based on the length of value
// value: String
// An ISBN to validate
// returns: Boolean
var len, sum = 0, weight;
if(!lang.isString(value)){
value = String(value);
}
value = value.replace(/[- ]/g,''); //ignore dashes and whitespaces
len = value.length;
switch(len){
case 10:
weight = len;
// ISBN-10 validation algorithm
for(var i = 0; i < 9; i++){
sum += parseInt(value.charAt(i)) * weight;
weight--;
}
var t = value.charAt(9).toUpperCase();
sum += t == 'X' ? 10 : parseInt(t);
return sum % 11 == 0; // Boolean
break;
case 13:
weight = -1;
for(var i = 0; i< len; i++){
sum += parseInt(value.charAt(i)) * (2 + weight);
weight *= -1;
}
return sum % 10 == 0; // Boolean
break;
}
return false;
};
return validate.isValidIsbn;
});
| {
"pile_set_name": "Github"
} |
#!/usr/bin/perl
#
# hp3585b-control.pl
# version 0.8 -- 23 June 2003
#
# Basic control of HP 3585B Spectrum Analyzer
#
# Copyright 2003 by John R. Ackermann N8UR ([email protected])
# Licensed under the GPL version 2 or later; see the file COPYING
# included with this distribution. I request, but do not require, that
# any modifications that correct bugs or errors, or increase the program's
# functionality, be sent via email to the author at the address above.
#
# Current status:
#
# Version 0.8 -- No code changes, but now licensed under the GPL.
#
# Version 0.7 -- basic functionality seems to work OK, with (very) modest
# input validation.
#----------
use strict;
use POSIX qw(setsid);
use Getopt::Std;
use Time::HiRes qw(usleep time gettimeofday);
use LinuxGpib;
use GD;
use GD::Text;
use GD::Graph::mixed;
use GD::Graph::colour;
use Number::Format;
use n8ur qw(trim squash parse_value);
use hp3585b qw(get_settings fix_freq fix_db fix_sweep);
my $device = "hp3585b";
my $j;
my $command;
my $reading;
my $counter = 1;
my $gpib_status;
my $tmp_val;
my $tmp_suffix;
#----------
# usage and option initialization
my $opt_string = 'hpc:d:s:l:a:r:v:t:b:';
sub usage() {
print STDERR << "EOF";
usage: $0 [-p] [-c center freq] [-s span] [-l ref level] [-a range]
[-r rbw] [-v vbw] [t sweep time] [-b db/div] [-h help] [-d device]
This program provides basic control of an HP 8569B spectrum analyzer and
displays current settings.
Use value suffixes of Hz, kHz, MHz, dB, dBm, dBv as appropriate.
If not specified, params are unchanged. Default device is "hp3585b".
"-p" does an instrument preset
EOF
}
#----------------------
getopts( "$opt_string", \my %opt ) or usage() and exit;
usage() and exit if $opt{h};
my $preset = 0;
if ($opt{p}) {
$preset = 1;
}
if ($opt{d}) {
$device = $opt{d};
}
my $range = "";
if ($opt{a}) {
my $tmp = trim($opt{a});
if ($tmp eq "-25") {$range = "AR0R01" };
if ($tmp eq "-20") {$range = "AR0R02" };
if ($tmp eq "-15") {$range = "AR0R03" };
if ($tmp eq "-10") {$range = "AR0R04" };
if (($tmp eq "-5") || ($tmp eq "-05")) {$range = "AR0R05" };
if ($tmp eq "0") {$range = "AR0R06" };
if (($tmp eq "5") || ($tmp eq "05")) {$range = "AR0R07" };
if ($tmp eq "10") {$range = "AR0R08" };
if ($tmp eq "15") {$range = "AR0R09" };
if ($tmp eq "20") {$range = "AR0R10" };
if ($tmp eq "25") {$range = "AR0R11" };
if ($tmp eq "30") {$range = "AR0R12" };
}
my $db_div = "";
if ($opt{b}) {
$tmp_val = fix_db($opt{b});
if ( ($tmp_val ne "10") &&
($tmp_val ne "5") &&
($tmp_val ne "2") &&
($tmp_val ne "1") ) { die "Invalid dB/div value!"}
else {
$db_div = "DD" . $tmp_val . "DB";
}
}
my $cf = "";
if ($opt{c}) {
$tmp_val = fix_freq($opt{c});
if ($tmp_val eq "") { die "Invalid Center Frequency value!"}
else { $cf = "CF" . $tmp_val; }
}
my $fs = "";
if ($opt{s}) {
$tmp_val = fix_freq($opt{s});
if ($tmp_val eq "") { die "Invalid Span value!"}
else { $fs = "FS" . $tmp_val; }
}
my $rbw = "";
if ($opt{r}) {
$tmp_val = fix_freq($opt{r});
if ($tmp_val eq "") { die "Invalid RBW value!"}
else { $rbw = "RB" . $tmp_val; }
}
my $vbw = "";
if ($opt{v}) {
$tmp_val = fix_freq($opt{v});
if ($tmp_val eq "") { die "Invalid RBW value!"}
else { $rbw = "VB" . $tmp_val; }
}
my $rl = "";
if ($opt{l}) {
$tmp_val = fix_db($opt{l});
if ($tmp_val eq "") { die "Invalid ref level value!"}
else { $rbw = "RL" . $tmp_val; }
}
my $t = "";
if ($opt{t}) {
$tmp_val = fix_sweep($opt{t});
if ($tmp_val eq "") { die "Invalid sweep time value!"}
else { $rbw = "ST" . $tmp_val; }
}
#----------
# initialize instrument
my $dev = LinuxGpib::ibfind($device) ||
die "Can't open device $device!\n";
#----------
# Send command
if ($preset) {
$command = "PO0";
}
else {
$command = "AC0" . $cf . $fs . $rbw . $rl . $range . $db_div . "AC1";
}
LinuxGpib::ibwrt($dev,$command,length($command));
sleep 3;
#----------
# get instrument state: query the analyzer for all of its current settings
my ($start_freq,$center_freq,$stop_freq,$bin_freq,$top_right_pre,
	$top_right_val,$top_right_suf,$marker_freq,$marker_val,$rbw_val,
	$vbw_val,$db_div_val,$sweep_val,$sweep_suf,$range_val,$range_suf,
	$ref_val,$ref_suf,$ref_bottom,$span_val) = get_settings($dev);

#----------
# create annotation strings for plotting
my $numfmt = new Number::Format(-thousands_sep => ' ', -decimal_point => '.');
my $annotation1= $top_right_pre . ": " .
	$numfmt->format_number($top_right_val) . " Hz, " .
	$marker_val . " " . $ref_suf;
my $annotation2=
	"RBW: " . $rbw_val . " Hz VBW: " . $vbw_val . " Hz Scale: " .
	$db_div_val . " dB/ Sweep: " . $sweep_val . " Sec. Range: " .
	$range_val . " " . $range_suf;

# Print a human-readable summary of the instrument state to STDOUT.
print "\n";
print "Center: " . $numfmt->format_number($center_freq) .
	" Hz Span: " . $numfmt->format_number(squash($span_val)) .
	" Hz (" . squash($span_val)/10 . " Hz/)\n";
print "Start: " . $numfmt->format_number($start_freq) . " Hz Stop: " .
	$numfmt->format_number($stop_freq) . " Hz\n\n";
print $annotation1,"\n\n";
print "Ref. Level: " . $ref_val . $ref_suf . "\n";
print $annotation2,"\n\n";
"pile_set_name": "Github"
} |
/*
CryptoJS v3.1.2
code.google.com/p/crypto-js
(c) 2009-2013 by Jeff Mott. All rights reserved.
code.google.com/p/crypto-js/wiki/License
*/
(function () {
    // Shortcuts
    var C = CryptoJS;
    var C_lib = C.lib;
    var Base = C_lib.Base;
    var C_enc = C.enc;
    var Utf8 = C_enc.Utf8;
    var C_algo = C.algo;

    /**
     * HMAC algorithm.
     *
     * Computes HMAC(K, m) = H((K ^ opad) || H((K ^ ipad) || m)), with the
     * inner pad constant 0x36 and outer pad constant 0x5c repeated across
     * the hasher's block size (see the XOR loop in init below).
     */
    var HMAC = C_algo.HMAC = Base.extend({
        /**
         * Initializes a newly created HMAC.
         *
         * @param {Hasher} hasher The hash algorithm to use.
         * @param {WordArray|string} key The secret key.
         *
         * @example
         *
         *     var hmacHasher = CryptoJS.algo.HMAC.create(CryptoJS.algo.SHA256, key);
         */
        init: function (hasher, key) {
            // Init hasher
            hasher = this._hasher = new hasher.init();

            // Convert string to WordArray, else assume WordArray already
            if (typeof key == 'string') {
                key = Utf8.parse(key);
            }

            // Shortcuts
            var hasherBlockSize = hasher.blockSize;
            // blockSize is in 32-bit words; x4 converts to bytes.
            var hasherBlockSizeBytes = hasherBlockSize * 4;

            // Allow arbitrary length keys: a key longer than one hash block
            // is first reduced to its digest.
            if (key.sigBytes > hasherBlockSizeBytes) {
                key = hasher.finalize(key);
            }

            // Clamp excess bits
            key.clamp();

            // Clone key for inner and outer pads
            var oKey = this._oKey = key.clone();
            var iKey = this._iKey = key.clone();

            // Shortcuts
            var oKeyWords = oKey.words;
            var iKeyWords = iKey.words;

            // XOR keys with pad constants (0x5c = opad, 0x36 = ipad,
            // replicated over each byte of the 32-bit word)
            for (var i = 0; i < hasherBlockSize; i++) {
                oKeyWords[i] ^= 0x5c5c5c5c;
                iKeyWords[i] ^= 0x36363636;
            }
            // Both padded keys are exactly one hash block long.
            oKey.sigBytes = iKey.sigBytes = hasherBlockSizeBytes;

            // Set initial values
            this.reset();
        },

        /**
         * Resets this HMAC to its initial state.
         *
         * @example
         *
         *     hmacHasher.reset();
         */
        reset: function () {
            // Shortcut
            var hasher = this._hasher;

            // Reset, then pre-feed the inner padded key so that subsequent
            // update() calls append the message after (K ^ ipad).
            hasher.reset();
            hasher.update(this._iKey);
        },

        /**
         * Updates this HMAC with a message.
         *
         * @param {WordArray|string} messageUpdate The message to append.
         *
         * @return {HMAC} This HMAC instance.
         *
         * @example
         *
         *     hmacHasher.update('message');
         *     hmacHasher.update(wordArray);
         */
        update: function (messageUpdate) {
            this._hasher.update(messageUpdate);

            // Chainable
            return this;
        },

        /**
         * Finalizes the HMAC computation.
         * Note that the finalize operation is effectively a destructive, read-once operation.
         *
         * @param {WordArray|string} messageUpdate (Optional) A final message update.
         *
         * @return {WordArray} The HMAC.
         *
         * @example
         *
         *     var hmac = hmacHasher.finalize();
         *     var hmac = hmacHasher.finalize('message');
         *     var hmac = hmacHasher.finalize(wordArray);
         */
        finalize: function (messageUpdate) {
            // Shortcut
            var hasher = this._hasher;

            // Compute HMAC: outer hash of (K ^ opad) || innerHash.
            var innerHash = hasher.finalize(messageUpdate);
            hasher.reset();
            var hmac = hasher.finalize(this._oKey.clone().concat(innerHash));

            return hmac;
        }
    });
}());
| {
"pile_set_name": "Github"
} |
'use strict';
/**
 * MongoDB configuration using async/await on top of the native driver.
 * You can require this config file in your controllers and start using named collections directly.
 * See /controllers directory for sample usage.
 */
var mongodb = require('mongodb'),
connect = mongodb.connect,
config = require('./config');
// extending and exposing top co-mongo namespace like this is not optimal but it saves the user from one extra require();
module.exports = mongodb;
/**
* Opens a new connection to the mongo database, closing the existing one if exists.
*/
/**
 * Opens a new connection to the mongo database, closing the existing one if exists.
 * Exposes the db handle as mongodb.db and the default collections
 * (counters, users, posts) as properties of the mongodb namespace.
 */
mongodb.connect = async function () {
    // Tear down any previously opened connection before reconnecting.
    var previous = mongodb.db;
    if (previous) {
        await previous.close();
    }

    // export mongo db instance
    var db = await connect(config.mongo.url);
    mongodb.db = db;

    // export default collections
    mongodb.counters = db.collection('counters');
    mongodb.users = db.collection('users');
    mongodb.posts = db.collection('posts');
};
/**
* Retrieves the next sequence number for the given counter (indicated by @counterName).
* Useful for generating sequential integer IDs for certain collections (i.e. user collection).
*/
/**
 * Retrieves the next sequence number for the given counter (indicated by @counterName).
 * Useful for generating sequential integer IDs for certain collections (i.e. user collection).
 */
mongodb.getNextSequence = async function (counterName) {
    // Atomically increment the counter and read back the updated document
    // (returnOriginal: false => the post-increment value is returned).
    var update = {$inc: {seq: 1}};
    var options = {returnOriginal: false};
    var doc = await mongodb.counters.findOneAndUpdate({_id: counterName}, update, options);
    return doc.value.seq;
};
"pile_set_name": "Github"
} |
package edu.stanford.nlp.stats;
import junit.framework.TestCase;
/**
*
* @author lmthang
*
*/
public class DistributionTest extends TestCase {

  /**
   * Builds a counter with counts 1..4, converts it to log space, and checks
   * that getDistributionFromLogValues recovers the normalized distribution
   * (each count divided by the total of 10).
   */
  public void testGetDistributionFromLogValues(){
    Counter<String> c1 = new ClassicCounter<>();
    c1.setCount("p", 1.0);
    c1.setCount("q", 2.0);
    c1.setCount("r", 3.0);
    c1.setCount("s", 4.0);

    // take log
    Counters.logInPlace(c1);

    // now call distribution
    Distribution<String> distribution = Distribution.getDistributionFromLogValues(c1);

    // test
    // FIX: JUnit's assertEquals takes (expected, actual); the original had
    // the arguments reversed, which produces misleading failure messages.
    assertEquals(4, distribution.keySet().size()); // size

    // keys — assertTrue is the idiomatic form of assertEquals(..., true)
    assertTrue(distribution.containsKey("p"));
    assertTrue(distribution.containsKey("q"));
    assertTrue(distribution.containsKey("r"));
    assertTrue(distribution.containsKey("s"));

    // values (normalized probabilities, within 1e-10)
    assertEquals(1.0E-1, distribution.getCount("p"), 1E-10);
    assertEquals(2.0E-1, distribution.getCount("q"), 1E-10);
    assertEquals(3.0E-1, distribution.getCount("r"), 1E-10);
    assertEquals(4.0E-1, distribution.getCount("s"), 1E-10);
  }
}
| {
"pile_set_name": "Github"
} |
debug printing
printActivationsOf: aMethodObj
	"Scan the heap, printing the oop, short description and pc of every
	 context whose method is aMethodObj."
	| oop |
	<api>
	oop := self firstAccessibleObject.
	[oop = nil] whileFalse:
		[((self isContextNonImm: oop)
		  and: [aMethodObj = (self fetchPointer: MethodIndex ofObject: oop)]) ifTrue:
			[self interpreter
				printHex: oop; space; printOopShort: oop; print: ' pc ';
				printHex: (self fetchPointer: InstructionPointerIndex ofObject: oop); cr].
		 oop := self accessibleObjectAfter: oop]
"pile_set_name": "Github"
} |
// go generate gen.go
// GENERATED BY THE COMMAND ABOVE; DO NOT EDIT
package ipv4
// Internet Control Message Protocol (ICMP) Parameters, Updated: 2013-04-19
const (
ICMPTypeEchoReply ICMPType = 0 // Echo Reply
ICMPTypeDestinationUnreachable ICMPType = 3 // Destination Unreachable
ICMPTypeRedirect ICMPType = 5 // Redirect
ICMPTypeEcho ICMPType = 8 // Echo
ICMPTypeRouterAdvertisement ICMPType = 9 // Router Advertisement
ICMPTypeRouterSolicitation ICMPType = 10 // Router Solicitation
ICMPTypeTimeExceeded ICMPType = 11 // Time Exceeded
ICMPTypeParameterProblem ICMPType = 12 // Parameter Problem
ICMPTypeTimestamp ICMPType = 13 // Timestamp
ICMPTypeTimestampReply ICMPType = 14 // Timestamp Reply
ICMPTypePhoturis ICMPType = 40 // Photuris
)
// Internet Control Message Protocol (ICMP) Parameters, Updated: 2013-04-19
var icmpTypes = map[ICMPType]string{
0: "echo reply",
3: "destination unreachable",
5: "redirect",
8: "echo",
9: "router advertisement",
10: "router solicitation",
11: "time exceeded",
12: "parameter problem",
13: "timestamp",
14: "timestamp reply",
40: "photuris",
}
| {
"pile_set_name": "Github"
} |
// Package S016 defines an Analyzer that checks for
// Schema including Set without TypeSet
package S016
import (
"go/ast"
"golang.org/x/tools/go/analysis"
"github.com/bflad/tfproviderlint/helper/terraformtype/helper/schema"
"github.com/bflad/tfproviderlint/passes/commentignore"
"github.com/bflad/tfproviderlint/passes/helper/schema/schemainfo"
)
const Doc = `check for Schema including Set without TypeSet

The S016 analyzer reports cases of schema including Set without TypeSet,
which will fail schema validation.`

// analyzerName is used both for report prefixes and for matching
// lint-ignore comments.
const analyzerName = "S016"

// Analyzer wires the S016 check into the go/analysis framework. It consumes
// schema.Schema composite literals pre-parsed by the schemainfo pass and
// honors suppression comments collected by the commentignore pass.
var Analyzer = &analysis.Analyzer{
	Name: analyzerName,
	Doc:  Doc,
	Requires: []*analysis.Analyzer{
		schemainfo.Analyzer,
		commentignore.Analyzer,
	},
	Run: run,
}
// run reports every schema.Schema literal that declares a Set field while
// its Type is not schema.TypeSet. The diagnostic is anchored on the Type
// selector when one is present, otherwise on the literal's opening brace.
func run(pass *analysis.Pass) (interface{}, error) {
	ignorer := pass.ResultOf[commentignore.Analyzer].(*commentignore.Ignorer)
	infos := pass.ResultOf[schemainfo.Analyzer].([]*schema.SchemaInfo)

	for _, info := range infos {
		if ignorer.ShouldIgnore(analyzerName, info.AstCompositeLit) {
			continue
		}

		// Only schemas that declare Set but are not TypeSet are invalid.
		if !info.DeclaresField(schema.SchemaFieldSet) || info.IsType(schema.SchemaValueTypeSet) {
			continue
		}

		if sel, ok := info.AstCompositeLit.Type.(*ast.SelectorExpr); ok {
			pass.Reportf(sel.Sel.Pos(), "%s: schema Set should only be included for TypeSet", analyzerName)
		} else {
			pass.Reportf(info.AstCompositeLit.Lbrace, "%s: schema Set should only be included for TypeSet", analyzerName)
		}
	}

	return nil, nil
}
| {
"pile_set_name": "Github"
} |
<template>
  <!-- Placeholder markup: renders an empty <div> containing an empty <span>. -->
  <div>
    <span></span>
  </div>
</template>
| {
"pile_set_name": "Github"
} |
.\" $OpenBSD: ssh-pkcs11-helper.8,v 1.3 2010/02/10 23:20:38 markus Exp $
.\"
.\" Copyright (c) 2010 Markus Friedl. All rights reserved.
.\"
.\" Permission to use, copy, modify, and distribute this software for any
.\" purpose with or without fee is hereby granted, provided that the above
.\" copyright notice and this permission notice appear in all copies.
.\"
.\" THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
.\" WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
.\" MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
.\" ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
.\" WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
.\" ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
.\" OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
.\"
.Dd $Mdocdate: February 10 2010 $
.Dt SSH-PKCS11-HELPER 8
.Os
.Sh NAME
.Nm ssh-pkcs11-helper
.Nd ssh-agent helper program for PKCS#11 support
.Sh SYNOPSIS
.Nm
.Sh DESCRIPTION
.Nm
is used by
.Xr ssh-agent 1
to access keys provided by a PKCS#11 token.
.Pp
.Nm
is not intended to be invoked by the user, but from
.Xr ssh-agent 1 .
.Sh SEE ALSO
.Xr ssh 1 ,
.Xr ssh-add 1 ,
.Xr ssh-agent 1
.Sh HISTORY
.Nm
first appeared in
.Ox 4.7 .
.Sh AUTHORS
.An Markus Friedl Aq markus@openbsd.org
| {
"pile_set_name": "Github"
} |
// NeL - MMORPG Framework <http://dev.ryzom.com/projects/nel/>
// Copyright (C) 2010 Winch Gate Property Limited
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as
// published by the Free Software Foundation, either version 3 of the
// License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
#ifndef NL_PS_ITERATOR_H
#define NL_PS_ITERATOR_H
#include "nel/3d/animation_time.h"
namespace NL3D
{
#ifdef NL_OS_WINDOWS
#define GET_INLINE __forceinline
#else
#define GET_INLINE
#endif
/** Class that manage iterator progression with a step of 1
* We assume that T::value_type gives the operator* return type
* T is the type of the iterator.
* NOTE: this iterator is not intended to serves with STL algo, it doesn't support all the needed features,
* ++ and * operator are provided as syntaxic sugars..
* This iterator is READ-ONLY
* TP is the type pointed by T
*/
template <typename T, typename PT>
struct CAdvance1Iterator
{
	T It;
	CAdvance1Iterator() {}
	/// create this iterator from the start of a container, and a given position (expressed in fixed point)
	CAdvance1Iterator(T it, uint32 index = 0)
	{
		It = it + index;
	}
	const PT &get() const { return *It; }
	void advance() { ++It; }
	void advance(uint numSteps)
	{
		It = It + numSteps;
	}
	CAdvance1Iterator &operator++() { advance(); return *this; }
	// BUG FIX: the post-increment copy must be initialized from the object
	// (*this); the original used the raw 'this' pointer, which does not
	// compile once this overload is instantiated.
	CAdvance1Iterator operator++(int) { CAdvance1Iterator tmp = *this; advance(); return tmp; }
	const PT &operator * () const { return get(); }
	/// const T &operator -> () const { return It; }
	/// return the step in a 16:16 format
	bool operator==(const CAdvance1Iterator &other) const { return other.It == It; }
	bool operator!=(const CAdvance1Iterator &other) const { return !(*this == other); }
	CAdvance1Iterator operator+(sint quantity) { return CAdvance1Iterator(It + quantity); }
	CAdvance1Iterator &operator+=(sint quantity) { It = It + quantity; return *this; }
};
/** Class that manage iterator progression with a step of 16:16 in fixed point
* We assume that T::value_type gives the operator* return type
* T is the type of the iterator
* This iterator is READ-ONLY
* NOTE: this iterator is not intended to serves with STL algo, it doesn't support all the needed features,
* ++ and * operator are provided as syntaxic sugars..
*/
template<typename T, typename PT>
struct CAdvance1616Iterator
{
	T It;           // base iterator; never advanced, offset lives in CurrPos
	uint32 CurrPos; // current position in 16:16 fixed point
	uint32 Step;    // per-advance() increment in 16:16 fixed point
	CAdvance1616Iterator() {}
	/// create this iterator from the start of a container, and a given position (expressed in fixed point)
	CAdvance1616Iterator(T it, uint32 index, uint32 step)
	{
		It = it;
		CurrPos = index * step;
		Step = step;
	}
	// Dereference at the integer part of the fixed-point position.
	const PT &get() const { return *(It + (CurrPos >> 16)); }
	void advance() { CurrPos += Step; }
	void advance(uint numSteps)
	{
		CurrPos = CurrPos + numSteps * Step;
	}
	const PT &operator * () const { return get(); }
	/// T operator -> () const { return It + (CurrPos >> 16); }
	CAdvance1616Iterator &operator++() { advance(); return *this; }
	// BUG FIX: the post-increment copy must be initialized from the object
	// (*this); the original used the raw 'this' pointer, which does not
	// compile once this overload is instantiated.
	CAdvance1616Iterator operator++(int) { CAdvance1616Iterator tmp = *this; advance(); return tmp; }
	bool operator==(const CAdvance1616Iterator &other) const
	{
		// NOTE(review): guarded by MSVC's _DEBUG; NeL elsewhere tends to use
		// NL_DEBUG -- confirm which macro is intended here.
#ifdef _DEBUG
		nlassert(other.It == It);
		nlassert(other.Step == Step);
#endif
		return other.CurrPos == CurrPos;
	}
	bool operator!=(const CAdvance1616Iterator &other) const { return !(*this == other); }
	CAdvance1616Iterator operator+(sint quantity)
	{
		CAdvance1616Iterator res;
		res.It = It;
		res.CurrPos = CurrPos + Step * quantity;
		res.Step = Step;
		return res;
	}
	CAdvance1616Iterator &operator+=(sint quantity)
	{
		CurrPos += quantity * Step;
		return *this;
	}
};
//////////////////////////////////////////////////
//////////////////////////////////////////////////
/// Some typedefs
typedef CAdvance1Iterator<TPSAttribFloat::const_iterator, float> TIteratorFloatStep1;
// BUG FIX: this typedef was a duplicate of TIteratorFloatStep1; the time
// iterator must be named TIteratorTimeStep1, mirroring the step-16:16
// trio (Float / Time / Vect) below.
typedef CAdvance1Iterator<TPSAttribFloat::const_iterator, TAnimationTime> TIteratorTimeStep1;
typedef CAdvance1Iterator<TPSAttribVector::const_iterator, NLMISC::CVector> TIteratorVectStep1;
typedef CAdvance1616Iterator<TPSAttribFloat::const_iterator, float> TIteratorFloatStep1616;
typedef CAdvance1616Iterator<TPSAttribFloat::const_iterator, TAnimationTime> TIteratorTimeStep1616;
typedef CAdvance1616Iterator<TPSAttribVector::const_iterator, NLMISC::CVector> TIteratorVectStep1616;
} // NL3D
#endif
| {
"pile_set_name": "Github"
} |
/*
* Copyright (c) 2017, 2018 Oracle and/or its affiliates. All rights reserved.
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License v. 2.0, which is available at
* http://www.eclipse.org/legal/epl-2.0.
*
* This Source Code may also be made available under the following Secondary
* Licenses when the conditions for such availability set forth in the
* Eclipse Public License v. 2.0 are satisfied: GNU General Public License,
* version 2 with the GNU Classpath Exception, which is available at
* https://www.gnu.org/software/classpath/license.html.
*
* SPDX-License-Identifier: EPL-2.0 OR GPL-2.0 WITH Classpath-exception-2.0
*/
package com.sun.s1asdev.jdbc.markconnectionasbad.local.ejb;
import jakarta.ejb.*;
import java.rmi.*;
/**
 * Home interface for the SimpleBMP entity bean; clients use it to obtain
 * remote references to SimpleBMP instances.
 */
public interface SimpleBMPHome
    extends EJBHome {

    /**
     * Creates a SimpleBMP bean instance.
     *
     * @throws RemoteException on a communication failure
     * @throws CreateException if the bean cannot be created
     */
    SimpleBMP create()
        throws RemoteException, CreateException;
}
| {
"pile_set_name": "Github"
} |
/**
* Copyright IBM Corp. 2016, 2018
*
* This source code is licensed under the Apache-2.0 license found in the
* LICENSE file in the root directory of this source tree.
*/
import React from 'react';
import {
StructuredListWrapper,
StructuredListHead,
StructuredListInput,
StructuredListBody,
StructuredListRow,
StructuredListCell,
} from '../StructuredList';
import { mount, shallow } from 'enzyme';
// Rendering tests for the wrapper: base class, className passthrough, and
// the border/selection modifier classes.
describe('StructuredListWrapper', () => {
  describe('Renders as expected', () => {
    const wrapper = shallow(
      <StructuredListWrapper className="extra-class">hi</StructuredListWrapper>
    );

    it('should have the expected classes', () => {
      expect(wrapper.hasClass('bx--structured-list')).toEqual(true);
    });

    it('Should add extra classes that are passed via className', () => {
      expect(wrapper.hasClass('extra-class')).toEqual(true);
    });

    it('By default, border prop is false', () => {
      wrapper.setProps({ border: false });
      expect(wrapper.hasClass('bx--structured-list--border')).toEqual(false);
    });

    it('By default, selection prop is false', () => {
      // NOTE(review): this sets `border`, not `selection` — looks like a
      // copy-paste slip; the assertion still exercises the default
      // selection class. Confirm intent.
      wrapper.setProps({ border: false });
      expect(wrapper.hasClass('bx--structured-list--selection')).toEqual(false);
    });

    it('Should add the modifier class for border when border prop is true', () => {
      wrapper.setProps({ border: true });
      expect(wrapper.hasClass('bx--structured-list--border')).toEqual(true);
    });

    it('Should add the modifier class for selection when selection prop is true', () => {
      wrapper.setProps({ selection: true });
      expect(wrapper.hasClass('bx--structured-list--selection')).toEqual(true);
    });
  });
});

// Head element: base class, className passthrough, and ...other props.
describe('StructuredListHead', () => {
  describe('Renders as expected', () => {
    const wrapper = shallow(
      <StructuredListHead className="extra-class">hi</StructuredListHead>
    );

    it('should have the expected classes', () => {
      expect(wrapper.hasClass('bx--structured-list-thead')).toEqual(true);
    });

    it('Should add extra classes that are passed via className', () => {
      expect(wrapper.hasClass('extra-class')).toEqual(true);
    });

    it('Should accept other props from ...other', () => {
      const wrapperProps = shallow(
        <StructuredListHead title="title">hi</StructuredListHead>
      );
      expect(wrapperProps.props().title).toEqual('title');
    });
  });
});

// Hidden input element: classes, props passthrough, and unique id generation.
describe('StructuredListInput', () => {
  describe('Renders as expected', () => {
    const wrapper = shallow(<StructuredListInput className="extra-class" />);

    it('should have the expected classes', () => {
      expect(wrapper.hasClass('bx--structured-list-input')).toEqual(true);
    });

    it('Should add extra classes that are passed via className', () => {
      expect(wrapper.hasClass('extra-class')).toEqual(true);
    });

    it('Should accept other props from ...other', () => {
      const wrapperProps = shallow(<StructuredListInput title="title" />);
      expect(wrapperProps.props().title).toEqual('title');
    });

    it('Should render unique id with multiple inputs when no id prop is given', () => {
      // mount (not shallow) so the instance and its generated uid exist.
      const wrapper1 = mount(<StructuredListInput className="extra-class" />);
      const wrapper2 = mount(<StructuredListInput className="extra-class" />);
      expect(wrapper1.instance().uid).not.toEqual(wrapper2.instance().uid);
    });
  });
});

// Row element: classes, header modifier, and the div/label tag switch.
describe('StructuredListRow', () => {
  describe('Renders as expected', () => {
    const wrapper = shallow(<StructuredListRow className="extra-class" />);

    it('should have the expected classes', () => {
      expect(wrapper.hasClass('bx--structured-list-row')).toEqual(true);
    });

    it('Should add extra classes that are passed via className', () => {
      expect(wrapper.hasClass('extra-class')).toEqual(true);
    });

    it('should use correct class when head prop is true', () => {
      wrapper.setProps({ head: true });
      expect(wrapper.hasClass('bx--structured-list-row--header-row')).toEqual(
        true
      );
    });

    it('should use <div> HTML by default (or when label prop is false)', () => {
      const wrapperLabel = shallow(<StructuredListRow />);
      expect(wrapperLabel.getElement().type).toEqual('div');
    });

    it('should use <label> HTML when label prop is true', () => {
      const wrapperLabel = shallow(<StructuredListRow label />);
      expect(wrapperLabel.getElement().type).toEqual('label');
    });

    it('Should accept other props from ...other', () => {
      const wrapperProps = shallow(
        <StructuredListRow title="title">hi</StructuredListRow>
      );
      expect(wrapperProps.props().title).toEqual('title');
    });
  });
});

// Body element: classes and props passthrough.
describe('StructuredListBody', () => {
  describe('Renders as expected', () => {
    const wrapper = shallow(
      <StructuredListBody className="extra-class">hi</StructuredListBody>
    );

    it('should have the expected classes', () => {
      expect(wrapper.hasClass('bx--structured-list-tbody')).toEqual(true);
    });

    it('Should add extra classes that are passed via className', () => {
      expect(wrapper.hasClass('extra-class')).toEqual(true);
    });

    it('Should accept other props from ...other', () => {
      const wrapperProps = shallow(
        <StructuredListBody title="title">hi</StructuredListBody>
      );
      expect(wrapperProps.props().title).toEqual('title');
    });
  });
});

// Cell element: td/th switch, nowrap modifier, and props passthrough.
describe('StructuredListCell', () => {
  describe('Renders as expected', () => {
    const wrapper = shallow(
      <StructuredListCell className="extra-class">hi</StructuredListCell>
    );

    it('Should add extra classes that are passed via className', () => {
      expect(wrapper.hasClass('extra-class')).toEqual(true);
    });

    it('should have the expected classes', () => {
      expect(wrapper.hasClass('bx--structured-list-td')).toEqual(true);
    });

    it('should use correct class when head prop is true', () => {
      wrapper.setProps({ head: true });
      expect(wrapper.hasClass('bx--structured-list-th')).toEqual(true);
    });

    it('should use correct class when noWrap prop is true', () => {
      wrapper.setProps({ noWrap: true });
      expect(wrapper.hasClass('bx--structured-list-content--nowrap')).toEqual(
        true
      );
    });

    it('Should accept other props from ...other', () => {
      const wrapperProps = shallow(
        <StructuredListCell title="title">hi</StructuredListCell>
      );
      expect(wrapperProps.props().title).toEqual('title');
    });
  });
});
"pile_set_name": "Github"
} |
**string_decoder.js** (`require('string_decoder')`) from Node.js core
Copyright Joyent, Inc. and other Node contributors. See LICENCE file for details.
Version numbers match the versions found in Node core, e.g. 0.10.24 matches Node 0.10.24, likewise 0.11.10 matches Node 0.11.10. **Prefer the stable version over the unstable.**
The *build/* directory contains a build script that will scrape the source from the [joyent/node](https://github.com/joyent/node) repo given a specific Node version. | {
"pile_set_name": "Github"
} |
import React from 'react'
import {Button, View, StyleSheet, Text} from 'react-native'
import Ionicons from 'react-native-vector-icons/Ionicons'
// Settings tab screen; currently a centered "coming soon" placeholder.
export default class SettingsScreen extends React.Component {
  static navigationOptions = {
    // Solid gear icon when the tab is focused, outline variant otherwise.
    tabBarIcon: ({focused, tintColor}) => (
      <Ionicons name={`ios-options${focused ? '' : '-outline'}`} size={25} color={tintColor} />
    ),
  }

  render() {
    return (
      <View style={styles.container}>
        <Text style={styles.text}>Settings coming soon.</Text>
      </View>
    )
  }
}

// flex: 1 + justifyContent centers the message vertically; the text style
// centers it horizontally.
const styles = StyleSheet.create({
  container: {
    justifyContent: 'center',
    flex: 1,
  },
  text: {
    textAlign: 'center',
  },
})
| {
"pile_set_name": "Github"
} |
/*
* Copyright 2014 Advanced Micro Devices, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include <linux/bsearch.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include "kfd_priv.h"
#include "kfd_device_queue_manager.h"
#include "kfd_pm4_headers_vi.h"
#include "cwsr_trap_handler.h"
#include "kfd_iommu.h"
/* Alignment/padding applied to each per-queue MQD allocation; every
 * kfd_device_info below reports this via .mqd_size_aligned. */
#define MQD_SIZE_ALIGNED 768

/*
 * kfd_locked is used to lock the kfd driver during suspend or reset
 * once locked, kfd driver will stop any further GPU execution.
 * create process (open) will return -EAGAIN.
 */
static atomic_t kfd_locked = ATOMIC_INIT(0);
#ifdef KFD_SUPPORT_IOMMU_V2
static const struct kfd_device_info kaveri_device_info = {
.asic_family = CHIP_KAVERI,
.max_pasid_bits = 16,
/* max num of queues for KV.TODO should be a dynamic value */
.max_no_of_hqd = 24,
.doorbell_size = 4,
.ih_ring_entry_size = 4 * sizeof(uint32_t),
.event_interrupt_class = &event_interrupt_class_cik,
.num_of_watch_points = 4,
.mqd_size_aligned = MQD_SIZE_ALIGNED,
.supports_cwsr = false,
.needs_iommu_device = true,
.needs_pci_atomics = false,
.num_sdma_engines = 2,
};
static const struct kfd_device_info carrizo_device_info = {
.asic_family = CHIP_CARRIZO,
.max_pasid_bits = 16,
/* max num of queues for CZ.TODO should be a dynamic value */
.max_no_of_hqd = 24,
.doorbell_size = 4,
.ih_ring_entry_size = 4 * sizeof(uint32_t),
.event_interrupt_class = &event_interrupt_class_cik,
.num_of_watch_points = 4,
.mqd_size_aligned = MQD_SIZE_ALIGNED,
.supports_cwsr = true,
.needs_iommu_device = true,
.needs_pci_atomics = false,
.num_sdma_engines = 2,
};
static const struct kfd_device_info raven_device_info = {
.asic_family = CHIP_RAVEN,
.max_pasid_bits = 16,
.max_no_of_hqd = 24,
.doorbell_size = 8,
.ih_ring_entry_size = 8 * sizeof(uint32_t),
.event_interrupt_class = &event_interrupt_class_v9,
.num_of_watch_points = 4,
.mqd_size_aligned = MQD_SIZE_ALIGNED,
.supports_cwsr = true,
.needs_iommu_device = true,
.needs_pci_atomics = true,
.num_sdma_engines = 1,
};
#endif
static const struct kfd_device_info hawaii_device_info = {
.asic_family = CHIP_HAWAII,
.max_pasid_bits = 16,
/* max num of queues for KV.TODO should be a dynamic value */
.max_no_of_hqd = 24,
.doorbell_size = 4,
.ih_ring_entry_size = 4 * sizeof(uint32_t),
.event_interrupt_class = &event_interrupt_class_cik,
.num_of_watch_points = 4,
.mqd_size_aligned = MQD_SIZE_ALIGNED,
.supports_cwsr = false,
.needs_iommu_device = false,
.needs_pci_atomics = false,
.num_sdma_engines = 2,
};
static const struct kfd_device_info tonga_device_info = {
.asic_family = CHIP_TONGA,
.max_pasid_bits = 16,
.max_no_of_hqd = 24,
.doorbell_size = 4,
.ih_ring_entry_size = 4 * sizeof(uint32_t),
.event_interrupt_class = &event_interrupt_class_cik,
.num_of_watch_points = 4,
.mqd_size_aligned = MQD_SIZE_ALIGNED,
.supports_cwsr = false,
.needs_iommu_device = false,
.needs_pci_atomics = true,
.num_sdma_engines = 2,
};
static const struct kfd_device_info tonga_vf_device_info = {
.asic_family = CHIP_TONGA,
.max_pasid_bits = 16,
.max_no_of_hqd = 24,
.doorbell_size = 4,
.ih_ring_entry_size = 4 * sizeof(uint32_t),
.event_interrupt_class = &event_interrupt_class_cik,
.num_of_watch_points = 4,
.mqd_size_aligned = MQD_SIZE_ALIGNED,
.supports_cwsr = false,
.needs_iommu_device = false,
.needs_pci_atomics = false,
.num_sdma_engines = 2,
};
static const struct kfd_device_info fiji_device_info = {
.asic_family = CHIP_FIJI,
.max_pasid_bits = 16,
.max_no_of_hqd = 24,
.doorbell_size = 4,
.ih_ring_entry_size = 4 * sizeof(uint32_t),
.event_interrupt_class = &event_interrupt_class_cik,
.num_of_watch_points = 4,
.mqd_size_aligned = MQD_SIZE_ALIGNED,
.supports_cwsr = true,
.needs_iommu_device = false,
.needs_pci_atomics = true,
.num_sdma_engines = 2,
};
static const struct kfd_device_info fiji_vf_device_info = {
.asic_family = CHIP_FIJI,
.max_pasid_bits = 16,
.max_no_of_hqd = 24,
.doorbell_size = 4,
.ih_ring_entry_size = 4 * sizeof(uint32_t),
.event_interrupt_class = &event_interrupt_class_cik,
.num_of_watch_points = 4,
.mqd_size_aligned = MQD_SIZE_ALIGNED,
.supports_cwsr = true,
.needs_iommu_device = false,
.needs_pci_atomics = false,
.num_sdma_engines = 2,
};
static const struct kfd_device_info polaris10_device_info = {
.asic_family = CHIP_POLARIS10,
.max_pasid_bits = 16,
.max_no_of_hqd = 24,
.doorbell_size = 4,
.ih_ring_entry_size = 4 * sizeof(uint32_t),
.event_interrupt_class = &event_interrupt_class_cik,
.num_of_watch_points = 4,
.mqd_size_aligned = MQD_SIZE_ALIGNED,
.supports_cwsr = true,
.needs_iommu_device = false,
.needs_pci_atomics = true,
.num_sdma_engines = 2,
};
static const struct kfd_device_info polaris10_vf_device_info = {
.asic_family = CHIP_POLARIS10,
.max_pasid_bits = 16,
.max_no_of_hqd = 24,
.doorbell_size = 4,
.ih_ring_entry_size = 4 * sizeof(uint32_t),
.event_interrupt_class = &event_interrupt_class_cik,
.num_of_watch_points = 4,
.mqd_size_aligned = MQD_SIZE_ALIGNED,
.supports_cwsr = true,
.needs_iommu_device = false,
.needs_pci_atomics = false,
.num_sdma_engines = 2,
};
static const struct kfd_device_info polaris11_device_info = {
.asic_family = CHIP_POLARIS11,
.max_pasid_bits = 16,
.max_no_of_hqd = 24,
.doorbell_size = 4,
.ih_ring_entry_size = 4 * sizeof(uint32_t),
.event_interrupt_class = &event_interrupt_class_cik,
.num_of_watch_points = 4,
.mqd_size_aligned = MQD_SIZE_ALIGNED,
.supports_cwsr = true,
.needs_iommu_device = false,
.needs_pci_atomics = true,
.num_sdma_engines = 2,
};
static const struct kfd_device_info vega10_device_info = {
.asic_family = CHIP_VEGA10,
.max_pasid_bits = 16,
.max_no_of_hqd = 24,
.doorbell_size = 8,
.ih_ring_entry_size = 8 * sizeof(uint32_t),
.event_interrupt_class = &event_interrupt_class_v9,
.num_of_watch_points = 4,
.mqd_size_aligned = MQD_SIZE_ALIGNED,
.supports_cwsr = true,
.needs_iommu_device = false,
.needs_pci_atomics = false,
.num_sdma_engines = 2,
};
static const struct kfd_device_info vega10_vf_device_info = {
.asic_family = CHIP_VEGA10,
.max_pasid_bits = 16,
.max_no_of_hqd = 24,
.doorbell_size = 8,
.ih_ring_entry_size = 8 * sizeof(uint32_t),
.event_interrupt_class = &event_interrupt_class_v9,
.num_of_watch_points = 4,
.mqd_size_aligned = MQD_SIZE_ALIGNED,
.supports_cwsr = true,
.needs_iommu_device = false,
.needs_pci_atomics = false,
.num_sdma_engines = 2,
};
struct kfd_deviceid {
unsigned short did;
const struct kfd_device_info *device_info;
};
/* PCI device IDs KFD supports, each paired with its per-ASIC properties.
 * Looked up linearly by lookup_device_info(); an unknown ID means the GPU
 * is not handled by KFD. APUs that rely on IOMMUv2 (Kaveri, Carrizo,
 * Raven) are only listed when the driver is built with IOMMUv2 support.
 */
static const struct kfd_deviceid supported_devices[] = {
#ifdef KFD_SUPPORT_IOMMU_V2
{ 0x1304, &kaveri_device_info },	/* Kaveri */
{ 0x1305, &kaveri_device_info },	/* Kaveri */
{ 0x1306, &kaveri_device_info },	/* Kaveri */
{ 0x1307, &kaveri_device_info },	/* Kaveri */
{ 0x1309, &kaveri_device_info },	/* Kaveri */
{ 0x130A, &kaveri_device_info },	/* Kaveri */
{ 0x130B, &kaveri_device_info },	/* Kaveri */
{ 0x130C, &kaveri_device_info },	/* Kaveri */
{ 0x130D, &kaveri_device_info },	/* Kaveri */
{ 0x130E, &kaveri_device_info },	/* Kaveri */
{ 0x130F, &kaveri_device_info },	/* Kaveri */
{ 0x1310, &kaveri_device_info },	/* Kaveri */
{ 0x1311, &kaveri_device_info },	/* Kaveri */
{ 0x1312, &kaveri_device_info },	/* Kaveri */
{ 0x1313, &kaveri_device_info },	/* Kaveri */
{ 0x1315, &kaveri_device_info },	/* Kaveri */
{ 0x1316, &kaveri_device_info },	/* Kaveri */
{ 0x1317, &kaveri_device_info },	/* Kaveri */
{ 0x1318, &kaveri_device_info },	/* Kaveri */
{ 0x131B, &kaveri_device_info },	/* Kaveri */
{ 0x131C, &kaveri_device_info },	/* Kaveri */
{ 0x131D, &kaveri_device_info },	/* Kaveri */
{ 0x9870, &carrizo_device_info },	/* Carrizo */
{ 0x9874, &carrizo_device_info },	/* Carrizo */
{ 0x9875, &carrizo_device_info },	/* Carrizo */
{ 0x9876, &carrizo_device_info },	/* Carrizo */
{ 0x9877, &carrizo_device_info },	/* Carrizo */
{ 0x15DD, &raven_device_info },	/* Raven */
#endif
{ 0x67A0, &hawaii_device_info },	/* Hawaii */
{ 0x67A1, &hawaii_device_info },	/* Hawaii */
{ 0x67A2, &hawaii_device_info },	/* Hawaii */
{ 0x67A8, &hawaii_device_info },	/* Hawaii */
{ 0x67A9, &hawaii_device_info },	/* Hawaii */
{ 0x67AA, &hawaii_device_info },	/* Hawaii */
{ 0x67B0, &hawaii_device_info },	/* Hawaii */
{ 0x67B1, &hawaii_device_info },	/* Hawaii */
{ 0x67B8, &hawaii_device_info },	/* Hawaii */
{ 0x67B9, &hawaii_device_info },	/* Hawaii */
{ 0x67BA, &hawaii_device_info },	/* Hawaii */
{ 0x67BE, &hawaii_device_info },	/* Hawaii */
{ 0x6920, &tonga_device_info },	/* Tonga */
{ 0x6921, &tonga_device_info },	/* Tonga */
{ 0x6928, &tonga_device_info },	/* Tonga */
{ 0x6929, &tonga_device_info },	/* Tonga */
{ 0x692B, &tonga_device_info },	/* Tonga */
{ 0x692F, &tonga_vf_device_info },	/* Tonga vf */
{ 0x6938, &tonga_device_info },	/* Tonga */
{ 0x6939, &tonga_device_info },	/* Tonga */
{ 0x7300, &fiji_device_info },	/* Fiji */
{ 0x730F, &fiji_vf_device_info },	/* Fiji vf*/
{ 0x67C0, &polaris10_device_info },	/* Polaris10 */
{ 0x67C1, &polaris10_device_info },	/* Polaris10 */
{ 0x67C2, &polaris10_device_info },	/* Polaris10 */
{ 0x67C4, &polaris10_device_info },	/* Polaris10 */
{ 0x67C7, &polaris10_device_info },	/* Polaris10 */
{ 0x67C8, &polaris10_device_info },	/* Polaris10 */
{ 0x67C9, &polaris10_device_info },	/* Polaris10 */
{ 0x67CA, &polaris10_device_info },	/* Polaris10 */
{ 0x67CC, &polaris10_device_info },	/* Polaris10 */
{ 0x67CF, &polaris10_device_info },	/* Polaris10 */
{ 0x67D0, &polaris10_vf_device_info },	/* Polaris10 vf*/
{ 0x67DF, &polaris10_device_info },	/* Polaris10 */
{ 0x6FDF, &polaris10_device_info },	/* Polaris10 */
{ 0x67E0, &polaris11_device_info },	/* Polaris11 */
{ 0x67E1, &polaris11_device_info },	/* Polaris11 */
{ 0x67E3, &polaris11_device_info },	/* Polaris11 */
{ 0x67E7, &polaris11_device_info },	/* Polaris11 */
{ 0x67E8, &polaris11_device_info },	/* Polaris11 */
{ 0x67E9, &polaris11_device_info },	/* Polaris11 */
{ 0x67EB, &polaris11_device_info },	/* Polaris11 */
{ 0x67EF, &polaris11_device_info },	/* Polaris11 */
{ 0x67FF, &polaris11_device_info },	/* Polaris11 */
{ 0x6860, &vega10_device_info },	/* Vega10 */
{ 0x6861, &vega10_device_info },	/* Vega10 */
{ 0x6862, &vega10_device_info },	/* Vega10 */
{ 0x6863, &vega10_device_info },	/* Vega10 */
{ 0x6864, &vega10_device_info },	/* Vega10 */
{ 0x6867, &vega10_device_info },	/* Vega10 */
{ 0x6868, &vega10_device_info },	/* Vega10 */
{ 0x6869, &vega10_device_info },	/* Vega10 */
{ 0x686A, &vega10_device_info },	/* Vega10 */
{ 0x686B, &vega10_device_info },	/* Vega10 */
{ 0x686C, &vega10_vf_device_info },	/* Vega10 vf*/
{ 0x686D, &vega10_device_info },	/* Vega10 */
{ 0x686E, &vega10_device_info },	/* Vega10 */
{ 0x686F, &vega10_device_info },	/* Vega10 */
{ 0x687F, &vega10_device_info },	/* Vega10 */
};
/* Forward declarations for static helpers defined later in this file. */
static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size,
unsigned int chunk_size);
static void kfd_gtt_sa_fini(struct kfd_dev *kfd);
static int kfd_resume(struct kfd_dev *kfd);
static const struct kfd_device_info *lookup_device_info(unsigned short did)
{
size_t i;
for (i = 0; i < ARRAY_SIZE(supported_devices); i++) {
if (supported_devices[i].did == did) {
WARN_ON(!supported_devices[i].device_info);
return supported_devices[i].device_info;
}
}
dev_warn(kfd_device, "DID %04x is missing in supported_devices\n",
did);
return NULL;
}
/*
 * kgd2kfd_probe - allocate and minimally initialize a kfd_dev for one GPU.
 * @kgd: opaque KGD (amdgpu) device handle this KFD instance wraps.
 * @pdev: PCI device; pdev->device selects the kfd_device_info table entry.
 * @f2g: KFD-to-KGD callback table supplied by the caller.
 *
 * Returns a new kfd_dev with init_complete == false (full bring-up happens
 * later in kgd2kfd_device_init()), or NULL when the device ID is unknown,
 * the ASIC requires PCIe atomics that the root port rejects, or allocation
 * fails.
 */
struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd,
struct pci_dev *pdev, const struct kfd2kgd_calls *f2g)
{
struct kfd_dev *kfd;
int ret;
const struct kfd_device_info *device_info =
lookup_device_info(pdev->device);
if (!device_info) {
dev_err(kfd_device, "kgd2kfd_probe failed\n");
return NULL;
}
/* Allow BIF to recode atomics to PCIe 3.0 AtomicOps.
 * 32 and 64-bit requests are possible and must be
 * supported.
 */
ret = pci_enable_atomic_ops_to_root(pdev,
PCI_EXP_DEVCAP2_ATOMIC_COMP32 |
PCI_EXP_DEVCAP2_ATOMIC_COMP64);
/* Atomics are enabled unconditionally; failure is only fatal for
 * ASICs whose device_info marks them as required.
 */
if (device_info->needs_pci_atomics && ret < 0) {
dev_info(kfd_device,
"skipped device %x:%x, PCI rejects atomics\n",
pdev->vendor, pdev->device);
return NULL;
}
kfd = kzalloc(sizeof(*kfd), GFP_KERNEL);
if (!kfd)
return NULL;
kfd->kgd = kgd;
kfd->device_info = device_info;
kfd->pdev = pdev;
kfd->init_complete = false;
kfd->kfd2kgd = f2g;
mutex_init(&kfd->doorbell_mutex);
/* No doorbell pages handed out yet. */
memset(&kfd->doorbell_available_index, 0,
sizeof(kfd->doorbell_available_index));
return kfd;
}
/*
 * kfd_cwsr_init - select the CWSR (compute wave save/restore) trap handler.
 * Picks the GFXv8 or GFXv9 trap-handler binary by ASIC generation and
 * records it on @kfd. No-op when CWSR is disabled by module parameter or
 * unsupported by this ASIC. Each handler must fit in one page because it
 * is mapped into a single page of the process address space.
 */
static void kfd_cwsr_init(struct kfd_dev *kfd)
{
	if (!cwsr_enable || !kfd->device_info->supports_cwsr)
		return;

	if (kfd->device_info->asic_family < CHIP_VEGA10) {
		BUILD_BUG_ON(sizeof(cwsr_trap_gfx8_hex) > PAGE_SIZE);
		kfd->cwsr_isa = cwsr_trap_gfx8_hex;
		kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx8_hex);
	} else {
		BUILD_BUG_ON(sizeof(cwsr_trap_gfx9_hex) > PAGE_SIZE);
		kfd->cwsr_isa = cwsr_trap_gfx9_hex;
		kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx9_hex);
	}

	kfd->cwsr_enabled = true;
}
/*
 * kgd2kfd_device_init - bring a probed kfd_dev fully up.
 * @kfd: device created by kgd2kfd_probe().
 * @gpu_resources: VMID range, doorbells and queue resources shared by KGD.
 *
 * Sizes and allocates the GTT memory pool, then initializes the GTT
 * sub-allocator, doorbells, topology, interrupts, the device queue
 * manager, IOMMUv2 (where applicable) and CWSR, and finally resumes the
 * device. On any failure the already-initialized stages are unwound in
 * reverse order via the goto ladder below. Returns kfd->init_complete,
 * i.e. true only when every stage succeeded.
 */
bool kgd2kfd_device_init(struct kfd_dev *kfd,
const struct kgd2kfd_shared_resources *gpu_resources)
{
unsigned int size;
kfd->shared_resources = *gpu_resources;
/* Derive the contiguous VMID range KFD may use from the bitmap. */
kfd->vm_info.first_vmid_kfd = ffs(gpu_resources->compute_vmid_bitmap)-1;
kfd->vm_info.last_vmid_kfd = fls(gpu_resources->compute_vmid_bitmap)-1;
kfd->vm_info.vmid_num_kfd = kfd->vm_info.last_vmid_kfd
- kfd->vm_info.first_vmid_kfd + 1;
/* Verify module parameters regarding mapped process number*/
if ((hws_max_conc_proc < 0)
|| (hws_max_conc_proc > kfd->vm_info.vmid_num_kfd)) {
dev_err(kfd_device,
"hws_max_conc_proc %d must be between 0 and %d, use %d instead\n",
hws_max_conc_proc, kfd->vm_info.vmid_num_kfd,
kfd->vm_info.vmid_num_kfd);
kfd->max_proc_per_quantum = kfd->vm_info.vmid_num_kfd;
} else
kfd->max_proc_per_quantum = hws_max_conc_proc;
/* calculate max size of mqds needed for queues */
size = max_num_of_queues_per_device *
kfd->device_info->mqd_size_aligned;
/*
 * calculate max size of runlist packet.
 * There can be only 2 packets at once
 */
size += (KFD_MAX_NUM_OF_PROCESSES * sizeof(struct pm4_mes_map_process) +
max_num_of_queues_per_device * sizeof(struct pm4_mes_map_queues)
+ sizeof(struct pm4_mes_runlist)) * 2;
/* Add size of HIQ & DIQ */
size += KFD_KERNEL_QUEUE_SIZE * 2;
/* add another 512KB for all other allocations on gart (HPD, fences) */
size += 512 * 1024;
if (kfd->kfd2kgd->init_gtt_mem_allocation(
kfd->kgd, size, &kfd->gtt_mem,
&kfd->gtt_start_gpu_addr, &kfd->gtt_start_cpu_ptr,
false)) {
dev_err(kfd_device, "Could not allocate %d bytes\n", size);
goto out;
}
dev_info(kfd_device, "Allocated %d bytes on gart\n", size);
/* Initialize GTT sa with 512 byte chunk size */
if (kfd_gtt_sa_init(kfd, size, 512) != 0) {
dev_err(kfd_device, "Error initializing gtt sub-allocator\n");
goto kfd_gtt_sa_init_error;
}
if (kfd_doorbell_init(kfd)) {
dev_err(kfd_device,
"Error initializing doorbell aperture\n");
goto kfd_doorbell_error;
}
if (kfd_topology_add_device(kfd)) {
dev_err(kfd_device, "Error adding device to topology\n");
goto kfd_topology_add_device_error;
}
if (kfd_interrupt_init(kfd)) {
dev_err(kfd_device, "Error initializing interrupts\n");
goto kfd_interrupt_error;
}
kfd->dqm = device_queue_manager_init(kfd);
if (!kfd->dqm) {
dev_err(kfd_device, "Error initializing queue manager\n");
goto device_queue_manager_error;
}
if (kfd_iommu_device_init(kfd)) {
dev_err(kfd_device, "Error initializing iommuv2\n");
goto device_iommu_error;
}
kfd_cwsr_init(kfd);
if (kfd_resume(kfd))
goto kfd_resume_error;
kfd->dbgmgr = NULL;
kfd->init_complete = true;
dev_info(kfd_device, "added device %x:%x\n", kfd->pdev->vendor,
kfd->pdev->device);
pr_debug("Starting kfd with the following scheduling policy %d\n",
kfd->dqm->sched_policy);
goto out;
/* Error unwinding: each label tears down the stages that succeeded
 * before the failure, in reverse initialization order.
 */
kfd_resume_error:
device_iommu_error:
device_queue_manager_uninit(kfd->dqm);
device_queue_manager_error:
kfd_interrupt_exit(kfd);
kfd_interrupt_error:
kfd_topology_remove_device(kfd);
kfd_topology_add_device_error:
kfd_doorbell_fini(kfd);
kfd_doorbell_error:
kfd_gtt_sa_fini(kfd);
kfd_gtt_sa_init_error:
kfd->kfd2kgd->free_gtt_mem(kfd->kgd, kfd->gtt_mem);
dev_err(kfd_device,
"device %x:%x NOT added due to errors\n",
kfd->pdev->vendor, kfd->pdev->device);
out:
return kfd->init_complete;
}
/*
 * kgd2kfd_device_exit - tear down and free a kfd_dev.
 * Unwinds the stages set up in kgd2kfd_device_init() in reverse order, but
 * only when initialization actually completed; a half-initialized device
 * was already unwound by the init error path, so only the struct itself is
 * freed in that case.
 */
void kgd2kfd_device_exit(struct kfd_dev *kfd)
{
if (kfd->init_complete) {
kgd2kfd_suspend(kfd);
device_queue_manager_uninit(kfd->dqm);
kfd_interrupt_exit(kfd);
kfd_topology_remove_device(kfd);
kfd_doorbell_fini(kfd);
kfd_gtt_sa_fini(kfd);
kfd->kfd2kgd->free_gtt_mem(kfd->kgd, kfd->gtt_mem);
}
kfree(kfd);
}
/*
 * kgd2kfd_pre_reset - quiesce KFD before a GPU reset.
 * Suspends the device, takes the DQM lock to block further queue-manager
 * activity (released later in kgd2kfd_post_reset()), and signals reset
 * events to user space. Returns 0; devices that never finished init are
 * skipped.
 */
int kgd2kfd_pre_reset(struct kfd_dev *kfd)
{
if (!kfd->init_complete)
return 0;
kgd2kfd_suspend(kfd);
/* hold dqm->lock to prevent further execution*/
dqm_lock(kfd->dqm);
kfd_signal_reset_event(kfd);
return 0;
}
/*
 * Fix me. KFD won't be able to resume existing process for now.
 * We will keep all existing process in a evicted state and
 * wait the process to be terminated.
 */
/*
 * kgd2kfd_post_reset - bring KFD back after a GPU reset.
 * Releases the DQM lock taken in kgd2kfd_pre_reset(), resumes the device,
 * and drops the suspend reference taken by kgd2kfd_suspend() during
 * pre-reset. The count is expected to return exactly to zero here; any
 * other value indicates unbalanced suspend/reset accounting.
 */
int kgd2kfd_post_reset(struct kfd_dev *kfd)
{
int ret, count;
if (!kfd->init_complete)
return 0;
dqm_unlock(kfd->dqm);
ret = kfd_resume(kfd);
if (ret)
return ret;
count = atomic_dec_return(&kfd_locked);
WARN_ONCE(count != 0, "KFD reset ref. error");
return 0;
}
/*
 * kfd_is_locked - whether KFD is currently held by a suspend or reset.
 * True while at least one kgd2kfd_suspend()/pre-reset reference is
 * outstanding (kfd_locked > 0).
 */
bool kfd_is_locked(void)
{
	return atomic_read(&kfd_locked) > 0;
}
/*
 * kgd2kfd_suspend - stop a KFD device for system suspend or GPU reset.
 * Increments the global kfd_locked count; the first device to suspend
 * evicts all KFD processes (they are per-system, not per-device). Then
 * stops this device's queue manager and suspends its IOMMU binding.
 * Balanced by kgd2kfd_resume() / kgd2kfd_post_reset().
 */
void kgd2kfd_suspend(struct kfd_dev *kfd)
{
if (!kfd->init_complete)
return;
/* For first KFD device suspend all the KFD processes */
if (atomic_inc_return(&kfd_locked) == 1)
kfd_suspend_all_processes();
kfd->dqm->ops.stop(kfd->dqm);
kfd_iommu_suspend(kfd);
}
/*
 * kgd2kfd_resume - restart a KFD device after kgd2kfd_suspend().
 * Resumes this device, then drops one reference on the global suspend
 * count; when the last device resumes (count reaches 0) all evicted KFD
 * processes are restored. Returns 0 or a negative errno from the resume
 * path.
 */
int kgd2kfd_resume(struct kfd_dev *kfd)
{
int ret, count;
if (!kfd->init_complete)
return 0;
ret = kfd_resume(kfd);
if (ret)
return ret;
count = atomic_dec_return(&kfd_locked);
/* More resumes than suspends would drive the count negative. */
WARN_ONCE(count < 0, "KFD suspend / resume ref. error");
if (count == 0)
ret = kfd_resume_all_processes();
return ret;
}
/*
 * kfd_resume - per-device resume: rebind the IOMMU, then restart the
 * device queue manager. If the queue manager fails to start, the IOMMU
 * binding is suspended again so the device is left fully stopped.
 * Returns 0 on success or a negative errno.
 */
static int kfd_resume(struct kfd_dev *kfd)
{
	int err;

	err = kfd_iommu_resume(kfd);
	if (err) {
		dev_err(kfd_device,
			"Failed to resume IOMMU for device %x:%x\n",
			kfd->pdev->vendor, kfd->pdev->device);
		return err;
	}

	err = kfd->dqm->ops.start(kfd->dqm);
	if (err) {
		dev_err(kfd_device,
			"Error starting queue manager for device %x:%x\n",
			kfd->pdev->vendor, kfd->pdev->device);
		/* Undo the IOMMU resume so nothing is left half-running. */
		kfd_iommu_suspend(kfd);
		return err;
	}

	return 0;
}
/* This is called directly from KGD at ISR. */
/*
 * Runs in interrupt context: no sleeping, spinlock with IRQs saved.
 * Copies interesting ring entries (optionally patched by
 * interrupt_is_wanted()) into KFD's own ring and defers the real handling
 * to interrupt_work on ih_wq. Entries are dropped when interrupts are
 * inactive, unwanted, or the ring is full.
 */
void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
{
uint32_t patched_ihre[KFD_MAX_RING_ENTRY_SIZE];
bool is_patched = false;
unsigned long flags;
if (!kfd->init_complete)
return;
/* Guard against an ASIC entry size larger than our stack buffer. */
if (kfd->device_info->ih_ring_entry_size > sizeof(patched_ihre)) {
dev_err_once(kfd_device, "Ring entry too small\n");
return;
}
spin_lock_irqsave(&kfd->interrupt_lock, flags);
if (kfd->interrupts_active
&& interrupt_is_wanted(kfd, ih_ring_entry,
patched_ihre, &is_patched)
&& enqueue_ih_ring_entry(kfd,
is_patched ? patched_ihre : ih_ring_entry))
queue_work(kfd->ih_wq, &kfd->interrupt_work);
spin_unlock_irqrestore(&kfd->interrupt_lock, flags);
}
/*
 * kgd2kfd_quiesce_mm - evict all queues of the KFD process owning @mm.
 * Returns 0 on success, -ESRCH when no KFD process matches @mm, or the
 * error from kfd_process_evict_queues().
 */
int kgd2kfd_quiesce_mm(struct mm_struct *mm)
{
	struct kfd_process *proc;
	int ret;

	/* Because we are called from arbitrary context (workqueue) as opposed
	 * to process context, kfd_process could attempt to exit while we are
	 * running so the lookup function increments the process ref count.
	 */
	proc = kfd_lookup_process_by_mm(mm);
	if (!proc)
		return -ESRCH;

	ret = kfd_process_evict_queues(proc);

	/* Drop the reference taken by the lookup. */
	kfd_unref_process(proc);
	return ret;
}
/*
 * kgd2kfd_resume_mm - restore the queues of the KFD process owning @mm,
 * undoing a prior kgd2kfd_quiesce_mm(). Returns 0 on success, -ESRCH when
 * no KFD process matches @mm, or the error from
 * kfd_process_restore_queues().
 */
int kgd2kfd_resume_mm(struct mm_struct *mm)
{
	struct kfd_process *proc;
	int ret;

	/* Because we are called from arbitrary context (workqueue) as opposed
	 * to process context, kfd_process could attempt to exit while we are
	 * running so the lookup function increments the process ref count.
	 */
	proc = kfd_lookup_process_by_mm(mm);
	if (!proc)
		return -ESRCH;

	ret = kfd_process_restore_queues(proc);

	/* Drop the reference taken by the lookup. */
	kfd_unref_process(proc);
	return ret;
}
/** kgd2kfd_schedule_evict_and_restore_process - Schedules work queue that will
 *	prepare for safe eviction of KFD BOs that belong to the specified
 *	process.
 *
 * @mm: mm_struct that identifies the specified KFD process
 * @fence: eviction fence attached to KFD process BOs
 *
 * Return: 0 on success (including the already-scheduled/already-signaled
 * cases), -EINVAL for a NULL fence, -ENODEV when @mm has no KFD process.
 */
int kgd2kfd_schedule_evict_and_restore_process(struct mm_struct *mm,
struct dma_fence *fence)
{
struct kfd_process *p;
unsigned long active_time;
unsigned long delay_jiffies = msecs_to_jiffies(PROCESS_ACTIVE_TIME_MS);
if (!fence)
return -EINVAL;
/* Nothing to do if the eviction already completed. */
if (dma_fence_is_signaled(fence))
return 0;
p = kfd_lookup_process_by_mm(mm);
if (!p)
return -ENODEV;
/* A matching seqno means eviction work for this fence is already
 * scheduled; don't queue it twice.
 */
if (fence->seqno == p->last_eviction_seqno)
goto out;
p->last_eviction_seqno = fence->seqno;
/* Avoid KFD process starvation. Wait for at least
 * PROCESS_ACTIVE_TIME_MS before evicting the process again
 */
active_time = get_jiffies_64() - p->last_restore_timestamp;
if (delay_jiffies > active_time)
delay_jiffies -= active_time;
else
delay_jiffies = 0;
/* During process initialization eviction_work.dwork is initialized
 * to kfd_evict_bo_worker
 */
schedule_delayed_work(&p->eviction_work, delay_jiffies);
out:
kfd_unref_process(p);
return 0;
}
/*
 * kfd_gtt_sa_init - set up the GTT sub-allocator over a buffer of
 * @buf_size bytes split into @chunk_size-byte chunks. Allocations are
 * tracked in a bitmap with one bit per chunk, protected by gtt_sa_lock.
 * Returns 0 on success, -EINVAL for bogus sizes, -ENOMEM when the bitmap
 * cannot be allocated.
 */
static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size,
unsigned int chunk_size)
{
	unsigned int num_of_longs;

	/* Reject a zero chunk size, a zero buffer, or a buffer smaller
	 * than a single chunk; short-circuit keeps the WARN behavior of
	 * reporting only the first failed check.
	 */
	if (WARN_ON(buf_size < chunk_size) ||
	    WARN_ON(buf_size == 0) ||
	    WARN_ON(chunk_size == 0))
		return -EINVAL;

	kfd->gtt_sa_chunk_size = chunk_size;
	kfd->gtt_sa_num_of_chunks = buf_size / chunk_size;

	/* One bit per chunk, rounded up to whole longs. */
	num_of_longs = (kfd->gtt_sa_num_of_chunks + BITS_PER_LONG - 1) /
		BITS_PER_LONG;

	kfd->gtt_sa_bitmap = kcalloc(num_of_longs, sizeof(long), GFP_KERNEL);
	if (!kfd->gtt_sa_bitmap)
		return -ENOMEM;

	pr_debug("gtt_sa_num_of_chunks = %d, gtt_sa_bitmap = %p\n",
			kfd->gtt_sa_num_of_chunks, kfd->gtt_sa_bitmap);

	mutex_init(&kfd->gtt_sa_lock);

	return 0;
}
/* Tear down the GTT sub-allocator: destroy its lock and free the chunk
 * bitmap. Counterpart of kfd_gtt_sa_init().
 */
static void kfd_gtt_sa_fini(struct kfd_dev *kfd)
{
mutex_destroy(&kfd->gtt_sa_lock);
kfree(kfd->gtt_sa_bitmap);
}
/*
 * kfd_gtt_sa_calc_gpu_addr - GPU VA of chunk @bit_num in the sub-allocator.
 * @start_addr: GPU VA of the first chunk of the GTT buffer.
 * @bit_num: chunk index (bit position in the allocation bitmap).
 * @chunk_size: size of one chunk in bytes.
 *
 * The index is widened to 64 bit before the multiply: bit_num * chunk_size
 * would otherwise be computed in unsigned int and could wrap for large
 * buffers before being added to the 64-bit base.
 */
static inline uint64_t kfd_gtt_sa_calc_gpu_addr(uint64_t start_addr,
		unsigned int bit_num,
		unsigned int chunk_size)
{
	return start_addr + (uint64_t)bit_num * chunk_size;
}
/*
 * kfd_gtt_sa_calc_cpu_addr - CPU pointer to chunk @bit_num in the
 * sub-allocator.
 * @start_addr: CPU mapping of the first chunk of the GTT buffer.
 * @bit_num: chunk index (bit position in the allocation bitmap).
 * @chunk_size: size of one chunk in bytes.
 *
 * Offset through a char * instead of round-tripping the pointer through
 * uint64_t (which is not portable to 32-bit builds), and widen the index
 * before the multiply so bit_num * chunk_size cannot wrap in unsigned int
 * arithmetic.
 */
static inline uint32_t *kfd_gtt_sa_calc_cpu_addr(void *start_addr,
		unsigned int bit_num,
		unsigned int chunk_size)
{
	return (uint32_t *)((char *)start_addr +
			(uint64_t)bit_num * chunk_size);
}
/*
 * kfd_gtt_sa_allocate - allocate @size bytes from the GTT sub-allocator.
 * @kfd: device whose GTT pool to allocate from.
 * @size: requested size in bytes (rounded up to whole chunks).
 * @mem_obj: on success, set to a new kfd_mem_obj describing the chunk
 *	range and its GPU/CPU addresses; caller frees with kfd_gtt_sa_free().
 *
 * First-fit search over the chunk bitmap for a contiguous run of free
 * chunks large enough for @size, restarting from the break point whenever
 * the run is interrupted. Returns 0 on success, -EINVAL for size 0,
 * -ENOMEM when the request exceeds the pool or no contiguous run exists.
 */
int kfd_gtt_sa_allocate(struct kfd_dev *kfd, unsigned int size,
struct kfd_mem_obj **mem_obj)
{
unsigned int found, start_search, cur_size;
if (size == 0)
return -EINVAL;
if (size > kfd->gtt_sa_num_of_chunks * kfd->gtt_sa_chunk_size)
return -ENOMEM;
*mem_obj = kzalloc(sizeof(struct kfd_mem_obj), GFP_KERNEL);
if (!(*mem_obj))
return -ENOMEM;
pr_debug("Allocated mem_obj = %p for size = %d\n", *mem_obj, size);
start_search = 0;
mutex_lock(&kfd->gtt_sa_lock);
kfd_gtt_restart_search:
/* Find the first chunk that is free */
found = find_next_zero_bit(kfd->gtt_sa_bitmap,
kfd->gtt_sa_num_of_chunks,
start_search);
pr_debug("Found = %d\n", found);
/* If there wasn't any free chunk, bail out */
if (found == kfd->gtt_sa_num_of_chunks)
goto kfd_gtt_no_free_chunk;
/* Update fields of mem_obj */
(*mem_obj)->range_start = found;
(*mem_obj)->range_end = found;
(*mem_obj)->gpu_addr = kfd_gtt_sa_calc_gpu_addr(
kfd->gtt_start_gpu_addr,
found,
kfd->gtt_sa_chunk_size);
(*mem_obj)->cpu_ptr = kfd_gtt_sa_calc_cpu_addr(
kfd->gtt_start_cpu_ptr,
found,
kfd->gtt_sa_chunk_size);
pr_debug("gpu_addr = %p, cpu_addr = %p\n",
(uint64_t *) (*mem_obj)->gpu_addr, (*mem_obj)->cpu_ptr);
/* If we need only one chunk, mark it as allocated and get out */
if (size <= kfd->gtt_sa_chunk_size) {
pr_debug("Single bit\n");
set_bit(found, kfd->gtt_sa_bitmap);
goto kfd_gtt_out;
}
/* Otherwise, try to see if we have enough contiguous chunks */
cur_size = size - kfd->gtt_sa_chunk_size;
do {
(*mem_obj)->range_end =
find_next_zero_bit(kfd->gtt_sa_bitmap,
kfd->gtt_sa_num_of_chunks, ++found);
/*
 * If next free chunk is not contiguous than we need to
 * restart our search from the last free chunk we found (which
 * wasn't contiguous to the previous ones
 */
if ((*mem_obj)->range_end != found) {
start_search = found;
goto kfd_gtt_restart_search;
}
/*
 * If we reached end of buffer, bail out with error
 */
if (found == kfd->gtt_sa_num_of_chunks)
goto kfd_gtt_no_free_chunk;
/* Check if we don't need another chunk */
if (cur_size <= kfd->gtt_sa_chunk_size)
cur_size = 0;
else
cur_size -= kfd->gtt_sa_chunk_size;
} while (cur_size > 0);
pr_debug("range_start = %d, range_end = %d\n",
(*mem_obj)->range_start, (*mem_obj)->range_end);
/* Mark the chunks as allocated */
for (found = (*mem_obj)->range_start;
found <= (*mem_obj)->range_end;
found++)
set_bit(found, kfd->gtt_sa_bitmap);
kfd_gtt_out:
mutex_unlock(&kfd->gtt_sa_lock);
return 0;
kfd_gtt_no_free_chunk:
pr_debug("Allocation failed with mem_obj = %p\n", *mem_obj);
mutex_unlock(&kfd->gtt_sa_lock);
kfree(*mem_obj);
return -ENOMEM;
}
/*
 * kfd_gtt_sa_free - return a kfd_gtt_sa_allocate() allocation to the pool.
 * Clears the chunk range's bits in the bitmap under gtt_sa_lock and frees
 * the mem_obj itself. Always returns 0.
 */
int kfd_gtt_sa_free(struct kfd_dev *kfd, struct kfd_mem_obj *mem_obj)
{
	unsigned int chunk;

	/* Act like kfree when trying to free a NULL object */
	if (!mem_obj)
		return 0;

	pr_debug("Free mem_obj = %p, range_start = %d, range_end = %d\n",
			mem_obj, mem_obj->range_start, mem_obj->range_end);

	mutex_lock(&kfd->gtt_sa_lock);
	/* Mark the chunks as free */
	for (chunk = mem_obj->range_start;
	     chunk <= mem_obj->range_end;
	     chunk++)
		clear_bit(chunk, kfd->gtt_sa_bitmap);
	mutex_unlock(&kfd->gtt_sa_lock);

	kfree(mem_obj);
	return 0;
}
#if defined(CONFIG_DEBUG_FS)
/* This function will send a package to HIQ to hang the HWS
 * which will trigger a GPU reset and bring the HWS back to normal state
 */
/*
 * Debugfs-only fault injection; only meaningful when the hardware
 * scheduler (HWS) policy is active, otherwise returns -EINVAL.
 */
int kfd_debugfs_hang_hws(struct kfd_dev *dev)
{
int r = 0;
if (dev->dqm->sched_policy != KFD_SCHED_POLICY_HWS) {
pr_err("HWS is not enabled");
return -EINVAL;
}
r = pm_debugfs_hang_hws(&dev->dqm->packets);
if (!r)
r = dqm_debugfs_execute_queues(dev->dqm);
return r;
}
#endif
| {
"pile_set_name": "Github"
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.