Dataset schema (three columns per row):
  max_stars_count (int64): values range from 301 to 224k
  text (string): lengths range from 6 to 1.05M characters
  token_count (int64): values range from 3 to 727k

Each row below is listed as its max_stars_count, then its text (a single source file), then its token_count.
max_stars_count: 2,151
<filename>third_party/mockito/src/src/main/java/org/mockito/internal/progress/MockingProgressImpl.java /* * Copyright (c) 2007 Mockito contributors * This program is made available under the terms of the MIT License. */ package org.mockito.internal.progress; import static org.mockito.internal.exceptions.Reporter.unfinishedStubbing; import static org.mockito.internal.exceptions.Reporter.unfinishedVerificationException; import org.mockito.internal.configuration.GlobalConfiguration; import org.mockito.internal.debugging.Localized; import org.mockito.internal.debugging.LocationImpl; import org.mockito.internal.listeners.MockingProgressListener; import org.mockito.internal.listeners.MockingStartedListener; import org.mockito.invocation.Invocation; import org.mockito.invocation.Location; import org.mockito.stubbing.OngoingStubbing; import org.mockito.listeners.StubbingListener; import org.mockito.verification.VerificationMode; import org.mockito.verification.VerificationStrategy; @SuppressWarnings("unchecked") public class MockingProgressImpl implements MockingProgress { private final ArgumentMatcherStorage argumentMatcherStorage = new ArgumentMatcherStorageImpl(); private OngoingStubbing<?> ongoingStubbing; private Localized<VerificationMode> verificationMode; private Location stubbingInProgress = null; private MockingProgressListener listener; private VerificationStrategy verificationStrategy; public MockingProgressImpl() { this.verificationStrategy = getDefaultVerificationStrategy(); } public static VerificationStrategy getDefaultVerificationStrategy() { return new VerificationStrategy() { public VerificationMode maybeVerifyLazily(VerificationMode mode) { return mode; } }; } private StubbingListener stubbingListener; public void reportOngoingStubbing(OngoingStubbing iOngoingStubbing) { this.ongoingStubbing = iOngoingStubbing; } public OngoingStubbing<?> pullOngoingStubbing() { OngoingStubbing<?> temp = ongoingStubbing; ongoingStubbing = null; return temp; } public void verificationStarted(VerificationMode verify) { validateState(); resetOngoingStubbing(); verificationMode = new Localized(verify); } /* (non-Javadoc) * @see org.mockito.internal.progress.MockingProgress#resetOngoingStubbing() */ public void resetOngoingStubbing() { ongoingStubbing = null; } public VerificationMode pullVerificationMode() { if (verificationMode == null) { return null; } VerificationMode temp = verificationMode.getObject(); verificationMode = null; return temp; } public void stubbingStarted() { validateState(); stubbingInProgress = new LocationImpl(); } public void validateState() { validateMostStuff(); //validate stubbing: if (stubbingInProgress != null) { Location temp = stubbingInProgress; stubbingInProgress = null; throw unfinishedStubbing(temp); } } private void validateMostStuff() { //State is cool when GlobalConfiguration is already loaded //this cannot really be tested functionally because I cannot dynamically mess up org.mockito.configuration.MockitoConfiguration class GlobalConfiguration.validate(); if (verificationMode != null) { Location location = verificationMode.getLocation(); verificationMode = null; throw unfinishedVerificationException(location); } getArgumentMatcherStorage().validateState(); } public void stubbingCompleted(Invocation invocation) { stubbingInProgress = null; getStubbingListener().newStubbing(invocation); } public String toString() { return "iOngoingStubbing: " + ongoingStubbing + ", verificationMode: " + verificationMode + ", stubbingInProgress: " + stubbingInProgress; } public 
void reset() { stubbingInProgress = null; verificationMode = null; getArgumentMatcherStorage().reset(); } public ArgumentMatcherStorage getArgumentMatcherStorage() { return argumentMatcherStorage; } public void mockingStarted(Object mock, Class<?> classToMock) { if (listener instanceof MockingStartedListener) { ((MockingStartedListener) listener).mockingStarted(mock, classToMock); } validateMostStuff(); } public void setListener(MockingProgressListener listener) { this.listener = listener; } public void setVerificationStrategy(VerificationStrategy strategy) { this.verificationStrategy = strategy; } public VerificationMode maybeVerifyLazily(VerificationMode mode) { return this.verificationStrategy.maybeVerifyLazily(mode); } public void setStubbingListener(StubbingListener stubbingListener) { this.stubbingListener = stubbingListener; } public StubbingListener getStubbingListener() { if (this.stubbingListener == null) { return NoOpStubbingListener.INSTANCE; } return this.stubbingListener; } }
token_count: 1,960

max_stars_count: 679
<reponame>Grosskopf/openoffice
/**************************************************************
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 *
 *************************************************************/

#ifndef _SVX_INSRC_HXX
#define _SVX_INSRC_HXX

#include <svx/stddlg.hxx>

#include <vcl/fixed.hxx>
#include <vcl/field.hxx>
#include <vcl/button.hxx>
#include <vcl/group.hxx>
#include <vcl/button.hxx>

#include <tools/string.hxx>

class SvxInsRowColDlg : public SvxAbstractInsRowColDlg, public ModalDialog
{
    FixedText       aCount;
    NumericField    aCountEdit;
    FixedLine       aInsFL;

    RadioButton     aBeforeBtn;
    RadioButton     aAfterBtn;
    FixedLine       aPosFL;

    String          aRow;
    String          aCol;

    OKButton        aOKBtn;
    CancelButton    aCancelBtn;
    HelpButton      aHelpBtn;

    bool            bColumn;

public:
    SvxInsRowColDlg( Window* pParent, bool bCol, const rtl::OString& sHelpId );

    virtual short Execute(void);

    virtual bool isInsertBefore() const;
    virtual sal_uInt16 getInsertCount() const;
};

#endif
token_count: 602

max_stars_count: 660
/*
* Copyright (c) 2018, Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included
* in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
//!
//! \file     media_cmd_task.h
//! \brief    Defines the interface for media cmd task
//! \details  The media cmd task is dedicated for command buffer submission
//!
#ifndef __MEDIA_CMD_TASK_H__
#define __MEDIA_CMD_TASK_H__

#include "media_task.h"
#include "mos_os.h"
#include "codechal_debug.h"

class CmdTask : public MediaTask
{
public:
    //!
    //! \brief  CmdTask constructor
    //! \param  [in] osInterface
    //!         Pointer to MOS_INTERFACE
    //!
    CmdTask(PMOS_INTERFACE osInterface);

    virtual ~CmdTask() { }

    virtual MOS_STATUS Submit(bool immediateSubmit, MediaScalability *scalability, CodechalDebugInterface *debugInterface) override;

protected:
#if (_DEBUG || _RELEASE_INTERNAL)
    virtual MOS_STATUS DumpCmdBuffer(PMOS_COMMAND_BUFFER cmdBuffer, CodechalDebugInterface *debugInterface, uint8_t pipeIdx = 0);

    virtual MOS_STATUS DumpCmdBufferAllPipes(PMOS_COMMAND_BUFFER cmdBuffer, CodechalDebugInterface *debugInterface, MediaScalability *scalability);
#endif  // _DEBUG || _RELEASE_INTERNAL

    //! \brief  Calculate Command Size for all packets in packets list
    //!
    //! \return uint32_t
    //!         Command size calculated
    //!
    MOS_STATUS CalculateCmdBufferSizeFromActivePackets();

    PMOS_INTERFACE m_osInterface = nullptr; //!< PMOS_INTERFACE
};

#endif // !__MEDIA_CMD_TASK_H__
token_count: 805

max_stars_count: 920
<gh_stars>100-1000
/*
 * The Dragonite Project
 * -------------------------
 * See the LICENSE file in the root directory for license information.
 */

package com.vecsight.dragonite.sdk.socket;

public class MessageStat {

    private boolean exist = false;

    private long RTT;

    private boolean resended;

    public MessageStat(final boolean exist, final long RTT, final boolean resended) {
        this.exist = exist;
        this.RTT = RTT;
        this.resended = resended;
    }

    public MessageStat() {
    }

    @Override
    public String toString() {
        return "MessageStat{" +
                "exist=" + exist +
                ", RTT=" + RTT +
                ", resended=" + resended +
                '}';
    }

    public boolean isExist() {
        return exist;
    }

    public void setExist(final boolean exist) {
        this.exist = exist;
    }

    public long getRTT() {
        return RTT;
    }

    public void setRTT(final long RTT) {
        this.RTT = RTT;
    }

    public boolean isResended() {
        return resended;
    }

    public void setResended(final boolean resended) {
        this.resended = resended;
    }
}
token_count: 481

max_stars_count: 523
<gh_stars>100-1000
// Copyright <NAME> 2021.
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file ../LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)

#if !defined(CNL_FIXED_POINT_H)
#define CNL_FIXED_POINT_H

#include "integer.h"

/// compositional numeric library
namespace cnl {
    template<typename T>
    struct is_fixed_point : is_integer<T> {
    };

    template<typename T>
    inline constexpr auto is_fixed_point_v = is_fixed_point<T>::value;

    template<typename T>
    concept fixed_point = is_fixed_point_v<T>;
}

#endif  // CNL_FIXED_POINT_H
token_count: 268
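The sample above only declares the is_fixed_point trait and the fixed_point concept. The sketch below is an editor-added illustration of how they could be used; the stand-in is_integer definition, the q16_16 type, and the choice to opt in by specializing is_fixed_point are assumptions, not part of the original header.

// Editor-added sketch (hypothetical usage of the trait/concept above).
// The cnl definitions are restated with a stand-in is_integer so the
// example is self-contained; the real library's is_integer may differ.
#include <cstdint>
#include <type_traits>

namespace cnl {
    template<typename T> struct is_integer : std::is_integral<T> {};            // stand-in
    template<typename T> struct is_fixed_point : is_integer<T> {};
    template<typename T> inline constexpr auto is_fixed_point_v = is_fixed_point<T>::value;
    template<typename T> concept fixed_point = is_fixed_point_v<T>;
}

// A hypothetical user-defined Q16.16 type opts in by specializing the trait.
struct q16_16 { std::int32_t raw; };
template<> struct cnl::is_fixed_point<q16_16> : std::true_type {};

// A function template constrained by the concept.
template<cnl::fixed_point T>
constexpr T pass_through(T value) { return value; }

static_assert(cnl::fixed_point<int>);      // integral types satisfy the stand-in trait
static_assert(cnl::fixed_point<q16_16>);   // the user type satisfies it via the specialization
static_assert(!cnl::fixed_point<double>);  // floating-point types do not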
max_stars_count: 435
package org.uma.jmetal.problem.multiobjective;

import org.uma.jmetal.problem.binaryproblem.impl.AbstractBinaryProblem;
import org.uma.jmetal.solution.binarysolution.BinarySolution;
import org.uma.jmetal.solution.binarysolution.impl.DefaultBinarySolution;
import org.uma.jmetal.util.errorchecking.JMetalException;

import java.util.Arrays;
import java.util.BitSet;
import java.util.List;

/**
 * Class representing problem OneZeroMax. The problem consist of maximizing the
 * number of '1's and '0's in a binary string.
 */
@SuppressWarnings("serial")
public class OneZeroMax extends AbstractBinaryProblem {
  private int bits ;

  /** Constructor */
  public OneZeroMax() throws JMetalException {
    this(512);
  }

  /** Constructor */
  public OneZeroMax(Integer numberOfBits) throws JMetalException {
    setNumberOfVariables(1);
    setNumberOfObjectives(2);
    setName("OneZeroMax");

    bits = numberOfBits ;
  }

  @Override
  public List<Integer> getListOfBitsPerVariable() {
    return Arrays.asList(bits);
  }

  @Override
  public int getBitsFromVariable(int index) {
    if (index != 0) {
      throw new JMetalException("Problem OneZeroMax has only a variable. Index = " + index) ;
    }
    return bits ;
  }

  @Override
  public BinarySolution createSolution() {
    return new DefaultBinarySolution(getListOfBitsPerVariable(), getNumberOfObjectives()) ;
  }

  /** Evaluate() method */
  @Override
  public BinarySolution evaluate(BinarySolution solution) {
    int counterOnes;
    int counterZeroes;

    counterOnes = 0;
    counterZeroes = 0;

    BitSet bitset = solution.variables().get(0) ;

    for (int i = 0; i < bitset.length(); i++) {
      if (bitset.get(i)) {
        counterOnes++;
      } else {
        counterZeroes++;
      }
    }

    // OneZeroMax is a maximization problem: multiply by -1 to minimize
    solution.objectives()[0] = -1.0 * counterOnes ;
    solution.objectives()[1] = -1.0 * counterZeroes ;

    return solution ;
  }
}
token_count: 686

max_stars_count: 2,542
<reponame>gridgentoo/ServiceFabricAzure<gh_stars>1000+
// ------------------------------------------------------------
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License (MIT). See License.txt in the repo root for license information.
// ------------------------------------------------------------

#pragma once

#include <stdint.h>
#include <string>

using namespace std;

#ifdef _WIN32

#include <windows.h>

static void * load_library(const char *fileName)
{
    return ::LoadLibraryA(fileName);
}

static void *load_symbol(void * module, const char *symbolName)
{
    return ::GetProcAddress((HMODULE)module, symbolName);
}

static string get_load_error()
{
    DWORD err = ::GetLastError();
    // @TODO: Call FormatMessage
    return to_string(err);
}

// VC++ compiler was giving warning here for unused
// function. This was causing linking error as this
// function is actually being called. So ignore the
// warning here.
#pragma warning(push)
#pragma warning(disable: 4505)
static void unload_library(void *module)
{
    FreeLibrary((HMODULE)module);
    return;
}
#pragma warning(pop)

#else

typedef void *HMODULE;

#include <dlfcn.h>

static HMODULE load_library(const char *fileName)
{
    HMODULE mod;

    // Use RTLD_NOW to force early symbol resolution to flush out issues earlier
    // This enable diagnosing .so symbol binding issues without using a cluster
    mod = dlopen(fileName, RTLD_NOW);

    return mod;
}

static void *load_symbol(HMODULE module, const char *symbolName)
{
    void *ret;

    ret = dlsym(module, symbolName);
    if(!ret)
        printf("[%s@%d] symbol %s not found.\n", __FUNCTION__, __LINE__, symbolName);

    return ret;
}

static string get_load_error()
{
    return string(dlerror());
}

static void unload_library(void *module)
{
    dlclose(module);
    return;
}

#endif
token_count: 626
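Since the sample above defines a small cross-platform dynamic-loading shim, a brief editor-added usage sketch follows; the library name libexample.so and the exported symbol entry_point are placeholders, and the sketch assumes it is compiled in a translation unit that includes the header above.

// Editor-added sketch (hypothetical caller of the wrappers above).
#include <cstdio>

typedef int (*entry_fn)(void);

static int run_plugin()
{
    void *mod = load_library("libexample.so");   // would be e.g. "example.dll" on Windows
    if (mod == nullptr)
    {
        printf("load failed: %s\n", get_load_error().c_str());
        return -1;
    }

    entry_fn entry = (entry_fn)load_symbol(mod, "entry_point");
    int result = (entry != nullptr) ? entry() : -1;

    unload_library(mod);
    return result;
}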
max_stars_count: 984
package me.loda.spring.specification;
/*******************************************************
 * For Vietnamese readers:
 * Dear friends, I am very glad if this project is helpful
 * to you in your studies and work. If you reuse all or part
 * of the source code, please include a link to the GitHub
 * repository or the author's name.
 * Thank you!
 *******************************************************/
import static me.loda.spring.specification.User.UserType.NORMAL;
import static me.loda.spring.specification.UserSpecification.*;

import java.util.Arrays;

import org.springframework.boot.CommandLineRunner;
import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;
import org.springframework.context.annotation.Bean;
import org.springframework.data.jpa.domain.Specification;

import lombok.RequiredArgsConstructor;
import me.loda.spring.specification.User.UserType;

/**
 * Copyright 2019 {@author Loda} (https://loda.me).
 * This project is licensed under the MIT license.
 *
 * @since 12/10/2019
 * Github: https://github.com/loda-kun
 */
@SpringBootApplication
@RequiredArgsConstructor
public class App {
    public static void main(String[] args) {
        SpringApplication.run(App.class, args);
    }

    private final UserRepository userRepository;

    @Bean
    CommandLineRunner run() {
        return args -> {
            // Fetch the users whose ID is in the given set and whose type is NORMAL,
            // or the user with ID = 10
            Specification conditions = Specification.where(hasIdIn(Arrays.asList(1L, 2L, 3L, 4L, 5L)))
                    .and(hasType(NORMAL))
                    .or(hasId(10L));

            // Pass the Specification into the findAll() method
            userRepository.findAll(conditions).forEach(System.out::println);
        };
    }
}
token_count: 874

max_stars_count: 745
import json

from .oauth import OAuth2Test


class StripeOAuth2Test(OAuth2Test):
    backend_path = 'social_core.backends.stripe.StripeOAuth2'
    access_token_body = json.dumps({
        'stripe_publishable_key': 'pk_test_foobar',
        'access_token': 'foobar',
        'livemode': False,
        'token_type': 'bearer',
        'scope': 'read_only',
        'refresh_token': '<PASSWORD>foobar',
        'stripe_user_id': 'acct_foobar'
    })
    expected_username = 'acct_foobar'

    def test_login(self):
        self.do_login()

    def test_partial_pipeline(self):
        self.do_partial_pipeline()
token_count: 285

max_stars_count: 984
/* * Copyright DataStax, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.datastax.dse.driver.api.core.auth; import com.datastax.oss.driver.shaded.guava.common.base.Preconditions; import com.datastax.oss.driver.shaded.guava.common.collect.Maps; import com.datastax.oss.driver.shaded.guava.common.collect.Sets; import com.datastax.oss.driver.shaded.guava.common.io.Files; import java.io.BufferedWriter; import java.io.File; import java.io.FileOutputStream; import java.io.IOException; import java.io.OutputStreamWriter; import java.io.PrintWriter; import java.net.InetAddress; import java.net.ServerSocket; import java.net.UnknownHostException; import java.nio.charset.Charset; import java.util.Collections; import java.util.Map; import java.util.UUID; import org.apache.directory.api.ldap.model.constants.SchemaConstants; import org.apache.directory.api.ldap.model.constants.SupportedSaslMechanisms; import org.apache.directory.api.ldap.model.csn.CsnFactory; import org.apache.directory.api.ldap.model.entry.Entry; import org.apache.directory.api.ldap.model.exception.LdapException; import org.apache.directory.api.ldap.model.exception.LdapInvalidDnException; import org.apache.directory.api.ldap.model.name.Dn; import org.apache.directory.api.ldap.model.schema.SchemaManager; import org.apache.directory.api.ldap.schemamanager.impl.DefaultSchemaManager; import org.apache.directory.server.constants.ServerDNConstants; import org.apache.directory.server.core.DefaultDirectoryService; import org.apache.directory.server.core.api.CacheService; import org.apache.directory.server.core.api.DirectoryService; import org.apache.directory.server.core.api.DnFactory; import org.apache.directory.server.core.api.InstanceLayout; import org.apache.directory.server.core.api.schema.SchemaPartition; import org.apache.directory.server.core.kerberos.KeyDerivationInterceptor; import org.apache.directory.server.core.partition.impl.btree.jdbm.JdbmPartition; import org.apache.directory.server.core.partition.ldif.LdifPartition; import org.apache.directory.server.core.shared.DefaultDnFactory; import org.apache.directory.server.kerberos.KerberosConfig; import org.apache.directory.server.kerberos.kdc.KdcServer; import org.apache.directory.server.kerberos.shared.crypto.encryption.KerberosKeyFactory; import org.apache.directory.server.kerberos.shared.keytab.Keytab; import org.apache.directory.server.kerberos.shared.keytab.KeytabEntry; import org.apache.directory.server.ldap.LdapServer; import org.apache.directory.server.ldap.handlers.sasl.MechanismHandler; import org.apache.directory.server.ldap.handlers.sasl.cramMD5.CramMd5MechanismHandler; import org.apache.directory.server.ldap.handlers.sasl.digestMD5.DigestMd5MechanismHandler; import org.apache.directory.server.ldap.handlers.sasl.gssapi.GssapiMechanismHandler; import org.apache.directory.server.ldap.handlers.sasl.plain.PlainMechanismHandler; import org.apache.directory.server.protocol.shared.transport.TcpTransport; import 
org.apache.directory.server.protocol.shared.transport.UdpTransport; import org.apache.directory.shared.kerberos.KerberosTime; import org.apache.directory.shared.kerberos.codec.types.EncryptionType; import org.apache.directory.shared.kerberos.components.EncryptionKey; import org.slf4j.Logger; import org.slf4j.LoggerFactory; /** * A convenience utility for running an Embedded Apache Directory Service with LDAP and optionally a * Kerberos Key Distribution Server. By default listens for LDAP on 10389 and Kerberos on 60088. You * can use something like <a href="https://directory.apache.org/studio/">Apache Directory Studio</a> * to verify the server is configured and running correctly by connecting to localhost:10389 with * username 'uid=admin,ou=system' and password '<PASSWORD>'. * * <p><b>Note:</b> This should only be used for development and testing purposes. */ public class EmbeddedAds { private static final Logger LOG = LoggerFactory.getLogger(EmbeddedAds.class); private final String dn; private final String realm; private int kdcPort; private int ldapPort; private final boolean kerberos; private InetAddress address; private String hostname; private File confDir; private volatile boolean isInit = false; private DirectoryService service; private LdapServer ldapServer; private KdcServer kdcServer; private Dn usersDN; private File krb5Conf; private EmbeddedAds( String dn, String realm, String address, int ldapPort, boolean kerberos, int kdcPort, File confDir) { this.dn = dn; this.realm = realm; try { this.address = InetAddress.getByName(address); } catch (UnknownHostException e) { LOG.error("Failure resolving address '{}', falling back to loopback.", address, e); this.address = InetAddress.getLoopbackAddress(); } this.hostname = this.address.getHostName().toLowerCase(); this.ldapPort = ldapPort; this.kerberos = kerberos; this.kdcPort = kdcPort; this.confDir = confDir; } public void start() throws Exception { if (isInit) { return; } isInit = true; File workDir = Files.createTempDir(); // Set confDir = workDir if not defined. if (confDir == null) { confDir = workDir; } if (kerberos) { kdcPort = kdcPort != -1 ? kdcPort : findAvailablePort(60088); // Set system properties required for kerberos auth to work. Unfortunately admin_server // cannot be expressed via System properties (like realm and kdc can), thus we must create a // config file. krb5Conf = createKrb5Conf(); System.setProperty("java.security.krb5.conf", krb5Conf.getAbsolutePath()); // Useful options for debugging. // System.setProperty("sun.security.krb5.debug", "true"); // System.setProperty("java.security.debug", "configfile,configparser,gssloginconfig"); } // Initialize service and set its filesystem layout. service = new DefaultDirectoryService(); InstanceLayout layout = new InstanceLayout(workDir); service.setInstanceLayout(layout); // Disable ChangeLog as we don't need change tracking. service.getChangeLog().setEnabled(false); // Denormalizes attribute DNs to be human readable, i.e uid=admin,ou=system instead of // 0.9.2.3=admin,2.5=system) service.setDenormalizeOpAttrsEnabled(true); // Create and init cache service which will be used for caching DNs, among other things. CacheService cacheService = new CacheService(); cacheService.initialize(layout); // Create and load SchemaManager which will create the default schema partition. 
SchemaManager schemaManager = new DefaultSchemaManager(); service.setSchemaManager(schemaManager); schemaManager.loadAllEnabled(); // Create SchemaPartition from schema manager and load ldif from schema directory. SchemaPartition schemaPartition = new SchemaPartition(schemaManager); LdifPartition ldifPartition = new LdifPartition(schemaManager, service.getDnFactory()); ldifPartition.setPartitionPath(new File(layout.getPartitionsDirectory(), "schema").toURI()); schemaPartition.setWrappedPartition(ldifPartition); service.setSchemaPartition(schemaPartition); // Create a DN factory which can be used to create and cache DNs. DnFactory dnFactory = new DefaultDnFactory(schemaManager, cacheService.getCache("dnCache")); service.setDnFactory(dnFactory); // Create mandatory system partition. This is used for storing server configuration. JdbmPartition systemPartition = createPartition("system", dnFactory.create(ServerDNConstants.SYSTEM_DN)); service.setSystemPartition(systemPartition); // Now that we have a schema and system partition, start up the directory service. service.startup(); // Create partition where user, tgt and ldap principals will live. Dn partitionDn = dnFactory.create(dn); String dnName = partitionDn.getRdn().getValue().getString(); JdbmPartition partition = createPartition(dnName, partitionDn); // Add a context entry so the partition can be referenced by entries. Entry context = service.newEntry(partitionDn); context.add("objectClass", "top", "domain", "extensibleObject"); context.add(partitionDn.getRdn().getType(), dnName); partition.setContextEntry(context); service.addPartition(partition); // Create users domain. usersDN = partitionDn.add(dnFactory.create("ou=users")); Entry usersEntry = service.newEntry(usersDN); usersEntry.add("objectClass", "organizationalUnit", "top"); usersEntry.add("ou", "users"); if (kerberos) { usersEntry = kerberize(usersEntry); } service.getAdminSession().add(usersEntry); // Uncomment to allow to connect to ldap server without credentials for convenience. // service.setAllowAnonymousAccess(true); startLdap(); // Create sasl and krbtgt principals and start KDC if kerberos is enabled. if (kerberos) { // Ticket Granting Ticket entry. Dn tgtDN = usersDN.add(dnFactory.create("uid=krbtgt")); String servicePrincipal = "krbtgt/" + realm + "@" + realm; Entry tgtEntry = service.newEntry(tgtDN); tgtEntry.add( "objectClass", "person", "inetOrgPerson", "top", "krb5KDCEntry", "uidObject", "krb5Principal"); tgtEntry.add("krb5KeyVersionNumber", "0"); tgtEntry.add("krb5PrincipalName", servicePrincipal); tgtEntry.add("uid", "krbtgt"); tgtEntry.add("userPassword", "<PASSWORD>"); tgtEntry.add("sn", "Service"); tgtEntry.add("cn", "KDC Service"); service.getAdminSession().add(kerberize(tgtEntry)); // LDAP SASL principal. 
String saslPrincipal = "ldap/" + hostname + "@" + realm; ldapServer.setSaslPrincipal(saslPrincipal); Dn ldapDN = usersDN.add(dnFactory.create("uid=ldap")); Entry ldapEntry = service.newEntry(ldapDN); ldapEntry.add( "objectClass", "top", "person", "inetOrgPerson", "krb5KDCEntry", "uidObject", "krb5Principal"); ldapEntry.add("krb5KeyVersionNumber", "0"); ldapEntry.add("krb5PrincipalName", saslPrincipal); ldapEntry.add("uid", "ldap"); ldapEntry.add("userPassword", "<PASSWORD>"); ldapEntry.add("sn", "Service"); ldapEntry.add("cn", "LDAP Service"); service.getAdminSession().add(kerberize(ldapEntry)); startKDC(servicePrincipal); } } public boolean isStarted() { return this.isInit; } private File createKrb5Conf() throws IOException { File krb5Conf = new File(confDir, "krb5.conf"); String config = String.format( "[libdefaults]%n" + "default_realm = %s%n" + "default_tgs_enctypes = aes128-cts-hmac-sha1-96 aes256-cts-hmac-sha1-96%n%n" + "[realms]%n" + "%s = {%n" + " kdc = %s:%d%n" + " admin_server = %s:%d%n" + "}%n", realm, realm, hostname, kdcPort, hostname, kdcPort); try (FileOutputStream fios = new FileOutputStream(krb5Conf)) { PrintWriter pw = new PrintWriter( new BufferedWriter(new OutputStreamWriter(fios, Charset.defaultCharset()))); pw.write(config); pw.close(); } return krb5Conf; } /** * @return A specialized krb5.conf file that defines and defaults to the domain expressed by this * server. */ public File getKrb5Conf() { return krb5Conf; } /** * Adds a user with the given password and principal name and creates a keytab file for * authenticating with that user's principal. * * @param user Username to login with (i.e. cassandra). * @param password Password to authenticate with. * @param principal Principal representing the server (i.e. <EMAIL>). * @return Generated keytab file for this user. */ public File addUserAndCreateKeytab(String user, String password, String principal) throws IOException, LdapException { addUser(user, password, principal); return createKeytab(user, password, principal); } /** * Creates a keytab file for authenticating with a given principal. * * @param user Username to login with (i.e. cassandra). * @param password Password to <PASSWORD>. * @param principal Principal representing the server (i.e. <EMAIL>). * @return Generated keytab file for this user. */ public File createKeytab(String user, String password, String principal) throws IOException { File keytabFile = new File(confDir, user + ".keytab"); Keytab keytab = Keytab.getInstance(); KerberosTime timeStamp = new KerberosTime(System.currentTimeMillis()); Map<EncryptionType, EncryptionKey> keys = KerberosKeyFactory.getKerberosKeys(principal, password); KeytabEntry keytabEntry = new KeytabEntry( principal, 0, timeStamp, (byte) 0, keys.get(EncryptionType.AES128_CTS_HMAC_SHA1_96)); keytab.setEntries(Collections.singletonList(keytabEntry)); keytab.write(keytabFile); return keytabFile; } /** * Adds a user with the given password, does not create necessary kerberos attributes. * * @param user Username to login with (i.e. cassandra). * @param password Password to authenticate with. */ public void addUser(String user, String password) throws LdapException { addUser(user, password, null); } /** * Adds a user with the given password and principal. If principal is specified and kerberos is * enabled, user is created with the necessary attributes to authenticate with kerberos (entryCsn, * entryUuid, etc.). * * @param user Username to login with (i.e. cassandra). * @param password <PASSWORD>. 
* @param principal Principal representing the server (i.e. <EMAIL>). */ public void addUser(String user, String password, String principal) throws LdapException { Preconditions.checkState(isInit); Dn userDN = usersDN.add("uid=" + user); Entry userEntry = service.newEntry(userDN); if (kerberos && principal != null) { userEntry.add( "objectClass", "organizationalPerson", "person", "extensibleObject", "inetOrgPerson", "top", "krb5KDCEntry", "uidObject", "krb5Principal"); userEntry.add("krb5KeyVersionNumber", "0"); userEntry.add("krb5PrincipalName", principal); userEntry = kerberize(userEntry); } else { userEntry.add( "objectClass", "organizationalPerson", "person", "extensibleObject", "inetOrgPerson", "top", "uidObject"); } userEntry.add("uid", user); userEntry.add("sn", user); userEntry.add("cn", user); userEntry.add("userPassword", password); service.getAdminSession().add(userEntry); } /** Stops the server(s) if running. */ public void stop() { if (ldapServer != null) { ldapServer.stop(); } if (kdcServer != null) { kdcServer.stop(); } } /** @return The evaluated hostname that the server is listening with. */ public String getHostname() { return this.hostname; } /** * Adds attributes to the given Entry which will enable krb5key attributes to be added to them. * * @param entry Entry to add attributes to. * @return The provided entry. */ private Entry kerberize(Entry entry) throws LdapException { // Add csn and uuids for kerberos, this is needed to generate krb5keys. entry.add(SchemaConstants.ENTRY_CSN_AT, new CsnFactory(0).newInstance().toString()); entry.add(SchemaConstants.ENTRY_UUID_AT, UUID.randomUUID().toString()); return entry; } /** * Creates a {@link JdbmPartition} with the given id and DN. * * @param id Id to create partition with. * @param dn Distinguished Name to use to create partition. * @return Created partition. */ private JdbmPartition createPartition(String id, Dn dn) throws LdapInvalidDnException { JdbmPartition partition = new JdbmPartition(service.getSchemaManager(), service.getDnFactory()); partition.setId(id); partition.setPartitionPath( new File(service.getInstanceLayout().getPartitionsDirectory(), id).toURI()); partition.setSuffixDn(dn); partition.setSchemaManager(service.getSchemaManager()); return partition; } /** Starts the LDAP Server with SASL enabled. */ private void startLdap() throws Exception { // Create and start LDAP server. ldapServer = new LdapServer(); // Enable SASL layer, this is useful with or without kerberos. Map<String, MechanismHandler> mechanismHandlerMap = Maps.newHashMap(); mechanismHandlerMap.put(SupportedSaslMechanisms.PLAIN, new PlainMechanismHandler()); mechanismHandlerMap.put(SupportedSaslMechanisms.CRAM_MD5, new CramMd5MechanismHandler()); mechanismHandlerMap.put(SupportedSaslMechanisms.DIGEST_MD5, new DigestMd5MechanismHandler()); // GSSAPI is required for kerberos. mechanismHandlerMap.put(SupportedSaslMechanisms.GSSAPI, new GssapiMechanismHandler()); ldapServer.setSaslMechanismHandlers(mechanismHandlerMap); ldapServer.setSaslHost(hostname); // Realms only used by DIGEST_MD5 and GSSAPI. ldapServer.setSaslRealms(Collections.singletonList(realm)); ldapServer.setSearchBaseDn(dn); ldapPort = ldapPort != -1 ? ldapPort : findAvailablePort(10389); ldapServer.setTransports(new TcpTransport(address.getHostAddress(), ldapPort)); ldapServer.setDirectoryService(service); if (kerberos) { // Add an interceptor to attach krb5keys to created principals. 
KeyDerivationInterceptor interceptor = new KeyDerivationInterceptor(); interceptor.init(service); service.addLast(interceptor); } ldapServer.start(); } /** * Starts the Kerberos Key Distribution Server supporting AES128 using the given principal for the * Ticket-granting ticket. * * @param servicePrincipal TGT principcal service. */ private void startKDC(String servicePrincipal) throws Exception { KerberosConfig config = new KerberosConfig(); // We choose AES128_CTS_HMAC_SHA1_96 for our generated keytabs so we don't need JCE. config.setEncryptionTypes(Sets.newHashSet(EncryptionType.AES128_CTS_HMAC_SHA1_96)); config.setSearchBaseDn(dn); config.setServicePrincipal(servicePrincipal); kdcServer = new KdcServer(config); kdcServer.setDirectoryService(service); kdcServer.setTransports( new TcpTransport(address.getHostAddress(), kdcPort), new UdpTransport(address.getHostAddress(), kdcPort)); kdcServer.start(); } public static Builder builder() { return new Builder(); } public static class Builder { private String dn = "dc=datastax,dc=com"; private String realm = "DATASTAX.COM"; private boolean kerberos = false; private int kdcPort = -1; private int ldapPort = -1; private String address = "127.0.0.1"; private File confDir = null; private Builder() {} public EmbeddedAds build() { return new EmbeddedAds(dn, realm, address, ldapPort, kerberos, kdcPort, confDir); } /** * Configures the base DN to create users under. Defaults to <code>dc=datastax,dc=com</code>. */ public Builder withBaseDn(String dn) { this.dn = dn; return this; } /** Configures the realm to use for SASL and Kerberos. Defaults to <code>DATASTAX.COM</code>. */ public Builder withRealm(String realm) { this.realm = realm; return this; } /** * Sets the directory where krb5.conf and generated keytabs are created. Defaults to current * directory. */ public Builder withConfDir(File confDir) { this.confDir = confDir; return this; } /** * Configures the port to use for LDAP. Defaults to the first available port from 10389+. Must * be greater than 0. */ public Builder withLdapPort(int port) { Preconditions.checkArgument(port > 0); this.ldapPort = port; return this; } /** * Configures the port to use for Kerberos KDC. Defaults to the first available port for 60088+. * Must be greater than 0. */ public Builder withKerberos(int port) { Preconditions.checkArgument(port > 0); this.kdcPort = port; return withKerberos(); } /** * Configures the server to run with a Kerberos KDC using the first available port for 60088+. */ public Builder withKerberos() { this.kerberos = true; return this; } /** * Configures the server to be configured to listen with the given address. Defaults to * 127.0.0.1. You shouldn't need to change this. */ public Builder withAddress(String address) { this.address = address; return this; } } private static int findAvailablePort(int startingWith) { IOException last = null; for (int port = startingWith; port < startingWith + 100; port++) { try { ServerSocket s = new ServerSocket(port); s.close(); return port; } catch (IOException e) { last = e; } } // If for whatever reason a port could not be acquired throw the last encountered exception. throw new RuntimeException("Could not acquire an available port", last); } }
token_count: 7,980

max_stars_count: 423
package io.scalecube.services.examples;

public class EmptyGreetingRequest {}
token_count: 24

max_stars_count: 482
from common_fixtures import *  # NOQA

import base64
import json

from cattle import ApiError


@pytest.fixture(scope='module')
def host(super_client, context):
    return super_client.reload(context.host)


@pytest.fixture(scope='module')
def agent_cli(context):
    return context.agent_client


def test_host_api_proxy_token_create(host, agent_cli):
    # Token should be created with the supplied reportedUuid in it.
    token = agent_cli.create_host_api_proxy_token(
        reportedUuid=host.data.fields['reportedUuid'])
    assert token is not None
    parts = token.token.split('.')
    encoded_claims = parts[1]
    encoded_claims += '=' * (4 - (len(encoded_claims) % 4))
    claims = base64.decodestring(encoded_claims)
    claims_obj = json.loads(claims)
    assert claims_obj['reportedUuid'] == host.data.fields['reportedUuid']


def test_bad_host(host, new_context):
    # If a host doesn't belong to agent submitting the request, it should fail.
    agent_cli = new_context.agent_client
    with pytest.raises(ApiError) as e:
        agent_cli.create_host_api_proxy_token(
            reportedUuid=host.data.fields['reportedUuid'])
    assert e.value.error.code == 'InvalidReference'
token_count: 436

max_stars_count: 342
<filename>src/gnu/diffutils/cmp.c /* cmp -- compare two files. Copyright (C) 1990, 1991, 1992, 1993, 1994 Free Software Foundation, Inc. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* Written by <NAME> and <NAME>. */ #include "system.h" #include <stdio.h> #include "getopt.h" #include "cmpbuf.h" extern char const version_string[]; #if __STDC__ && defined (HAVE_VPRINTF) void error (int, int, char const *, ...); #else void error (); #endif VOID *xmalloc PARAMS((size_t)); static int cmp PARAMS((void)); static off_t file_position PARAMS((int)); static size_t block_compare PARAMS((char const *, char const *)); static size_t block_compare_and_count PARAMS((char const *, char const *, long *)); static size_t block_read PARAMS((int, char *, size_t)); static void printc PARAMS((int, unsigned)); static void try_help PARAMS((char const *)); static void check_stdout PARAMS((void)); static void usage PARAMS((void)); /* Name under which this program was invoked. */ char const *program_name; /* Filenames of the compared files. */ static char const *file[2]; /* File descriptors of the files. */ static int file_desc[2]; /* Read buffers for the files. */ static char *buffer[2]; /* Optimal block size for the files. */ static size_t buf_size; /* Initial prefix to ignore for each file. */ static off_t ignore_initial; /* Output format: type_first_diff to print the offset and line number of the first differing bytes type_all_diffs to print the (decimal) offsets and (octal) values of all differing bytes type_status to only return an exit status indicating whether the files differ */ static enum { type_first_diff, type_all_diffs, type_status } comparison_type; /* Type used for fast comparison of several bytes at a time. */ #ifndef word #define word int #endif /* If nonzero, print values of bytes quoted like cat -t does. */ static int opt_print_chars; static struct option const long_options[] = { {"print-chars", 0, 0, 'c'}, {"ignore-initial", 1, 0, 'i'}, {"verbose", 0, 0, 'l'}, {"silent", 0, 0, 's'}, {"quiet", 0, 0, 's'}, {"version", 0, 0, 'v'}, {"help", 0, 0, 129}, {0, 0, 0, 0} }; static void try_help (reason) char const *reason; { if (reason) error (0, 0, "%s", reason); error (2, 0, "Try `%s --help' for more information.", program_name); } static void check_stdout () { if (ferror (stdout)) error (2, 0, "write error"); else if (fclose (stdout) != 0) error (2, errno, "write error"); } static void usage () { printf ("Usage: %s [OPTION]... 
FILE1 [FILE2]\n", program_name); printf ("%s", "\ -c --print-chars Output differing bytes as characters.\n\ -i N --ignore-initial=N Ignore differences in the first N bytes of input.\n\ -l --verbose Output offsets and codes of all differing bytes.\n\ -s --quiet --silent Output nothing; yield exit status only.\n\ -v --version Output version info.\n\ --help Output this help.\n"); printf ("If a FILE is `-' or missing, read standard input.\n"); } int main (argc, argv) int argc; char *argv[]; { int c, i, exit_status; struct stat stat_buf[2]; initialize_main (&argc, &argv); program_name = argv[0]; /* Parse command line options. */ while ((c = getopt_long (argc, argv, "ci:lsv", long_options, 0)) != EOF) switch (c) { case 'c': opt_print_chars = 1; break; case 'i': ignore_initial = 0; while (*optarg) { /* Don't use `atol', because `off_t' may be longer than `long'. */ unsigned digit = *optarg++ - '0'; if (9 < digit) try_help ("non-digit in --ignore-initial value"); ignore_initial = 10 * ignore_initial + digit; } break; case 'l': comparison_type = type_all_diffs; break; case 's': comparison_type = type_status; break; case 'v': printf ("cmp - GNU diffutils version %s\n", version_string); exit (0); case 129: usage (); check_stdout (); exit (0); default: try_help (0); } if (optind == argc) try_help ("missing operand"); file[0] = argv[optind++]; file[1] = optind < argc ? argv[optind++] : "-"; if (optind < argc) try_help ("extra operands"); for (i = 0; i < 2; i++) { /* If file[1] is "-", treat it first; this avoids a misdiagnostic if stdin is closed and opening file[0] yields file descriptor 0. */ int i1 = i ^ (strcmp (file[1], "-") == 0); /* Two files with the same name are identical. But wait until we open the file once, for proper diagnostics. */ if (i && filename_cmp (file[0], file[1]) == 0) exit (0); file_desc[i1] = (strcmp (file[i1], "-") == 0 ? STDIN_FILENO : open (file[i1], O_RDONLY, 0)); if (file_desc[i1] < 0 || fstat (file_desc[i1], &stat_buf[i1]) != 0) { if (file_desc[i1] < 0 && comparison_type == type_status) exit (2); else error (2, errno, "%s", file[i1]); } #if HAVE_SETMODE setmode (file_desc[i1], O_BINARY); #endif } /* If the files are links to the same inode and have the same file position, they are identical. */ if (0 < same_file (&stat_buf[0], &stat_buf[1]) && file_position (0) == file_position (1)) exit (0); /* If output is redirected to the null device, we may assume `-s'. */ if (comparison_type != type_status) { struct stat outstat, nullstat; if (fstat (STDOUT_FILENO, &outstat) == 0 && stat (NULL_DEVICE, &nullstat) == 0 && 0 < same_file (&outstat, &nullstat)) comparison_type = type_status; } /* If only a return code is needed, and if both input descriptors are associated with plain files, conclude that the files differ if they have different sizes. */ if (comparison_type == type_status && S_ISREG (stat_buf[0].st_mode) && S_ISREG (stat_buf[1].st_mode)) { off_t s0 = stat_buf[0].st_size - file_position (0); off_t s1 = stat_buf[1].st_size - file_position (1); if (max (0, s0) != max (0, s1)) exit (1); } /* Get the optimal block size of the files. */ buf_size = buffer_lcm (STAT_BLOCKSIZE (stat_buf[0]), STAT_BLOCKSIZE (stat_buf[1])); /* Allocate buffers, with space for sentinels at the end. 
*/ for (i = 0; i < 2; i++) buffer[i] = xmalloc (buf_size + sizeof (word)); exit_status = cmp (); for (i = 0; i < 2; i++) if (close (file_desc[i]) != 0) error (2, errno, "%s", file[i]); if (exit_status != 0 && comparison_type != type_status) check_stdout (); exit (exit_status); return exit_status; } /* Compare the two files already open on `file_desc[0]' and `file_desc[1]', using `buffer[0]' and `buffer[1]'. Return 0 if identical, 1 if different, >1 if error. */ static int cmp () { long line_number = 1; /* Line number (1...) of first difference. */ long char_number = ignore_initial + 1; /* Offset (1...) in files of 1st difference. */ size_t read0, read1; /* Number of chars read from each file. */ size_t first_diff; /* Offset (0...) in buffers of 1st diff. */ size_t smaller; /* The lesser of `read0' and `read1'. */ char *buf0 = buffer[0]; char *buf1 = buffer[1]; int ret = 0; int i; if (ignore_initial) for (i = 0; i < 2; i++) if (file_position (i) == -1) { /* lseek failed; read and discard the ignored initial prefix. */ off_t ig = ignore_initial; do { size_t r = read (file_desc[i], buf0, (size_t) min (ig, buf_size)); if (!r) break; if (r == -1) error (2, errno, "%s", file[i]); ig -= r; } while (ig); } do { read0 = block_read (file_desc[0], buf0, buf_size); if (read0 == -1) error (2, errno, "%s", file[0]); read1 = block_read (file_desc[1], buf1, buf_size); if (read1 == -1) error (2, errno, "%s", file[1]); /* Insert sentinels for the block compare. */ buf0[read0] = ~buf1[read0]; buf1[read1] = ~buf0[read1]; /* If the line number should be written for differing files, compare the blocks and count the number of newlines simultaneously. */ first_diff = (comparison_type == type_first_diff ? block_compare_and_count (buf0, buf1, &line_number) : block_compare (buf0, buf1)); char_number += first_diff; smaller = min (read0, read1); if (first_diff < smaller) { switch (comparison_type) { case type_first_diff: /* See Posix.2 section 4.10.6.1 for this format. */ printf ("%s %s differ: char %lu, line %lu", file[0], file[1], char_number, line_number); if (opt_print_chars) { unsigned char c0 = buf0[first_diff]; unsigned char c1 = buf1[first_diff]; printf (" is %3o ", c0); printc (0, c0); printf (" %3o ", c1); printc (0, c1); } putchar ('\n'); /* Fall through. */ case type_status: return 1; case type_all_diffs: do { unsigned char c0 = buf0[first_diff]; unsigned char c1 = buf1[first_diff]; if (c0 != c1) { if (opt_print_chars) { printf ("%6lu %3o ", char_number, c0); printc (4, c0); printf (" %3o ", c1); printc (0, c1); putchar ('\n'); } else /* See Posix.2 section 4.10.6.1 for this format. */ printf ("%6lu %3o %3o\n", char_number, c0, c1); } char_number++; first_diff++; } while (first_diff < smaller); ret = 1; break; } } if (read0 != read1) { if (comparison_type != type_status) /* See Posix.2 section 4.10.6.2 for this format. */ fprintf (stderr, "cmp: EOF on %s\n", file[read1 < read0]); return 1; } } while (read0 == buf_size); return ret; } /* Compare two blocks of memory P0 and P1 until they differ, and count the number of '\n' occurrences in the common part of P0 and P1. Assumes that P0 and P1 are aligned at word addresses! If the blocks are not guaranteed to be different, put sentinels at the ends of the blocks before calling this function. Return the offset of the first byte that differs. Increment *COUNT by the count of '\n' occurrences. */ static size_t block_compare_and_count (p0, p1, count) char const *p0, *p1; long *count; { word l; /* One word from first buffer. 
*/ word const *l0, *l1; /* Pointers into each buffer. */ char const *c0, *c1; /* Pointers for finding exact address. */ long cnt = 0; /* Number of '\n' occurrences. */ word nnnn; /* Newline, sizeof (word) times. */ int i; l0 = (word const *) p0; l1 = (word const *) p1; nnnn = 0; for (i = 0; i < sizeof (word); i++) nnnn = (nnnn << CHAR_BIT) | '\n'; /* Find the rough position of the first difference by reading words, not bytes. */ while ((l = *l0++) == *l1++) { l ^= nnnn; for (i = 0; i < sizeof (word); i++) { cnt += ! (unsigned char) l; l >>= CHAR_BIT; } } /* Find the exact differing position (endianness independent). */ c0 = (char const *) (l0 - 1); c1 = (char const *) (l1 - 1); while (*c0 == *c1) { cnt += *c0 == '\n'; c0++; c1++; } *count += cnt; return c0 - p0; } /* Compare two blocks of memory P0 and P1 until they differ. Assumes that P0 and P1 are aligned at word addresses! If the blocks are not guaranteed to be different, put sentinels at the ends of the blocks before calling this function. Return the offset of the first byte that differs. */ static size_t block_compare (p0, p1) char const *p0, *p1; { word const *l0, *l1; char const *c0, *c1; l0 = (word const *) p0; l1 = (word const *) p1; /* Find the rough position of the first difference by reading words, not bytes. */ while (*l0++ == *l1++) ; /* Find the exact differing position (endianness independent). */ c0 = (char const *) (l0 - 1); c1 = (char const *) (l1 - 1); while (*c0 == *c1) { c0++; c1++; } return c0 - p0; } /* Read NCHARS bytes from descriptor FD into BUF. Return the number of characters successfully read. The number returned is always NCHARS unless end-of-file or error. */ static size_t block_read (fd, buf, nchars) int fd; char *buf; size_t nchars; { char *bp = buf; do { size_t nread = read (fd, bp, nchars); if (nread == -1) return -1; if (nread == 0) break; bp += nread; nchars -= nread; } while (nchars != 0); return bp - buf; } /* Print character C, making unprintable characters visible by quoting like cat -t does. Pad with spaces on the right to WIDTH characters. */ static void printc (width, c) int width; unsigned c; { register FILE *fs = stdout; if (! ISPRINT (c)) { if (c >= 128) { putc ('M', fs); putc ('-', fs); c -= 128; width -= 2; } if (c < 32) { putc ('^', fs); c += 64; --width; } else if (c == 127) { putc ('^', fs); c = '?'; --width; } } putc (c, fs); while (--width > 0) putc (' ', fs); } /* Position file I to `ignore_initial' bytes from its initial position, and yield its new position. Don't try more than once. */ static off_t file_position (i) int i; { static int positioned[2]; static off_t position[2]; if (! positioned[i]) { positioned[i] = 1; position[i] = lseek (file_desc[i], ignore_initial, SEEK_CUR); } return position[i]; }
token_count: 5,681

max_stars_count: 365
<filename>source/blender/blenkernel/intern/tracking_plane_tracker.c /* * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. * * The Original Code is Copyright (C) 2011 Blender Foundation. * All rights reserved. */ /** \file * \ingroup bke * * This file contains implementation of plane tracker. */ #include "MEM_guardedalloc.h" #include "DNA_movieclip_types.h" #include "BLI_math.h" #include "BLI_utildefines.h" #include "BKE_tracking.h" #include "libmv-capi.h" typedef double Vec2[2]; static int point_markers_correspondences_on_both_image( MovieTrackingPlaneTrack *plane_track, int frame1, int frame2, Vec2 **x1_r, Vec2 **x2_r) { Vec2 *x1, *x2; *x1_r = x1 = MEM_mallocN(sizeof(*x1) * plane_track->point_tracksnr, "point correspondences x1"); *x2_r = x2 = MEM_mallocN(sizeof(*x1) * plane_track->point_tracksnr, "point correspondences x2"); int correspondence_index = 0; for (int i = 0; i < plane_track->point_tracksnr; i++) { MovieTrackingTrack *point_track = plane_track->point_tracks[i]; MovieTrackingMarker *point_marker1, *point_marker2; point_marker1 = BKE_tracking_marker_get_exact(point_track, frame1); point_marker2 = BKE_tracking_marker_get_exact(point_track, frame2); if (point_marker1 != NULL && point_marker2 != NULL) { /* Here conversion from float to double happens. */ x1[correspondence_index][0] = point_marker1->pos[0]; x1[correspondence_index][1] = point_marker1->pos[1]; x2[correspondence_index][0] = point_marker2->pos[0]; x2[correspondence_index][1] = point_marker2->pos[1]; correspondence_index++; } } return correspondence_index; } /* NOTE: frame number should be in clip space, not scene space */ static void track_plane_from_existing_motion(MovieTrackingPlaneTrack *plane_track, int start_frame, int direction, bool retrack) { MovieTrackingPlaneMarker *start_plane_marker = BKE_tracking_plane_marker_get(plane_track, start_frame); MovieTrackingPlaneMarker *keyframe_plane_marker = NULL; MovieTrackingPlaneMarker new_plane_marker; int frame_delta = direction > 0 ? 1 : -1; if (plane_track->flag & PLANE_TRACK_AUTOKEY) { /* Find a keyframe in given direction. 
*/ for (int current_frame = start_frame;; current_frame += frame_delta) { MovieTrackingPlaneMarker *next_plane_marker = BKE_tracking_plane_marker_get_exact( plane_track, current_frame + frame_delta); if (next_plane_marker == NULL) { break; } if ((next_plane_marker->flag & PLANE_MARKER_TRACKED) == 0) { keyframe_plane_marker = next_plane_marker; break; } } } else { start_plane_marker->flag |= PLANE_MARKER_TRACKED; } new_plane_marker = *start_plane_marker; new_plane_marker.flag |= PLANE_MARKER_TRACKED; for (int current_frame = start_frame;; current_frame += frame_delta) { MovieTrackingPlaneMarker *next_plane_marker = BKE_tracking_plane_marker_get_exact( plane_track, current_frame + frame_delta); Vec2 *x1, *x2; double H_double[3][3]; float H[3][3]; /* As soon as we meet keyframed plane, we stop updating the sequence. */ if (next_plane_marker && (next_plane_marker->flag & PLANE_MARKER_TRACKED) == 0) { /* Don't override keyframes if track is in auto-keyframe mode */ if (plane_track->flag & PLANE_TRACK_AUTOKEY) { break; } } const int num_correspondences = point_markers_correspondences_on_both_image( plane_track, current_frame, current_frame + frame_delta, &x1, &x2); if (num_correspondences < 4) { MEM_freeN(x1); MEM_freeN(x2); break; } libmv_homography2DFromCorrespondencesEuc(x1, x2, num_correspondences, H_double); copy_m3_m3d(H, H_double); for (int i = 0; i < 4; i++) { float vec[3] = {0.0f, 0.0f, 1.0f}, vec2[3]; copy_v2_v2(vec, new_plane_marker.corners[i]); /* Apply homography */ mul_v3_m3v3(vec2, H, vec); /* Normalize. */ vec2[0] /= vec2[2]; vec2[1] /= vec2[2]; copy_v2_v2(new_plane_marker.corners[i], vec2); } new_plane_marker.framenr = current_frame + frame_delta; if (!retrack && keyframe_plane_marker && next_plane_marker && (plane_track->flag & PLANE_TRACK_AUTOKEY)) { float fac = ((float)next_plane_marker->framenr - start_plane_marker->framenr) / ((float)keyframe_plane_marker->framenr - start_plane_marker->framenr); fac = 3 * fac * fac - 2 * fac * fac * fac; for (int i = 0; i < 4; i++) { interp_v2_v2v2(new_plane_marker.corners[i], new_plane_marker.corners[i], next_plane_marker->corners[i], fac); } } BKE_tracking_plane_marker_insert(plane_track, &new_plane_marker); MEM_freeN(x1); MEM_freeN(x2); } } /* NOTE: frame number should be in clip space, not scene space */ void BKE_tracking_track_plane_from_existing_motion(MovieTrackingPlaneTrack *plane_track, int start_frame) { track_plane_from_existing_motion(plane_track, start_frame, 1, false); track_plane_from_existing_motion(plane_track, start_frame, -1, false); } static MovieTrackingPlaneMarker *find_plane_keyframe(MovieTrackingPlaneTrack *plane_track, int start_frame, int direction) { MovieTrackingPlaneMarker *plane_marker = BKE_tracking_plane_marker_get(plane_track, start_frame); int index = plane_marker - plane_track->markers; int frame_delta = direction > 0 ? 1 : -1; while (index >= 0 && index < plane_track->markersnr) { if ((plane_marker->flag & PLANE_MARKER_TRACKED) == 0) { return plane_marker; } plane_marker += frame_delta; } return NULL; } void BKE_tracking_retrack_plane_from_existing_motion_at_segment( MovieTrackingPlaneTrack *plane_track, int start_frame) { MovieTrackingPlaneMarker *prev_plane_keyframe, *next_plane_keyframe; prev_plane_keyframe = find_plane_keyframe(plane_track, start_frame, -1); next_plane_keyframe = find_plane_keyframe(plane_track, start_frame, 1); if (prev_plane_keyframe != NULL && next_plane_keyframe != NULL) { /* First we track from left keyframe to the right one without any blending. 
*/ track_plane_from_existing_motion(plane_track, prev_plane_keyframe->framenr, 1, true); /* And then we track from the right keyframe to the left one, so shape blends in nicely */ track_plane_from_existing_motion(plane_track, next_plane_keyframe->framenr, -1, false); } else if (prev_plane_keyframe != NULL) { track_plane_from_existing_motion(plane_track, prev_plane_keyframe->framenr, 1, true); } else if (next_plane_keyframe != NULL) { track_plane_from_existing_motion(plane_track, next_plane_keyframe->framenr, -1, true); } } BLI_INLINE void float_corners_to_double(/*const*/ float corners[4][2], double double_corners[4][2]) { copy_v2db_v2fl(double_corners[0], corners[0]); copy_v2db_v2fl(double_corners[1], corners[1]); copy_v2db_v2fl(double_corners[2], corners[2]); copy_v2db_v2fl(double_corners[3], corners[3]); } void BKE_tracking_homography_between_two_quads(/*const*/ float reference_corners[4][2], /*const*/ float corners[4][2], float H[3][3]) { Vec2 x1[4], x2[4]; double H_double[3][3]; float_corners_to_double(reference_corners, x1); float_corners_to_double(corners, x2); libmv_homography2DFromCorrespondencesEuc(x1, x2, 4, H_double); copy_m3_m3d(H, H_double); }
<gh_stars>100-1000 /* * Copyright (c) 2020 Bitdefender * SPDX-License-Identifier: Apache-2.0 */ #include "hviface.h" #include "asmlib.h" #include "cpu.h" // // HvRaiseEpt // DWORD HvRaiseEpt( void ) { QWORD rax, rbx, rcx, rdx, rdi, rsi; rax = VE_VMCALL_OP; rdi = VE_VMCALL_SUBOP; rsi = 0; rdx = VE_HCALL_RAISE_EPT; AsmVmcall(&rax, &rbx, &rcx, &rdx, &rdi, &rsi); return (DWORD)rax; } // // HvBreak // DWORD HvBreak( QWORD Reason, QWORD Argument ) { QWORD rax, rbx, rcx, rdx, rdi, rsi; rax = VE_VMCALL_OP; rdi = VE_VMCALL_SUBOP; rsi = 0; rdx = VE_HCALL_BREAK; rbx = Reason; rcx = Argument; AsmVmcall(&rax, &rbx, &rcx, &rdx, &rdi, &rsi); return (DWORD)rax; } // // HvBreak // DWORD HvTrace( QWORD Reason, QWORD Argument ) { QWORD rax, rbx, rcx, rdx, rdi, rsi; rax = VE_VMCALL_OP; rdi = VE_VMCALL_SUBOP; rsi = 0; rdx = VE_HCALL_TRACE; rbx = Reason; rcx = Argument; AsmVmcall(&rax, &rbx, &rcx, &rdx, &rdi, &rsi); return (DWORD)rax; }
<gh_stars>1000+
import imp
import os

from wptserve.utils import isomorphic_decode

here = os.path.dirname(os.path.abspath(isomorphic_decode(__file__)))


def main(request, response):
    auth = imp.load_source(u"", os.path.join(here, u"..", u"authentication.py"))
    return auth.main(request, response)
# coding: utf-8
import random
import unittest

from algorithms.searching.linear_search import linear_search


class TestCase(unittest.TestCase):

    def test(self):
        array = [random.randint(-100, 100) for i in range(10000)]
        target = random.choice(array)
        expected = array.index(target)
        self.assertEqual(linear_search(array, target), expected)

    def test2(self):
        array = [random.randint(-100, 100) for i in range(10000)]
        target = 999
        expected = -1
        self.assertEqual(linear_search(array, target), expected)

    def test3(self):
        array = []
        target = 0
        expected = -1
        self.assertEqual(linear_search(array, target), expected)


if __name__ == '__main__':
    unittest.main()
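The linear_search module exercised by these tests is not included in this excerpt. As a reference for the contract the assertions rely on (index of the first match, -1 when the target is absent or the list is empty), here is a minimal sketch; it is written in C++ to match the other examples added to this document, not taken from the Python module itself.

#include <cstdio>
#include <vector>

// Assumed contract: return the index of the first element equal to target,
// or -1 when nothing matches (including the empty-input case).
static int linear_search(const std::vector<int>& values, int target) {
  for (std::size_t i = 0; i < values.size(); i++) {
    if (values[i] == target) {
      return static_cast<int>(i);
    }
  }
  return -1;
}

int main() {
  const std::vector<int> values = {4, -7, 2, 2, 9};
  std::printf("%d %d %d\n",
              linear_search(values, 2),    // 2: index of the first match
              linear_search(values, 999),  // -1: not present
              linear_search({}, 0));       // -1: empty input
  return 0;
}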
<filename>include/polarphp/basic/RelativePointer.h //===--- RelativePointer.h - Relative Pointer Support -----------*- C++ -*-===// // // This source file is part of the Swift.org open source project // // Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors // Licensed under Apache License v2.0 with Runtime Library Exception // // See https://swift.org/LICENSE.txt for license information // See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors // //===----------------------------------------------------------------------===// // This source file is part of the polarphp.org open source project // // Copyright (c) 2017 - 2019 polarphp software foundation // Copyright (c) 2017 - 2019 zzu_softboy <<EMAIL>> // Licensed under Apache License v2.0 with Runtime Library Exception // // See https://polarphp.org/LICENSE.txt for license information // See https://polarphp.org/CONTRIBUTORS.txt for the list of polarphp project authors // // Created by polarboy on 2019/12/02. // // Some data structures emitted by the Swift compiler use relative indirect // addresses in order to minimize startup cost for a process. By referring to // the offset of the global offset table entry for a symbol, instead of directly // referring to the symbol, compiler-emitted data structures avoid requiring // unnecessary relocation at dynamic linking time. This header contains types // to help dereference these relative addresses. // // Theory of references to objects // ------------------------------- // // A reference can be absolute or relative: // // - An absolute reference is a pointer to the object. // // - A relative reference is a (signed) offset from the address of the // reference to the address of its direct referent. // // A relative reference can be direct, indirect, or symbolic. // // In a direct reference, the direct referent is simply the target object. // Generally, a statically-emitted relative reference can only be direct // if it can be resolved to a constant offset by the linker, because loaders // do not support forming relative references. This means that either the // reference and object must lie within the same linkage unit or the // difference must be computed at runtime by code. // // In a symbolic reference, the direct referent is a string holding the symbol // name of the object. A relative reference can only be symbolic if the // object actually has a symbol at runtime, which may require exporting // many internal symbols that would otherwise be strippable. // // In an indirect reference, the direct referent is a variable holding an // absolute reference to the object. An indirect relative reference may // refer to an arbitrary symbol, be it anonymous within the linkage unit // or completely external to it, but it requires the introduction of an // intermediate absolute reference that requires load-time initialization. // However, this initialization can be shared among all indirect references // within the linkage unit, and the linker will generally place all such // references adjacent to one another to improve load-time locality. // // A reference can be made a dynamic union of more than one of these options. // This allows the compiler/linker to use a direct reference when possible // and a less-efficient option where required. However, it also requires // the cases to be dynamically distinguished. 
This can be done by setting // a low bit of the offset, as long as the difference between the direct // referent's address and the reference is a multiple of 2. This works well // for "indirectable" references because most objects are known to be // well-aligned, and the cases that aren't (chiefly functions and strings) // rarely need the flexibility of this kind of reference. It does not // work quite as well for "possibly symbolic" references because C strings // are not naturally aligned, and making them aligned generally requires // moving them out of the linker's ordinary string section; however, it's // still workable. // // Finally, a relative reference can be near or far. A near reference // is potentially smaller, but it requires the direct referent to lie // within a certain distance of the reference, even if dynamically // initialized. // // In Swift, we always prefer to use a near direct relative reference // when it is possible to do so: that is, when the relationship is always // between two global objects emitted in the same linkage unit, and there // is no compatibility constraint requiring the use of an absolute reference. // // When more flexibility is required, there are several options: // // 1. Use an absolute reference. Size penalty on 64-bit. Requires // load-time work. // // 2. Use a far direct relative reference. Size penalty on 64-bit. // Requires load-time work when object is outside linkage unit. // Generally not directly supported by loaders. // // 3. Use an always-indirect relative reference. Size penalty of one // pointer (shared). Requires load-time work even when object is // within linkage unit. // // 4. Use a near indirectable relative reference. Size penalty of one // pointer (shared) when reference exceeds range. Runtime / code-size // penalty on access. Requires load-time work (shared) only when // object is outside linkage unit. // // 5. Use a far indirectable relative reference. Size penalty on 64-bit. // Size penalty of one pointer (shared) when reference exceeds range // and is initialized statically. Runtime / code-size penalty on access. // Requires load-time work (shared) only when object is outside linkage // unit. // // 6. Use a near or far symbolic relative reference. No load-time work. // Severe runtime penalty on access. Requires custom logic to statically // optimize. Requires emission of symbol for target even if private // to linkage unit. // // 7. Use a near or far direct-or-symbolic relative reference. No // load-time work. Severe runtime penalty on access if object is // outside of linkage unit. Requires custom logic to statically optimize. // // In general, it's our preference in Swift to use option #4 when there // is no possibility of initializing the reference dynamically and option #5 // when there is. This is because it is infeasible to actually share the // memory for the intermediate absolute reference when it must be allocated // dynamically. // // Symbolic references are an interesting idea that we have not yet made // use of. They may be acceptable in reflective metadata cases where it // is desirable to heavily bias towards never using the metadata. However, // they're only profitable if there wasn't any other indirect reference // to the target, and it is likely that their optimal use requires a more // intelligent toolchain from top to bottom. // // Note that the cost of load-time work also includes a binary-size penalty // to store the loader metadata necessary to perform that work. 
Therefore // it is better to avoid it even when there are dynamic optimizations in // place to skip the work itself. // //===----------------------------------------------------------------------===// #ifndef POLARPHP_BASIC_RELATIVE_POINTER_H #define POLARPHP_BASIC_RELATIVE_POINTER_H #include <cstdint> #include <cassert> #include <type_traits> #include <utility> namespace polar { namespace internal { /// Apply a relative offset to a base pointer. The offset is applied to the base /// pointer using sign-extended, wrapping arithmetic. template<typename BasePtrTy, typename Offset> static inline uintptr_t applyRelativeOffset(BasePtrTy *basePtr, Offset offset) { static_assert(std::is_integral<Offset>::value && std::is_signed<Offset>::value, "offset type should be signed integer"); auto base = reinterpret_cast<uintptr_t>(basePtr); // We want to do wrapping arithmetic, but with a sign-extended // offset. To do this in C, we need to do signed promotion to get // the sign extension, but we need to perform arithmetic on unsigned values, // since signed overflow is undefined behavior. auto extendOffset = static_cast<uintptr_t>(static_cast<intptr_t>(offset)); return base + extendOffset; } /// Measure the relative offset between two pointers. This measures /// (referent - base) using wrapping arithmetic. The result is truncated if /// Offset is smaller than a pointer, with an assertion that the /// pre-truncation result is a sign extension of the truncated result. template<typename Offset, typename A, typename B> static inline Offset measureRelativeOffset(A *referent, B *base) { static_assert(std::is_integral<Offset>::value && std::is_signed<Offset>::value, "offset type should be signed integer"); auto distance = static_cast<uintptr_t>(referent) - static_cast<uintptr_t>(base); // Truncate as unsigned, then wrap around to signed. auto truncatedDistance = static_cast<Offset>(static_cast<typename std::make_unsigned<Offset>::type>(distance)); // Assert that the truncation didn't discard any non-sign-extended bits. assert(static_cast<intptr_t>(truncatedDistance) == static_cast<intptr_t>(distance) && "pointers are too far apart to fit in offset type"); return truncatedDistance; } } // namespace internal /// A relative reference to an object stored in memory. The reference may be /// direct or indirect, and uses the low bit of the (assumed at least /// 2-byte-aligned) pointer to differentiate. template<typename ValueTy, bool Nullable = false, typename Offset = int32_t> class RelativeIndirectPointer { private: static_assert(std::is_integral<Offset>::value && std::is_signed<Offset>::value, "offset type should be signed integer"); /// The relative offset of the pointer's memory from the `this` pointer. /// This is an indirect reference. Offset m_relativeOffset; /// RelativePointers should appear in statically-generated metadata. They /// shouldn't be constructed or copied. RelativeIndirectPointer() = delete; RelativeIndirectPointer(RelativeIndirectPointer &&) = delete; RelativeIndirectPointer(const RelativeIndirectPointer &) = delete; RelativeIndirectPointer &operator=(RelativeIndirectPointer &&) = delete; RelativeIndirectPointer &operator=(const RelativeIndirectPointer &) = delete; public: const ValueTy *get() const & { // Check for null. if (Nullable && m_relativeOffset == 0) { return nullptr; } uintptr_t address = internal::applyRelativeOffset(this, m_relativeOffset); return *reinterpret_cast<const ValueTy * const *>(address); } /// A zero relative offset encodes a null reference. 
bool isNull() const & { return m_relativeOffset == 0; } operator const ValueTy* () const & { return get(); } const ValueTy *operator->() const & { return get(); } }; /// A relative reference to an object stored in memory. The reference may be /// direct or indirect, and uses the low bit of the (assumed at least /// 2-byte-aligned) pointer to differentiate. template<typename ValueTy, bool Nullable = false, typename Offset = int32_t> class RelativeIndirectablePointer { private: static_assert(std::is_integral<Offset>::value && std::is_signed<Offset>::value, "offset type should be signed integer"); /// The relative offset of the pointer's memory from the `this` pointer. /// If the low bit is clear, this is a direct reference; otherwise, it is /// an indirect reference. Offset RelativeOffsetPlusIndirect; /// RelativePointers should appear in statically-generated metadata. They /// shouldn't be constructed or copied. RelativeIndirectablePointer() = delete; RelativeIndirectablePointer(RelativeIndirectablePointer &&) = delete; RelativeIndirectablePointer(const RelativeIndirectablePointer &) = delete; RelativeIndirectablePointer &operator=(RelativeIndirectablePointer &&) = delete; RelativeIndirectablePointer &operator=(const RelativeIndirectablePointer &) = delete; public: /// Allow construction and reassignment from an absolute pointer. /// These always produce a direct relative offset. RelativeIndirectablePointer(ValueTy *absolute) : RelativeOffsetPlusIndirect( Nullable && absolute == nullptr ? 0 : internal::measureRelativeOffset<Offset>(absolute, this)) { if (!Nullable) { assert(absolute != nullptr && "constructing non-nullable relative pointer from null"); } } RelativeIndirectablePointer &operator=(ValueTy *absolute) & { if (!Nullable) { assert(absolute != nullptr && "constructing non-nullable relative pointer from null"); } RelativeOffsetPlusIndirect = Nullable && absolute == nullptr ? 0 : internal::measureRelativeOffset<Offset>(absolute, this); return *this; } const ValueTy *get() const & { static_assert(alignof(ValueTy) >= 2 && alignof(Offset) >= 2, "alignment of value and offset must be at least 2 to " "make room for indirectable flag"); // Check for null. if (Nullable && RelativeOffsetPlusIndirect == 0) { return nullptr; } Offset offsetPlusIndirect = RelativeOffsetPlusIndirect; uintptr_t address = internal::applyRelativeOffset(this, offsetPlusIndirect & ~1); // If the low bit is set, then this is an indirect address. Otherwise, // it's direct. if (offsetPlusIndirect & 1) { return *reinterpret_cast<const ValueTy * const *>(address); } else { return reinterpret_cast<const ValueTy *>(address); } } /// A zero relative offset encodes a null reference. bool isNull() const & { return RelativeOffsetPlusIndirect == 0; } operator const ValueTy* () const & { return get(); } const ValueTy *operator->() const & { return get(); } }; /// A relative reference to an aligned object stored in memory. The reference /// may be direct or indirect, and uses the low bit of the (assumed at least /// 2-byte-aligned) pointer to differentiate. The remaining low bits store /// an additional tiny integer value. template<typename ValueTy, typename IntTy, bool Nullable = false, typename Offset = int32_t> class RelativeIndirectablePointerIntPair { private: static_assert(std::is_integral<Offset>::value && std::is_signed<Offset>::value, "offset type should be signed integer"); /// The relative offset of the pointer's memory from the `this` pointer. 
/// If the low bit is clear, this is a direct reference; otherwise, it is /// an indirect reference. Offset m_relativeOffsetPlusIndirectAndInt; /// RelativePointers should appear in statically-generated metadata. They /// shouldn't be constructed or copied. RelativeIndirectablePointerIntPair() = delete; RelativeIndirectablePointerIntPair( RelativeIndirectablePointerIntPair &&) = delete; RelativeIndirectablePointerIntPair( const RelativeIndirectablePointerIntPair &) = delete; RelativeIndirectablePointerIntPair& operator=( RelativeIndirectablePointerIntPair &&) = delete; RelativeIndirectablePointerIntPair &operator=( const RelativeIndirectablePointerIntPair &) = delete; // Retrieve the mask for the stored integer value. static Offset getIntMask() { return (alignof(Offset) - 1) & ~ static_cast<Offset>(0x01); } public: const ValueTy *getPointer() const & { static_assert(alignof(ValueTy) >= 2 && alignof(Offset) >= 2, "alignment of value and offset must be at least 2 to " "make room for indirectable flag"); Offset offset = (m_relativeOffsetPlusIndirectAndInt & ~getIntMask()); // Check for null. if (Nullable && offset == 0) { return nullptr; } Offset offsetPlusIndirect = offset; uintptr_t address = internal::applyRelativeOffset(this, offsetPlusIndirect & ~1); // If the low bit is set, then this is an indirect address. Otherwise, // it's direct. if (offsetPlusIndirect & 1) { return *reinterpret_cast<const ValueTy * const *>(address); } else { return reinterpret_cast<const ValueTy *>(address); } } /// A zero relative offset encodes a null reference. bool isNull() const & { Offset offset = (m_relativeOffsetPlusIndirectAndInt & ~getIntMask()); return offset == 0; } IntTy getInt() const & { return IntTy((m_relativeOffsetPlusIndirectAndInt & getIntMask()) >> 1); } }; /// A relative reference to a function, intended to reference private metadata /// functions for the current executable or dynamic library image from /// position-independent constant data. template<typename T, bool Nullable, typename Offset> class RelativeDirectPointerImpl { private: /// The relative offset of the function's entry point from *this. Offset m_relativeOffset; /// RelativePointers should appear in statically-generated metadata. They /// shouldn't be constructed or copied. RelativeDirectPointerImpl() = delete; /// RelativePointers should appear in statically-generated metadata. They /// shouldn't be constructed or copied. RelativeDirectPointerImpl(RelativeDirectPointerImpl &&) = delete; RelativeDirectPointerImpl(const RelativeDirectPointerImpl &) = delete; RelativeDirectPointerImpl &operator=(RelativeDirectPointerImpl &&) = delete; RelativeDirectPointerImpl &operator=(const RelativeDirectPointerImpl &) = delete; public: using ValueTy = T; using PointerTy = T*; // Allow construction and reassignment from an absolute pointer. RelativeDirectPointerImpl(PointerTy absolute) : m_relativeOffset(Nullable && absolute == nullptr ? 0 : internal::measureRelativeOffset<Offset>(absolute, this)) { if (!Nullable) { assert(absolute != nullptr && "constructing non-nullable relative pointer from null"); } } explicit constexpr RelativeDirectPointerImpl(std::nullptr_t) : m_relativeOffset (0) { static_assert(Nullable, "can't construct non-nullable pointer from null"); } RelativeDirectPointerImpl &operator=(PointerTy absolute) & { if (!Nullable) { assert(absolute != nullptr && "constructing non-nullable relative pointer from null"); } m_relativeOffset = Nullable && absolute == nullptr ? 
0 : internal::measureRelativeOffset<Offset>(absolute, this); return *this; } PointerTy get() const & { // Check for null. if (Nullable && m_relativeOffset == 0) { return nullptr; } // The value is addressed relative to `this`. uintptr_t absolute = internal::applyRelativeOffset(this, m_relativeOffset); return reinterpret_cast<PointerTy>(absolute); } /// A zero relative offset encodes a null reference. bool isNull() const & { return m_relativeOffset == 0; } }; /// A direct relative reference to an object. template<typename T, bool Nullable = true, typename Offset = int32_t> class RelativeDirectPointer : private RelativeDirectPointerImpl<T, Nullable, Offset> { using super = RelativeDirectPointerImpl<T, Nullable, Offset>; public: using super::get; using super::super; RelativeDirectPointer &operator=(T *absolute) & { super::operator=(absolute); return *this; } operator typename super::PointerTy() const & { return this->get(); } const typename super::ValueTy *operator->() const & { return this->get(); } using super::isNull; }; /// A specialization of RelativeDirectPointer for function pointers, /// allowing for calls. template<typename RetTy, typename...ArgTy, bool Nullable, typename Offset> class RelativeDirectPointer<RetTy (ArgTy...), Nullable, Offset> : private RelativeDirectPointerImpl<RetTy (ArgTy...), Nullable, Offset> { using super = RelativeDirectPointerImpl<RetTy (ArgTy...), Nullable, Offset>; public: using super::get; using super::super; RelativeDirectPointer &operator=(RetTy (*absolute)(ArgTy...)) & { super::operator=(absolute); return *this; } operator typename super::PointerTy() const & { return this->get(); } RetTy operator()(ArgTy...arg) const { return this->get()(std::forward<ArgTy>(arg)...); } using super::isNull; }; /// A direct relative reference to an aligned object, with an additional /// tiny integer value crammed into its low bits. template<typename PointeeTy, typename IntTy, bool Nullable = false, typename Offset = int32_t> class RelativeDirectPointerIntPair { Offset m_relativeOffsetPlusInt; /// RelativePointers should appear in statically-generated metadata. They /// shouldn't be constructed or copied. RelativeDirectPointerIntPair() = delete; RelativeDirectPointerIntPair(RelativeDirectPointerIntPair &&) = delete; RelativeDirectPointerIntPair(const RelativeDirectPointerIntPair &) = delete; RelativeDirectPointerIntPair &operator=(RelativeDirectPointerIntPair &&) = delete; RelativeDirectPointerIntPair &operator=(const RelativeDirectPointerIntPair&) = delete; static Offset getMask() { return alignof(Offset) - 1; } public: using ValueTy = PointeeTy; using PointerTy = PointeeTy*; PointerTy getPointer() const & { Offset offset = (m_relativeOffsetPlusInt & ~getMask()); // Check for null. if (Nullable && offset == 0) { return nullptr; } // The value is addressed relative to `this`. uintptr_t absolute = internal::applyRelativeOffset(this, offset); return reinterpret_cast<PointerTy>(absolute); } IntTy getInt() const & { return IntTy(m_relativeOffsetPlusInt & getMask()); } Offset getOpaqueValue() const & { return m_relativeOffsetPlusInt; } }; // Type aliases for "far" relative pointers, which need to be able to reach // across the full address space instead of only across a single small-code- // model image. 
template<typename T, bool Nullable = false> using FarRelativeIndirectablePointer = RelativeIndirectablePointer<T, Nullable, intptr_t>; template<typename T, bool Nullable = false> using FarRelativeDirectPointer = RelativeDirectPointer<T, Nullable, intptr_t>; } // polar #endif // POLARPHP_BASIC_RELATIVE_POINTER_H
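The header's long introductory comment describes the core trick in prose: a near direct relative reference stores a signed offset from the reference's own address to its target and adds it back, with wrapping arithmetic, to resolve the pointer. The sketch below illustrates only that idea at runtime; it is not one of the templates above, which are meant to be laid down statically by the compiler and linker and also cover the indirect, indirectable, and int-pair variants. It assumes the referent lives within +/-2 GB of the reference.

#include <cassert>
#include <cstdint>
#include <cstdio>

// A "near direct relative reference" boiled down: keep a signed 32-bit offset
// from this object's address to the referent, and resolve it by sign-extending
// and adding it back to this's address (wrapping, as in applyRelativeOffset above).
struct RelativeRef {
  int32_t offset;

  void set(const void *target) {
    // Assumes the distance between reference and referent fits in 32 bits.
    offset = static_cast<int32_t>(reinterpret_cast<intptr_t>(target) -
                                  reinterpret_cast<intptr_t>(this));
  }

  const void *get() const {
    const uintptr_t base = reinterpret_cast<uintptr_t>(this);
    const uintptr_t extended = static_cast<uintptr_t>(static_cast<intptr_t>(offset));
    return reinterpret_cast<const void *>(base + extended);
  }
};

int main() {
  static const int value = 42;
  static RelativeRef ref;  // both objects live in the same image / address range
  ref.set(&value);
  assert(ref.get() == &value);
  std::printf("resolved value: %d\n", *static_cast<const int *>(ref.get()));
  return 0;
}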
<gh_stars>1000+ /* $Id: types.c 231 2011-06-27 13:46:19Z marc.noirot $ FLV Metadata updater Copyright (C) 2007-2012 <NAME> <<EMAIL> AT <EMAIL>> This file is part of FLVMeta. FLVMeta is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. FLVMeta is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with FLVMeta; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ #include "types.h" #ifndef WORDS_BIGENDIAN /* swap 64 bits doubles */ typedef union __convert_u { uint64 i; number64 f; } convert_u; number64 swap_number64(number64 n) { convert_u c; c.f = n; c.i = (((c.i & 0x00000000000000FFULL) << 56) | ((c.i & 0x000000000000FF00ULL) << 40) | ((c.i & 0x0000000000FF0000ULL) << 24) | ((c.i & 0x00000000FF000000ULL) << 8) | ((c.i & 0x000000FF00000000ULL) >> 8) | ((c.i & 0x0000FF0000000000ULL) >> 24) | ((c.i & 0x00FF000000000000ULL) >> 40) | ((c.i & 0xFF00000000000000ULL) >> 56)); return c.f; } #endif /* !defined WORDS_BIGENDIAN */ /* convert native integers into 24 bits big endian integers */ uint24_be uint32_to_uint24_be(uint32 l) { uint24_be r; r.b[0] = (uint8)((l & 0x00FF0000U) >> 16); r.b[1] = (uint8)((l & 0x0000FF00U) >> 8); r.b[2] = (uint8) (l & 0x000000FFU); return r; } #ifdef WIN32 /* These functions assume fpos_t is a 64-bit signed integer */ file_offset_t lfs_ftell(FILE * stream) { fpos_t p; if (fgetpos(stream, &p) == 0) { return (file_offset_t)p; } else { return -1LL; } } int lfs_fseek(FILE * stream, file_offset_t offset, int whence) { fpos_t p; if (fgetpos(stream, &p) == 0) { switch (whence) { case SEEK_CUR: p += offset; break; case SEEK_SET: p = offset; break; /*case SEEK_END:; not implemented here */ default: return -1; } fsetpos(stream, &p); return 0; } else { return -1; } } #endif /* WIN32 */
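FLV stores tag sizes as 24-bit big-endian integers, which is what uint32_to_uint24_be above produces. The round-trip below shows the byte layout and the inverse conversion; the struct here is a stand-in for the flvmeta typedefs (uint8, uint32, uint24_be) defined in types.h, which is not shown.

#include <cstdint>
#include <cstdio>

struct uint24_be_t { uint8_t b[3]; };  // stand-in for flvmeta's uint24_be

// Pack the low 24 bits of a 32-bit value in big-endian byte order.
static uint24_be_t uint32_to_uint24_be(uint32_t l) {
  uint24_be_t r;
  r.b[0] = (uint8_t)((l & 0x00FF0000u) >> 16);
  r.b[1] = (uint8_t)((l & 0x0000FF00u) >> 8);
  r.b[2] = (uint8_t) (l & 0x000000FFu);
  return r;
}

// Inverse conversion, reading the three bytes back into a native integer.
static uint32_t uint24_be_to_uint32(uint24_be_t v) {
  return ((uint32_t)v.b[0] << 16) | ((uint32_t)v.b[1] << 8) | (uint32_t)v.b[2];
}

int main() {
  const uint32_t tag_size = 0x00ABCDEFu;  // only the low 24 bits survive
  const uint24_be_t packed = uint32_to_uint24_be(tag_size);
  std::printf("bytes: %02X %02X %02X, round-trip: %06X\n",
              (unsigned)packed.b[0], (unsigned)packed.b[1], (unsigned)packed.b[2],
              (unsigned)uint24_be_to_uint32(packed));
  return 0;
}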
<reponame>chamberone/Leaflet.PixiOverlay {"nom":"Monfaucon","circ":"2ème circonscription","dpt":"Dordogne","inscrits":256,"abs":148,"votants":108,"blancs":8,"nuls":2,"exp":98,"res":[{"nuance":"REM","nom":"<NAME>","voix":63},{"nuance":"FN","nom":"M. <NAME>","voix":35}]}
// // gradient.h // C-Ray // // Created by <NAME> on 19/12/2020. // Copyright © 2020 <NAME>. All rights reserved. // #pragma once const struct colorNode *newGradientTexture(const struct world *world, struct color down, struct color up);
<reponame>kvmanohar22/gtsam /* ---------------------------------------------------------------------------- * GTSAM Copyright 2010, Georgia Tech Research Corporation, * Atlanta, Georgia 30332-0415 * All Rights Reserved * Authors: <NAME>, et al. (see THANKS for the full author list) * See LICENSE for the license information * -------------------------------------------------------------------------- */ /** * @file SO3.h * @brief 3*3 matrix representation of SO(3) * @author <NAME> * @author <NAME> * @author <NAME> * @date December 2014 */ #pragma once #include <gtsam/geometry/SOn.h> #include <gtsam/base/Lie.h> #include <gtsam/base/Matrix.h> #include <gtsam/dllexport.h> #include <cmath> #include <vector> namespace gtsam { using SO3 = SO<3>; // Below are all declarations of SO<3> specializations. // They are *defined* in SO3.cpp. template <> GTSAM_EXPORT SO3 SO3::AxisAngle(const Vector3& axis, double theta); template <> GTSAM_EXPORT SO3 SO3::ClosestTo(const Matrix3& M); template <> GTSAM_EXPORT SO3 SO3::ChordalMean(const std::vector<SO3>& rotations); template <> GTSAM_EXPORT Matrix3 SO3::Hat(const Vector3& xi); ///< make skew symmetric matrix template <> GTSAM_EXPORT Vector3 SO3::Vee(const Matrix3& X); ///< inverse of Hat /// Adjoint map template <> Matrix3 SO3::AdjointMap() const; /** * Exponential map at identity - create a rotation from canonical coordinates * \f$ [R_x,R_y,R_z] \f$ using Rodrigues' formula */ template <> GTSAM_EXPORT SO3 SO3::Expmap(const Vector3& omega, ChartJacobian H); /// Derivative of Expmap template <> GTSAM_EXPORT Matrix3 SO3::ExpmapDerivative(const Vector3& omega); /** * Log map at identity - returns the canonical coordinates * \f$ [R_x,R_y,R_z] \f$ of this rotation */ template <> GTSAM_EXPORT Vector3 SO3::Logmap(const SO3& R, ChartJacobian H); /// Derivative of Logmap template <> GTSAM_EXPORT Matrix3 SO3::LogmapDerivative(const Vector3& omega); // Chart at origin for SO3 is *not* Cayley but actual Expmap/Logmap template <> GTSAM_EXPORT SO3 SO3::ChartAtOrigin::Retract(const Vector3& omega, ChartJacobian H); template <> GTSAM_EXPORT Vector3 SO3::ChartAtOrigin::Local(const SO3& R, ChartJacobian H); template <> GTSAM_EXPORT Vector9 SO3::vec(OptionalJacobian<9, 3> H) const; /** Serialization function */ template <class Archive> void serialize(Archive& ar, SO3& R, const unsigned int /*version*/) { Matrix3& M = R.matrix_; ar& boost::serialization::make_nvp("R11", M(0, 0)); ar& boost::serialization::make_nvp("R12", M(0, 1)); ar& boost::serialization::make_nvp("R13", M(0, 2)); ar& boost::serialization::make_nvp("R21", M(1, 0)); ar& boost::serialization::make_nvp("R22", M(1, 1)); ar& boost::serialization::make_nvp("R23", M(1, 2)); ar& boost::serialization::make_nvp("R31", M(2, 0)); ar& boost::serialization::make_nvp("R32", M(2, 1)); ar& boost::serialization::make_nvp("R33", M(2, 2)); } namespace so3 { /** * Compose general matrix with an SO(3) element. * We only provide the 9*9 derivative in the first argument M. */ GTSAM_EXPORT Matrix3 compose(const Matrix3& M, const SO3& R, OptionalJacobian<9, 9> H = boost::none); /// (constant) Jacobian of compose wrpt M GTSAM_EXPORT Matrix99 Dcompose(const SO3& R); // Below are two functors that allow for saving computation when exponential map // and its derivatives are needed at the same location in so<3>. The second // functor also implements dedicated methods to apply dexp and/or inv(dexp). 
/// Functor implementing Exponential map class GTSAM_EXPORT ExpmapFunctor { protected: const double theta2; Matrix3 W, K, KK; bool nearZero; double theta, sin_theta, one_minus_cos; // only defined if !nearZero void init(bool nearZeroApprox = false); public: /// Constructor with element of Lie algebra so(3) explicit ExpmapFunctor(const Vector3& omega, bool nearZeroApprox = false); /// Constructor with axis-angle ExpmapFunctor(const Vector3& axis, double angle, bool nearZeroApprox = false); /// Rodrigues formula SO3 expmap() const; }; /// Functor that implements Exponential map *and* its derivatives class DexpFunctor : public ExpmapFunctor { const Vector3 omega; double a, b; Matrix3 dexp_; public: /// Constructor with element of Lie algebra so(3) GTSAM_EXPORT explicit DexpFunctor(const Vector3& omega, bool nearZeroApprox = false); // NOTE(luca): Right Jacobian for Exponential map in SO(3) - equation // (10.86) and following equations in <NAME>, "Stochastic Models, // Information Theory, and Lie Groups", Volume 2, 2008. // expmap(omega + v) \approx expmap(omega) * expmap(dexp * v) // This maps a perturbation v in the tangent space to // a perturbation on the manifold Expmap(dexp * v) */ const Matrix3& dexp() const { return dexp_; } /// Multiplies with dexp(), with optional derivatives GTSAM_EXPORT Vector3 applyDexp(const Vector3& v, OptionalJacobian<3, 3> H1 = boost::none, OptionalJacobian<3, 3> H2 = boost::none) const; /// Multiplies with dexp().inverse(), with optional derivatives GTSAM_EXPORT Vector3 applyInvDexp(const Vector3& v, OptionalJacobian<3, 3> H1 = boost::none, OptionalJacobian<3, 3> H2 = boost::none) const; }; } // namespace so3 /* * Define the traits. internal::LieGroup provides both Lie group and Testable */ template <> struct traits<SO3> : public internal::LieGroup<SO3> {}; template <> struct traits<const SO3> : public internal::LieGroup<SO3> {}; } // end namespace gtsam
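The SO3::Expmap specialization declared above is documented as using Rodrigues' formula; its definition lives in SO3.cpp and is not part of this excerpt. The sketch below restates that formula with plain arrays, with no Eigen types and no treatment of the near-zero case beyond returning the identity.

#include <array>
#include <cmath>
#include <cstdio>

using Mat3 = std::array<std::array<double, 3>, 3>;

// Rodrigues' formula: R = I + sin(theta) * K + (1 - cos(theta)) * K * K,
// where theta = |omega| and K is the skew-symmetric "hat" matrix of the unit axis.
static Mat3 expmap_so3(double wx, double wy, double wz) {
  const double theta = std::sqrt(wx * wx + wy * wy + wz * wz);
  Mat3 R = {{{1, 0, 0}, {0, 1, 0}, {0, 0, 1}}};  // start from the identity
  if (theta < 1e-10) {
    return R;  // near zero: identity is good enough for this sketch
  }
  const double ax = wx / theta, ay = wy / theta, az = wz / theta;
  const Mat3 K = {{{0, -az, ay}, {az, 0, -ax}, {-ay, ax, 0}}};
  const double s = std::sin(theta), c = 1.0 - std::cos(theta);
  for (int i = 0; i < 3; i++) {
    for (int j = 0; j < 3; j++) {
      double kk = 0.0;  // (K * K)(i, j)
      for (int k = 0; k < 3; k++) {
        kk += K[i][k] * K[k][j];
      }
      R[i][j] += s * K[i][j] + c * kk;
    }
  }
  return R;
}

int main() {
  // A rotation of pi/2 about z should map the x axis onto the y axis.
  const double half_pi = std::acos(0.0);
  const Mat3 R = expmap_so3(0.0, 0.0, half_pi);
  std::printf("R * e_x = (%.3f, %.3f, %.3f)\n", R[0][0], R[1][0], R[2][0]);
  return 0;
}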
<reponame>pyracanda/runtime // Licensed to the .NET Foundation under one or more agreements. // The .NET Foundation licenses this file to you under the MIT license. #include "stdafx.h" #include "yieldprocessornormalized.h" bool YieldProcessorNormalization::s_isMeasurementScheduled; // Defaults are for when normalization has not yet been done unsigned int YieldProcessorNormalization::s_yieldsPerNormalizedYield = 1; unsigned int YieldProcessorNormalization::s_optimalMaxNormalizedYieldsPerSpinIteration = (unsigned int) ( (double)YieldProcessorNormalization::TargetMaxNsPerSpinIteration / YieldProcessorNormalization::TargetNsPerNormalizedYield + 0.5 );
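The defaults above use a common rounding idiom: dividing the per-spin-iteration time budget by the cost of one normalized yield and adding 0.5 before the integer cast rounds to the nearest integer rather than truncating. A sketch with made-up constants (the real ones are declared in yieldprocessornormalized.h, which is not shown):

#include <cstdio>

int main() {
  // Hypothetical values standing in for TargetMaxNsPerSpinIteration and
  // TargetNsPerNormalizedYield.
  const double targetMaxNsPerSpinIteration = 272.0;
  const double targetNsPerNormalizedYield = 37.0;

  // Adding 0.5 before the cast rounds to nearest instead of truncating down.
  const unsigned int optimalMaxYields = (unsigned int)(
      targetMaxNsPerSpinIteration / targetNsPerNormalizedYield + 0.5);
  std::printf("optimal max normalized yields per spin iteration: %u\n", optimalMaxYields);
  return 0;
}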
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.usergrid.services; import java.util.UUID; import org.apache.usergrid.persistence.Entity; import org.apache.usergrid.persistence.EntityRef; public interface Service { public static final String GENERIC_ENTITY_TYPE = "entity"; public String getServiceType(); public Class<? extends Entity> getEntityClass(); public String getEntityType(); public boolean isRootService(); public ServiceResults invoke( ServiceAction action, ServiceRequest request, ServiceResults previousResults, ServicePayload payload ) throws Exception; public Entity getEntity( ServiceRequest request, UUID uuid ) throws Exception; public Entity getEntity( ServiceRequest request, String name ) throws Exception; public Entity importEntity( ServiceRequest request, Entity entity ) throws Exception; public Entity writeEntity( ServiceRequest request, Entity entity ) throws Exception; public Entity updateEntity( ServiceRequest request, EntityRef ref, ServicePayload payload ) throws Exception; ServiceContext getContext( ServiceAction action, ServiceRequest request, ServiceResults previousResults, ServicePayload payload ) throws Exception; }
<reponame>proton-vayu/android_external_perfetto<filename>include/perfetto/tracing/string_helpers.h /* * Copyright (C) 2021 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifndef INCLUDE_PERFETTO_TRACING_STRING_HELPERS_H_ #define INCLUDE_PERFETTO_TRACING_STRING_HELPERS_H_ #include "perfetto/base/export.h" #include <cstddef> #include <string> namespace perfetto { // A wrapper for marking strings that can't be determined to be static at build // time, but are in fact static. class PERFETTO_EXPORT StaticString { public: // Implicit constructor for string literals. template <size_t N> constexpr StaticString(const char (&str)[N]) : value(str) {} // Implicit constructor for null strings. constexpr StaticString(std::nullptr_t) : value(nullptr) {} constexpr explicit StaticString(const char* str) : value(str) {} const char* value; }; namespace internal { // Ensure that |string| is a static constant string. // // If you get a compiler failure here, you are most likely trying to use // TRACE_EVENT with a dynamic event name. There are two ways to fix this: // // 1) If the event name is actually dynamic (e.g., std::string), write it into // the event manually: // // TRACE_EVENT("category", nullptr, [&](perfetto::EventContext ctx) { // ctx.event()->set_name(dynamic_name); // }); // // 2) If the name is static, but the pointer is computed at runtime, wrap it // with perfetto::StaticString: // // TRACE_EVENT("category", perfetto::StaticString{name}); // // DANGER: Using perfetto::StaticString with strings whose contents change // dynamically can cause silent trace data corruption. // constexpr const char* GetStaticString(StaticString string) { return string.value; } } // namespace internal // A explicit wrapper for marking strings as dynamic to ensure that perfetto // doesn't try to cache the pointer value. class PERFETTO_EXPORT DynamicString { public: explicit DynamicString(const std::string& str) : value(str.data()), length(str.length()) {} explicit DynamicString(const char* str) : value(str), length(strlen(str)) {} DynamicString(const char* str, size_t len) : value(str), length(len) {} const char* value; size_t length; }; } // namespace perfetto #endif // INCLUDE_PERFETTO_TRACING_STRING_HELPERS_H_
package com.marverenic.music.ui.nowplaying; import android.graphics.PorterDuff; import android.graphics.drawable.Drawable; import android.graphics.drawable.LayerDrawable; import android.os.Build; import android.os.Bundle; import androidx.annotation.NonNull; import androidx.annotation.Nullable; import androidx.core.content.ContextCompat; import android.view.LayoutInflater; import android.view.View; import android.view.ViewGroup; import android.widget.ProgressBar; import com.marverenic.music.JockeyApplication; import com.marverenic.music.R; import com.marverenic.music.data.store.ThemeStore; import com.marverenic.music.databinding.FragmentMiniplayerBinding; import com.marverenic.music.player.PlayerController; import com.marverenic.music.ui.BaseFragment; import com.marverenic.music.view.ViewUtils; import javax.inject.Inject; import timber.log.Timber; public class MiniplayerFragment extends BaseFragment { @Inject PlayerController mPlayerController; @Inject ThemeStore mThemeStore; public static MiniplayerFragment newInstance() { return new MiniplayerFragment(); } @Override public void onCreate(@Nullable Bundle savedInstanceState) { super.onCreate(savedInstanceState); JockeyApplication.getComponent(getContext()).inject(this); } @Nullable @Override public View onCreateView(@NonNull LayoutInflater inflater, @Nullable ViewGroup container, @Nullable Bundle savedInstanceState) { FragmentMiniplayerBinding mBinding = FragmentMiniplayerBinding.inflate(inflater, container, false); MiniplayerViewModel viewModel = new MiniplayerViewModel(getContext(), mPlayerController); mPlayerController.getNowPlaying() .compose(bindToLifecycle()) .subscribe(viewModel::setSong, throwable -> { Timber.e(throwable, "Failed to set song"); }); mPlayerController.isPlaying() .compose(bindToLifecycle()) .subscribe(viewModel::setPlaying, throwable -> { Timber.e(throwable, "Failed to set playing state"); }); mPlayerController.getCurrentPosition() .compose(bindToLifecycle()) .subscribe(viewModel::setCurrentPosition, throwable -> { Timber.e(throwable, "Failed to set progress"); }); mPlayerController.getDuration() .compose(bindToLifecycle()) .subscribe(viewModel::setDuration, throwable -> { Timber.e(throwable, "Failed to set duration"); }); mPlayerController.getArtwork() .compose(bindToLifecycle()) .map(artwork -> { if (artwork == null) { return ViewUtils.drawableToBitmap( ContextCompat.getDrawable(getContext(), R.drawable.art_default)); } else { return artwork; } }) .subscribe(viewModel::setArtwork, throwable -> { Timber.e(throwable, "Failed to set artwork"); }); mBinding.setViewModel(viewModel); if (Build.VERSION.SDK_INT < Build.VERSION_CODES.LOLLIPOP) { ProgressBar progressBar = mBinding.miniplayerProgress; LayerDrawable progressBarDrawable = (LayerDrawable) progressBar.getProgressDrawable(); Drawable progress = progressBarDrawable.findDrawableByLayerId(android.R.id.progress); progress.setColorFilter(mThemeStore.getAccentColor(), PorterDuff.Mode.SRC_ATOP); } return mBinding.getRoot(); } }
class O:
    def __init__(self, n):
        self.n = n

    def __str__(self):
        return self.n


object = O('Hello! ' * 100)
package com.evenwell.powersaving.g3.powersaver; import android.os.Binder; import android.os.IBinder; import android.os.IInterface; import android.os.Parcel; import android.os.RemoteException; public interface IStatusChangeListener extends IInterface { public static abstract class Stub extends Binder implements IStatusChangeListener { private static final String DESCRIPTOR = "com.evenwell.powersaving.g3.powersaver.IStatusChangeListener"; static final int TRANSACTION_onChange = 1; private static class Proxy implements IStatusChangeListener { private IBinder mRemote; Proxy(IBinder remote) { this.mRemote = remote; } public IBinder asBinder() { return this.mRemote; } public String getInterfaceDescriptor() { return Stub.DESCRIPTOR; } public void onChange(int status) throws RemoteException { Parcel _data = Parcel.obtain(); Parcel _reply = Parcel.obtain(); try { _data.writeInterfaceToken(Stub.DESCRIPTOR); _data.writeInt(status); this.mRemote.transact(1, _data, _reply, 0); _reply.readException(); } finally { _reply.recycle(); _data.recycle(); } } } public Stub() { attachInterface(this, DESCRIPTOR); } public static IStatusChangeListener asInterface(IBinder obj) { if (obj == null) { return null; } IInterface iin = obj.queryLocalInterface(DESCRIPTOR); if (iin == null || !(iin instanceof IStatusChangeListener)) { return new Proxy(obj); } return (IStatusChangeListener) iin; } public IBinder asBinder() { return this; } public boolean onTransact(int code, Parcel data, Parcel reply, int flags) throws RemoteException { switch (code) { case 1: data.enforceInterface(DESCRIPTOR); onChange(data.readInt()); reply.writeNoException(); return true; case 1598968902: reply.writeString(DESCRIPTOR); return true; default: return super.onTransact(code, data, reply, flags); } } } void onChange(int i) throws RemoteException; }
int main(void) { if (!__func__) return 1; if (!(*__func__)) return 1; return 0; }
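This test only asserts that the C99 predefined identifier __func__ is non-null and non-empty inside main. For reference, a short sketch showing what __func__ actually evaluates to in different functions (written as C++, like the other added examples):

#include <cstdio>

// __func__ names the enclosing function, so each call site prints its own name.
static void helper(void) {
  std::printf("in %s\n", __func__);  // prints "in helper"
}

int main(void) {
  std::printf("in %s\n", __func__);  // prints "in main"
  helper();
  return 0;
}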
<reponame>dartartem/eventuate-tram-reactive-examples-customers-and-orders package io.eventuate.examples.tram.ordersandcustomers.orders.domain.events; public class OrderCancelledEvent implements OrderEvent { private OrderDetails orderDetails; public OrderCancelledEvent() { } public OrderCancelledEvent(OrderDetails orderDetails) { this.orderDetails = orderDetails; } public OrderDetails getOrderDetails() { return orderDetails; } }
<reponame>jellisgwn/DependencyCheck<filename>core/src/main/java/org/owasp/dependencycheck/analyzer/ElixirMixAuditAnalyzer.java /* * This file is part of dependency-check-core. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * Copyright (c) 2020 The OWASP Foundation. All Rights Reserved. */ package org.owasp.dependencycheck.analyzer; import org.owasp.dependencycheck.Engine; import org.owasp.dependencycheck.analyzer.exception.AnalysisException; import org.owasp.dependencycheck.data.nvdcve.CveDB; import org.owasp.dependencycheck.dependency.Dependency; import org.owasp.dependencycheck.exception.InitializationException; import org.owasp.dependencycheck.utils.FileFilterBuilder; import org.owasp.dependencycheck.utils.Settings; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import us.springett.parsers.cpe.exceptions.CpeValidationException; import java.io.File; import java.io.FileFilter; import java.io.IOException; import java.nio.file.Path; import java.nio.file.Paths; import java.util.ArrayList; import java.util.Arrays; import java.util.List; import org.apache.commons.lang3.StringUtils; import org.owasp.dependencycheck.processing.MixAuditProcessor; import org.owasp.dependencycheck.utils.processing.ProcessReader; @Experimental public class ElixirMixAuditAnalyzer extends AbstractFileTypeAnalyzer { /** * The logger. */ private static final Logger LOGGER = LoggerFactory.getLogger(ElixirMixAuditAnalyzer.class); /** * A descriptor for the type of dependencies processed or added by this * analyzer. */ public static final String DEPENDENCY_ECOSYSTEM = "elixir"; /** * The name of the analyzer. */ private static final String ANALYZER_NAME = "Elixir Mix Audit Analyzer"; /** * The phase that this analyzer is intended to run in. */ private static final AnalysisPhase ANALYSIS_PHASE = AnalysisPhase.PRE_INFORMATION_COLLECTION; /** * The filter defining which files will be analyzed. */ private static final FileFilter FILTER = FileFilterBuilder.newInstance().addFilenames("mix.lock").build(); /** * Name. */ public static final String NAME = "Name: "; /** * Version. */ public static final String VERSION = "Version: "; /** * Advisory. */ public static final String ADVISORY = "Advisory: "; /** * Criticality. */ public static final String CRITICALITY = "Criticality: "; /** * The DAL. */ private CveDB cvedb = null; /** * @return a filter that accepts files named mix.lock */ @Override protected FileFilter getFileFilter() { return FILTER; } @Override protected void prepareFileTypeAnalyzer(Engine engine) throws InitializationException { if (engine != null) { this.cvedb = engine.getDatabase(); } // Here we check if mix_audit actually runs from this location. 
We do this by running the // `mix_audit --version` command and seeing whether or not it succeeds (if it returns with an exit value of 0) final Process process; try { final List<String> mixAuditArgs = Arrays.asList("--version"); process = launchMixAudit(getSettings().getTempDirectory(), mixAuditArgs); } catch (AnalysisException ae) { setEnabled(false); final String msg = String.format("Exception from mix_audit process: %s. Disabling %s", ae.getCause(), ANALYZER_NAME); throw new InitializationException(msg, ae); } catch (IOException ex) { setEnabled(false); throw new InitializationException("Unable to create temporary file, the Mix Audit Analyzer will be disabled", ex); } final int exitValue; final String mixAuditVersionDetails; try (ProcessReader processReader = new ProcessReader(process)) { processReader.readAll(); exitValue = process.exitValue(); if (exitValue != 0) { if (StringUtils.isBlank(processReader.getError())) { LOGGER.warn("Unexpected exit value from mix_audit process and error stream unexpectedly not ready to capture error details. " + "Disabling {}. Exit value was: {}", ANALYZER_NAME, exitValue); setEnabled(false); throw new InitializationException("mix_audit error stream unexpectedly not ready."); } else { setEnabled(false); LOGGER.warn("Unexpected exit value from mix_audit process. Disabling {}. Exit value was: {}. " + "error stream output from mix_audit process was: {}", ANALYZER_NAME, exitValue, processReader.getError()); throw new InitializationException("Unexpected exit value from bundle-audit process."); } } else { if (StringUtils.isBlank(processReader.getOutput())) { LOGGER.warn("mix_audit input stream unexpectedly not ready to capture version details. Disabling {}", ANALYZER_NAME); setEnabled(false); throw new InitializationException("mix_audit input stream unexpectedly not ready to capture version details."); } else { mixAuditVersionDetails = processReader.getOutput(); } } } catch (InterruptedException ex) { setEnabled(false); final String msg = String.format("mix_audit process was interrupted. Disabling %s", ANALYZER_NAME); Thread.currentThread().interrupt(); throw new InitializationException(msg); } catch (IOException ex) { setEnabled(false); final String msg = String.format("IOException '%s' during mix_audit process was interrupted. Disabling %s", ex.getMessage(), ANALYZER_NAME); throw new InitializationException(msg); } if (isEnabled()) { LOGGER.debug("{} is enabled and is using mix_audit with version: {}.", ANALYZER_NAME, mixAuditVersionDetails); } } /** * Returns the key used in the properties file to reference the analyzer's * enabled property. * * @return the analyzer's enabled property setting key */ @Override protected String getAnalyzerEnabledSettingKey() { return Settings.KEYS.ANALYZER_MIX_AUDIT_ENABLED; } /** * Returns the name of the analyzer. * * @return the name of the analyzer. */ @Override public String getName() { return ANALYZER_NAME; } /** * Returns the phase that the analyzer is intended to run in. * * @return the phase that the analyzer is intended to run in. */ @Override public AnalysisPhase getAnalysisPhase() { return ANALYSIS_PHASE; } /** * Launch mix audit. 
* * @param folder directory that contains the mix.lock file * @param mixAuditArgs the arguments to pass to mix audit * @return a handle to the process * @throws AnalysisException thrown when there is an issue launching mix * audit */ private Process launchMixAudit(File folder, List<String> mixAuditArgs) throws AnalysisException { if (!folder.isDirectory()) { throw new AnalysisException(String.format("%s should have been a directory.", folder.getAbsolutePath())); } final List<String> args = new ArrayList<>(); final String mixAuditPath = getSettings().getString(Settings.KEYS.ANALYZER_MIX_AUDIT_PATH); File mixAudit = null; if (mixAuditPath != null) { mixAudit = new File(mixAuditPath); if (!mixAudit.isFile()) { LOGGER.warn("Supplied `mixAudit` path is incorrect: {}", mixAuditPath); mixAudit = null; } } else { final Path homePath = Paths.get(System.getProperty("user.home")); final Path escriptPath = Paths.get(homePath.toString(), ".mix", "escripts", "mix_audit"); mixAudit = escriptPath.toFile(); } args.add(mixAudit != null ? mixAudit.getAbsolutePath() : "mix_audit"); args.addAll(mixAuditArgs); final ProcessBuilder builder = new ProcessBuilder(args); builder.directory(folder); try { LOGGER.info("Launching: {} from {}", args, folder); return builder.start(); } catch (IOException ioe) { throw new AnalysisException("mix_audit initialization failure; this error can be ignored if you are not analyzing Elixir. " + "Otherwise ensure that mix_audit is installed and the path to mix_audit is correctly specified", ioe); } } /** * Determines if the analyzer can analyze the given file type. * * @param dependency the dependency to determine if it can analyze * @param engine the dependency-check engine * @throws AnalysisException thrown if there is an analysis exception. */ @Override protected void analyzeDependency(Dependency dependency, Engine engine) throws AnalysisException { final File parentFile = dependency.getActualFile().getParentFile(); final List<String> mixAuditArgs = Arrays.asList("--format", "json"); final Process process = launchMixAudit(parentFile, mixAuditArgs); final int exitValue; try (MixAuditProcessor processor = new MixAuditProcessor(dependency, engine); ProcessReader processReader = new ProcessReader(process, processor)) { processReader.readAll(); exitValue = process.exitValue(); if (exitValue < 0 || exitValue > 1) { final String msg = String.format("Unexpected exit code from mix_audit process; exit code: %s", exitValue); throw new AnalysisException(msg); } } catch (InterruptedException ie) { Thread.currentThread().interrupt(); throw new AnalysisException("mix_audit process interrupted", ie); } catch (IOException | CpeValidationException ioe) { LOGGER.warn("mix_audit failure", ioe); throw new AnalysisException("mix_audit failure", ioe); } } }
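prepareFileTypeAnalyzer above probes for the mix_audit escript by launching `mix_audit --version` and disabling the analyzer when the process cannot be started or exits non-zero. The same probe pattern, reduced to a hedged C++ sketch: it uses std::system for brevity instead of the ProcessBuilder/ProcessReader machinery, so it does not capture the tool's version output, and the output redirection assumes a POSIX shell.

#include <cstdio>
#include <cstdlib>

int main() {
  // Run the tool with --version; a zero status is treated as "tool available".
  // std::system's return value is implementation-defined, but zero reliably
  // indicates a successful run on the common platforms.
  const int status = std::system("mix_audit --version > /dev/null 2>&1");
  const bool available = (status == 0);
  std::printf("mix_audit %s\n",
              available ? "available" : "not found; the analyzer would be disabled");
  return 0;
}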
// Copyright (c) 2013-2014 Sandstorm Development Group, Inc. and contributors // Licensed under the MIT License: // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. #pragma once #if defined(__GNUC__) && !KJ_HEADER_WARNINGS #pragma GCC system_header #endif #include "array.h" #include "string.h" #include "vector.h" #include "function.h" namespace kj { class ProcessContext { // Context for command-line programs. public: virtual StringPtr getProgramName() = 0; // Get argv[0] as passed to main(). KJ_NORETURN(virtual void exit()) = 0; // Indicates program completion. The program is considered successful unless `error()` was // called. Typically this exits with _Exit(), meaning that the stack is not unwound, buffers // are not flushed, etc. -- it is the responsibility of the caller to flush any buffers that // matter. However, an alternate context implementation e.g. for unit testing purposes could // choose to throw an exception instead. // // At first this approach may sound crazy. Isn't it much better to shut down cleanly? What if // you lose data? However, it turns out that if you look at each common class of program, _Exit() // is almost always preferable. Let's break it down: // // * Commands: A typical program you might run from the command line is single-threaded and // exits quickly and deterministically. Commands often use buffered I/O and need to flush // those buffers before exit. However, most of the work performed by destructors is not // flushing buffers, but rather freeing up memory, placing objects into freelists, and closing // file descriptors. All of this is irrelevant if the process is about to exit anyway, and // for a command that runs quickly, time wasted freeing heap space may make a real difference // in the overall runtime of a script. Meanwhile, it is usually easy to determine exactly what // resources need to be flushed before exit, and easy to tell if they are not being flushed // (because the command fails to produce the expected output). Therefore, it is reasonably // easy for commands to explicitly ensure all output is flushed before exiting, and it is // probably a good idea for them to do so anyway, because write failures should be detected // and handled. For commands, a good strategy is to allocate any objects that require clean // destruction on the stack, and allow them to go out of scope before the command exits. 
// Meanwhile, any resources which do not need to be cleaned up should be allocated as members // of the command's main class, whose destructor normally will not be called. // // * Interactive apps: Programs that interact with the user (whether they be graphical apps // with windows or console-based apps like emacs) generally exit only when the user asks them // to. Such applications may store large data structures in memory which need to be synced // to disk, such as documents or user preferences. However, relying on stack unwind or global // destructors as the mechanism for ensuring such syncing occurs is probably wrong. First of // all, it's 2013, and applications ought to be actively syncing changes to non-volatile // storage the moment those changes are made. Applications can crash at any time and a crash // should never lose data that is more than half a second old. Meanwhile, if a user actually // does try to close an application while unsaved changes exist, the application UI should // prompt the user to decide what to do. Such a UI mechanism is obviously too high level to // be implemented via destructors, so KJ's use of _Exit() shouldn't make a difference here. // // * Servers: A good server is fault-tolerant, prepared for the possibility that at any time // it could crash, the OS could decide to kill it off, or the machine it is running on could // just die. So, using _Exit() should be no problem. In fact, servers generally never even // call exit anyway; they are killed externally. // // * Batch jobs: A long-running batch job is something between a command and a server. It // probably knows exactly what needs to be flushed before exiting, and it probably should be // fault-tolerant. // // Meanwhile, regardless of program type, if you are adhering to KJ style, then the use of // _Exit() shouldn't be a problem anyway: // // * KJ style forbids global mutable state (singletons) in general and global constructors and // destructors in particular. Therefore, everything that could possibly need cleanup either // lives on the stack or is transitively owned by something living on the stack. // // * Calling exit() simply means "Don't clean up anything older than this stack frame.". If you // have resources that require cleanup before exit, make sure they are owned by stack frames // beyond the one that eventually calls exit(). To be as safe as possible, don't place any // state in your program's main class, and don't call exit() yourself. Then, runMainAndExit() // will do it, and the only thing on the stack at that time will be your main class, which // has no state anyway. // // TODO(someday): Perhaps we should use the new std::quick_exit(), so that at_quick_exit() is // available for those who really think they need it. Unfortunately, it is not yet available // on many platforms. virtual void warning(StringPtr message) = 0; // Print the given message to standard error. A newline is printed after the message if it // doesn't already have one. virtual void error(StringPtr message) = 0; // Like `warning()`, but also sets a flag indicating that the process has failed, and that when // it eventually exits it should indicate an error status. KJ_NORETURN(virtual void exitError(StringPtr message)) = 0; // Equivalent to `error(message)` followed by `exit()`. KJ_NORETURN(virtual void exitInfo(StringPtr message)) = 0; // Displays the given non-error message to the user and then calls `exit()`. This is used to // implement things like --help. 
virtual void increaseLoggingVerbosity() = 0; // Increase the level of detail produced by the debug logging system. `MainBuilder` invokes // this if the caller uses the -v flag. // TODO(someday): Add interfaces representing standard OS resources like the filesystem, so that // these things can be mocked out. }; class TopLevelProcessContext final: public ProcessContext { // A ProcessContext implementation appropriate for use at the actual entry point of a process // (as opposed to when you are trying to call a program's main function from within some other // program). This implementation writes errors to stderr, and its `exit()` method actually // calls the C `quick_exit()` function. public: explicit TopLevelProcessContext(StringPtr programName); struct CleanShutdownException { int exitCode; }; // If the environment variable KJ_CLEAN_SHUTDOWN is set, then exit() will actually throw this // exception rather than exiting. `kj::runMain()` catches this exception and returns normally. // This is useful primarily for testing purposes, to assist tools like memory leak checkers that // are easily confused by quick_exit(). StringPtr getProgramName() override; KJ_NORETURN(void exit() override); void warning(StringPtr message) override; void error(StringPtr message) override; KJ_NORETURN(void exitError(StringPtr message) override); KJ_NORETURN(void exitInfo(StringPtr message) override); void increaseLoggingVerbosity() override; private: StringPtr programName; bool cleanShutdown; bool hadErrors = false; }; typedef Function<void(StringPtr programName, ArrayPtr<const StringPtr> params)> MainFunc; int runMainAndExit(ProcessContext& context, MainFunc&& func, int argc, char* argv[]); // Runs the given main function and then exits using the given context. If an exception is thrown, // this will catch it, report it via the context and exit with an error code. // // Normally this function does not return, because returning would probably lead to wasting time // on cleanup when the process is just going to exit anyway. However, to facilitate memory leak // checkers and other tools that require a clean shutdown to do their job, if the environment // variable KJ_CLEAN_SHUTDOWN is set, the function will in fact return an exit code, which should // then be returned from main(). // // Most users will use the KJ_MAIN() macro rather than call this function directly. #define KJ_MAIN(MainClass) \ int main(int argc, char* argv[]) { \ ::kj::TopLevelProcessContext context(argv[0]); \ MainClass mainObject(context); \ return ::kj::runMainAndExit(context, mainObject.getMain(), argc, argv); \ } // Convenience macro for declaring a main function based on the given class. The class must have // a constructor that accepts a ProcessContext& and a method getMain() which returns // kj::MainFunc (probably building it using a MainBuilder). class MainBuilder { // Builds a main() function with nice argument parsing. As options and arguments are parsed, // corresponding callbacks are called, so that you never have to write a massive switch() // statement to interpret arguments. Additionally, this approach encourages you to write // main classes that have a reasonable API that can be used as an alternative to their // command-line interface. // // All StringPtrs passed to MainBuilder must remain valid until option parsing completes. The // assumption is that these strings will all be literals, making this an easy requirement. If // not, consider allocating them in an Arena. 
// // Some flags are automatically recognized by the main functions built by this class: // --help: Prints help text and exits. The help text is constructed based on the // information you provide to the builder as you define each flag. // --verbose: Increase logging verbosity. // --version: Print version information and exit. // // Example usage: // // class FooMain { // public: // FooMain(kj::ProcessContext& context): context(context) {} // // bool setAll() { all = true; return true; } // // Enable the --all flag. // // kj::MainBuilder::Validity setOutput(kj::StringPtr name) { // // Set the output file. // // if (name.endsWith(".foo")) { // outputFile = name; // return true; // } else { // return "Output file must have extension .foo."; // } // } // // kj::MainBuilder::Validity processInput(kj::StringPtr name) { // // Process an input file. // // if (!exists(name)) { // return kj::str(name, ": file not found"); // } // // ... process the input file ... // return true; // } // // kj::MainFunc getMain() { // return MainBuilder(context, "Foo Builder v1.5", "Reads <source>s and builds a Foo.") // .addOption({'a', "all"}, KJ_BIND_METHOD(*this, setAll), // "Frob all the widgets. Otherwise, only some widgets are frobbed.") // .addOptionWithArg({'o', "output"}, KJ_BIND_METHOD(*this, setOutput), // "<filename>", "Output to <filename>. Must be a .foo file.") // .expectOneOrMoreArgs("<source>", KJ_BIND_METHOD(*this, processInput)) // .build(); // } // // private: // bool all = false; // kj::StringPtr outputFile; // kj::ProcessContext& context; // }; public: MainBuilder(ProcessContext& context, StringPtr version, StringPtr briefDescription, StringPtr extendedDescription = nullptr); ~MainBuilder() noexcept(false); class OptionName { public: OptionName() = default; inline OptionName(char shortName): isLong(false), shortName(shortName) {} inline OptionName(const char* longName): isLong(true), longName(longName) {} private: bool isLong; union { char shortName; const char* longName; }; friend class MainBuilder; }; class Validity { public: inline Validity(bool valid) { if (!valid) errorMessage = heapString("invalid argument"); } inline Validity(const char* errorMessage) : errorMessage(heapString(errorMessage)) {} inline Validity(String&& errorMessage) : errorMessage(kj::mv(errorMessage)) {} inline const Maybe<String>& getError() const { return errorMessage; } inline Maybe<String> releaseError() { return kj::mv(errorMessage); } private: Maybe<String> errorMessage; friend class MainBuilder; }; MainBuilder& addOption(std::initializer_list<OptionName> names, Function<Validity()> callback, StringPtr helpText); // Defines a new option (flag). `names` is a list of characters and strings that can be used to // specify the option on the command line. Single-character names are used with "-" while string // names are used with "--". `helpText` is a natural-language description of the flag. // // `callback` is called when the option is seen. Its return value indicates whether the option // was accepted. If not, further option processing stops, and error is written, and the process // exits. // // Example: // // builder.addOption({'a', "all"}, KJ_BIND_METHOD(*this, showAll), "Show all files."); // // This option could be specified in the following ways: // // -a // --all // // Note that single-character option names can be combined into a single argument. For example, // `-abcd` is equivalent to `-a -b -c -d`. // // The help text for this option would look like: // // -a, --all // Show all files. 
// // Note that help text is automatically word-wrapped. MainBuilder& addOptionWithArg(std::initializer_list<OptionName> names, Function<Validity(StringPtr)> callback, StringPtr argumentTitle, StringPtr helpText); // Like `addOption()`, but adds an option which accepts an argument. `argumentTitle` is used in // the help text. The argument text is passed to the callback. // // Example: // // builder.addOptionWithArg({'o', "output"}, KJ_BIND_METHOD(*this, setOutput), // "<filename>", "Output to <filename>."); // // This option could be specified with an argument of "foo" in the following ways: // // -ofoo // -o foo // --output=foo // --output foo // // Note that single-character option names can be combined, but only the last option can have an // argument, since the characters after the option letter are interpreted as the argument. E.g. // `-abofoo` would be equivalent to `-a -b -o foo`. // // The help text for this option would look like: // // -o FILENAME, --output=FILENAME // Output to FILENAME. MainBuilder& addSubCommand(StringPtr name, Function<MainFunc()> getSubParser, StringPtr briefHelpText); // If exactly the given name is seen as an argument, invoke getSubParser() and then pass all // remaining arguments to the parser it returns. This is useful for implementing commands which // have lots of sub-commands, like "git" (which has sub-commands "checkout", "branch", "pull", // etc.). // // `getSubParser` is only called if the command is seen. This avoids building main functions // for commands that aren't used. // // `briefHelpText` should be brief enough to show immediately after the command name on a single // line. It will not be wrapped. Users can use the built-in "help" command to get extended // help on a particular command. MainBuilder& expectArg(StringPtr title, Function<Validity(StringPtr)> callback); MainBuilder& expectOptionalArg(StringPtr title, Function<Validity(StringPtr)> callback); MainBuilder& expectZeroOrMoreArgs(StringPtr title, Function<Validity(StringPtr)> callback); MainBuilder& expectOneOrMoreArgs(StringPtr title, Function<Validity(StringPtr)> callback); // Set callbacks to handle arguments. `expectArg()` and `expectOptionalArg()` specify positional // arguments with special handling, while `expect{Zero,One}OrMoreArgs()` specifies a handler for // an argument list (the handler is called once for each argument in the list). `title` // specifies how the argument should be represented in the usage text. // // All options callbacks are called before argument callbacks, regardless of their ordering on // the command line. This matches GNU getopt's behavior of permuting non-flag arguments to the // end of the argument list. Also matching getopt, the special option "--" indicates that the // rest of the command line is all arguments, not options, even if they start with '-'. // // The interpretation of positional arguments is fairly flexible. The non-optional arguments can // be expected at the beginning, end, or in the middle. If more arguments are specified than // the number of non-optional args, they are assigned to the optional argument handlers in the // order of registration. // // For example, say you called: // builder.expectArg("<foo>", ...); // builder.expectOptionalArg("<bar>", ...); // builder.expectArg("<baz>", ...); // builder.expectZeroOrMoreArgs("<qux>", ...); // builder.expectArg("<corge>", ...); // // This command requires at least three arguments: foo, baz, and corge. If four arguments are // given, the second is assigned to bar. 
If five or more arguments are specified, then the // arguments between the third and last are assigned to qux. Note that it never makes sense // to call `expect*OrMoreArgs()` more than once since only the first call would ever be used. // // In practice, you probably shouldn't create such complicated commands as in the above example. // But, this flexibility seems necessary to support commands where the first argument is special // as well as commands (like `cp`) where the last argument is special. MainBuilder& callAfterParsing(Function<Validity()> callback); // Call the given function after all arguments have been parsed. MainFunc build(); // Build the "main" function, which simply parses the arguments. Once this returns, the // `MainBuilder` is no longer valid. private: struct Impl; Own<Impl> impl; class MainImpl; }; } // namespace kj
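// ---------------------------------------------------------------------------
// Hedged usage sketch (not part of the original header): a minimal command
// built with kj::MainBuilder and wired up via KJ_MAIN, assuming the header is
// installed as <kj/main.h>. The class and member names (HelloMain, setName,
// greet, name) are illustrative assumptions; only the kj APIs declared above
// are relied on.

#include <kj/main.h>

class HelloMain {
public:
  explicit HelloMain(kj::ProcessContext& context): context(context) {}

  kj::MainBuilder::Validity setName(kj::StringPtr arg) {
    // Remember the positional argument; returning true accepts it.
    name = kj::heapString(arg);
    return true;
  }

  kj::MainBuilder::Validity greet() {
    // exitInfo() prints the message and terminates the process; it never returns.
    context.exitInfo(kj::str("Hello, ", name, "!"));
  }

  kj::MainFunc getMain() {
    return kj::MainBuilder(context, "hello v0.1", "Prints a greeting.")
        .expectArg("<name>", KJ_BIND_METHOD(*this, setName))
        .callAfterParsing(KJ_BIND_METHOD(*this, greet))
        .build();
  }

private:
  kj::ProcessContext& context;
  kj::String name;
};

KJ_MAIN(HelloMain)
// ---------------------------------------------------------------------------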
// Copyright 2021 The Terasology Foundation
// SPDX-License-Identifier: Apache-2.0
package org.terasology.engine.logic.common;

import com.google.common.collect.Sets;
import org.terasology.engine.network.Replicate;
import org.terasology.gestalt.entitysystem.component.Component;

import java.util.Set;

/**
 * This component lists the component classes that should be retained when converting between blocks
 * and block items.
 * <p>
 * If a block (item) entity has a component that is not part of its prefab, retaining that component means the block
 * entity still has it after the conversion. If the component is not retained, it is likely to be removed instead.
 */
public class RetainComponentsComponent implements Component<RetainComponentsComponent> {
    @Replicate
    public Set<Class<? extends Component>> components = Sets.newHashSet();

    @Override
    public void copyFrom(RetainComponentsComponent other) {
        // Copying the set is enough here since the entries are Class references.
        // TODO: investigate whether a deeper copy is ever needed.
        this.components = Sets.newHashSet(other.components);
    }
}
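// Hedged usage sketch (not from the original sources): registering a component class so that a
// custom component survives the block <-> block item conversion. The component class
// (FrobnicatorComponent) and the entity reference (blockEntity) are illustrative assumptions,
// as is the exact EntityRef API used to attach the component.
//
//     RetainComponentsComponent retain = new RetainComponentsComponent();
//     retain.components.add(FrobnicatorComponent.class);
//     blockEntity.addComponent(retain);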
/* * Copyright (C) 2018 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.android.tools.idea.gradle.project.sync.idea.data; import static com.android.tools.idea.Projects.getBaseDirPath; import static com.android.tools.idea.gradle.project.importing.ProjectFolder.deleteLibrariesFolder; import com.android.tools.idea.IdeInfo; import com.android.tools.idea.gradle.project.GradleProjectInfo; import com.intellij.ide.caches.CachesInvalidator; import com.intellij.openapi.project.Project; import com.intellij.openapi.project.ProjectManager; public class IdeaSyncCachesInvalidator extends CachesInvalidator { @Override public void invalidateCaches() { Project[] openProjects = ProjectManager.getInstance().getOpenProjects(); for (Project project : openProjects) { if (GradleProjectInfo.getInstance(project).isBuildWithGradle()) { DataNodeCaches.getInstance(project).clearCaches(); if (IdeInfo.getInstance().isAndroidStudio()) { // Remove contents in .idea/libraries to recover from any invalid library entries. deleteLibrariesFolder(getBaseDirPath(project)); } } } } }
{ "name": "Skyline S5", "description": "A sewing machine.", "url": "https://www.janome.com/machines/sewing/skyline--s5/" }
#include <limits.h> #include <stdlib.h> #include <math.h> #include <string.h> #include "luksan.h" #define MAX2(a,b) ((a) > (b) ? (a) : (b)) #define MIN2(a,b) ((a) < (b) ? (a) : (b)) /* Table of constant values */ static double c_b7 = 0.; /* *********************************************************************** */ /* SUBROUTINE PNET ALL SYSTEMS 01/09/22 */ /* PURPOSE : */ /* GENERAL SUBROUTINE FOR LARGE-SCALE BOX CONSTRAINED MINIMIZATION THAT */ /* USE THE LIMITED MEMORY VARIABLE METRIC METHOD BASED ON THE STRANG */ /* RECURRENCES. */ /* PARAMETERS : */ /* II NF NUMBER OF VARIABLES. */ /* II NB CHOICE OF SIMPLE BOUNDS. NB=0-SIMPLE BOUNDS SUPPRESSED. */ /* NB>0-SIMPLE BOUNDS ACCEPTED. */ /* RI X(NF) VECTOR OF VARIABLES. */ /* II IX(NF) VECTOR CONTAINING TYPES OF BOUNDS. IX(I)=0-VARIABLE */ /* X(I) IS UNBOUNDED. IX(I)=1-LOVER BOUND XL(I).LE.X(I). */ /* IX(I)=2-UPPER BOUND X(I).LE.XU(I). IX(I)=3-TWO SIDE BOUND */ /* XL(I).LE.X(I).LE.XU(I). IX(I)=5-VARIABLE X(I) IS FIXED. */ /* RI XL(NF) VECTOR CONTAINING LOWER BOUNDS FOR VARIABLES. */ /* RI XU(NF) VECTOR CONTAINING UPPER BOUNDS FOR VARIABLES. */ /* RO GF(NF) GRADIENT OF THE OBJECTIVE FUNCTION. */ /* RA GN(NF) OLD GRADIENT OF THE OBJECTIVE FUNCTION. */ /* RO S(NF) DIRECTION VECTOR. */ /* RA XO(NF) ARRAY CONTAINING INCREMENTS OF VARIABLES. */ /* RA GO(NF) ARRAY CONTAINING INCREMENTS OF GRADIENTS. */ /* RA XS(NF) AUXILIARY VECTOR. */ /* RA GS(NF) AUXILIARY VECTOR. */ /* RA XM(NF*MF) ARRAY CONTAINING INCREMENTS OF VARIABLES. */ /* RA GM(NF*MF) ARRAY CONTAINING INCREMENTS OF GRADIENTS. */ /* RA U1(MF) AUXILIARY VECTOR. */ /* RA U2(MF) AUXILIARY VECTOR. */ /* RI XMAX MAXIMUM STEPSIZE. */ /* RI TOLX TOLERANCE FOR CHANGE OF VARIABLES. */ /* RI TOLF TOLERANCE FOR CHANGE OF FUNCTION VALUES. */ /* RI TOLB TOLERANCE FOR THE FUNCTION VALUE. */ /* RI TOLG TOLERANCE FOR THE GRADIENT NORM. */ /* RI MINF_EST ESTIMATION OF THE MINIMUM FUNCTION VALUE. */ /* RO GMAX MAXIMUM PARTIAL DERIVATIVE. */ /* RO F VALUE OF THE OBJECTIVE FUNCTION. */ /* II MIT MAXIMUM NUMBER OF ITERATIONS. */ /* II MFV MAXIMUM NUMBER OF FUNCTION EVALUATIONS. */ /* II MFG MAXIMUM NUMBER OF GRADIENT EVALUATIONS. */ /* II IEST ESTIMATION INDICATOR. IEST=0-MINIMUM IS NOT ESTIMATED. */ /* IEST=1-MINIMUM IS ESTIMATED BY THE VALUE MINF_EST. */ /* II MOS1 CHOICE OF RESTARTS AFTER A CONSTRAINT CHANGE. */ /* MOS1=1-RESTARTS ARE SUPPRESSED. MOS1=2-RESTARTS WITH */ /* STEEPEST DESCENT DIRECTIONS ARE USED. */ /* II MOS1 CHOICE OF DIRECTION VECTORS AFTER RESTARTS. MOS1=1-THE */ /* NEWTON DIRECTIONS ARE USED. MOS1=2-THE STEEPEST DESCENT */ /* DIRECTIONS ARE USED. */ /* II MOS2 CHOICE OF PRECONDITIONING STRATEGY. MOS2=1-PRECONDITIONING */ /* IS NOT USED. MOS2=2-PRECONDITIONING BY THE LIMITED MEMORY */ /* BFGS METHOD IS USED. */ /* II MF THE NUMBER OF LIMITED-MEMORY VARIABLE METRIC UPDATES */ /* IN EACH ITERATION (THEY USE 2*MF STORED VECTORS). */ /* IO ITERM VARIABLE THAT INDICATES THE CAUSE OF TERMINATION. */ /* ITERM=1-IF ABS(X-XO) WAS LESS THAN OR EQUAL TO TOLX IN */ /* MTESX (USUALLY TWO) SUBSEQUEBT ITERATIONS. */ /* ITERM=2-IF ABS(F-FO) WAS LESS THAN OR EQUAL TO TOLF IN */ /* MTESF (USUALLY TWO) SUBSEQUEBT ITERATIONS. */ /* ITERM=3-IF F IS LESS THAN OR EQUAL TO TOLB. */ /* ITERM=4-IF GMAX IS LESS THAN OR EQUAL TO TOLG. */ /* ITERM=6-IF THE TERMINATION CRITERION WAS NOT SATISFIED, */ /* BUT THE SOLUTION OBTAINED IS PROBABLY ACCEPTABLE. */ /* ITERM=11-IF NIT EXCEEDED MIT. ITERM=12-IF NFV EXCEEDED MFV. */ /* ITERM=13-IF NFG EXCEEDED MFG. ITERM<0-IF THE METHOD FAILED. 
*/ /* VARIABLES IN COMMON /STAT/ (STATISTICS) : */ /* IO NRES NUMBER OF RESTARTS. */ /* IO NDEC NUMBER OF MATRIX DECOMPOSITION. */ /* IO NIN NUMBER OF INNER ITERATIONS. */ /* IO NIT NUMBER OF ITERATIONS. */ /* IO NFV NUMBER OF FUNCTION EVALUATIONS. */ /* IO NFG NUMBER OF GRADIENT EVALUATIONS. */ /* IO NFH NUMBER OF HESSIAN EVALUATIONS. */ /* SUBPROGRAMS USED : */ /* S PCBS04 ELIMINATION OF BOX CONSTRAINT VIOLATIONS. */ /* S PS1L01 STEPSIZE SELECTION USING LINE SEARCH. */ /* S PYADC0 ADDITION OF A BOX CONSTRAINT. */ /* S PYFUT1 TEST ON TERMINATION. */ /* S PYRMC0 DELETION OF A BOX CONSTRAINT. */ /* S PYTRCD COMPUTATION OF PROJECTED DIFFERENCES FOR THE VARIABLE METRIC */ /* UPDATE. */ /* S PYTRCG COMPUTATION OF THE PROJECTED GRADIENT. */ /* S PYTRCS COMPUTATION OF THE PROJECTED DIRECTION VECTOR. */ /* S MXDRCB BACKWARD PART OF THE STRANG FORMULA FOR PREMULTIPLICATION */ /* OF THE VECTOR X BY AN IMPLICIT BFGS UPDATE. */ /* S MXDRCF FORWARD PART OF THE STRANG FORMULA FOR PREMULTIPLICATION */ /* OF THE VECTOR X BY AN IMPLICIT BFGS UPDATE. */ /* S MXDRSU SHIFT OF COLUMNS OF THE RECTANGULAR MATRICES A AND B. */ /* SHIFT OF ELEMENTS OF THE VECTOR U. THESE SHIFTS ARE USED IN */ /* THE LIMITED MEMORY BFGS METHOD. */ /* S MXUDIR VECTOR AUGMENTED BY THE SCALED VECTOR. */ /* RF MXUDOT DOT PRODUCT OF TWO VECTORS. */ /* S MXVNEG COPYING OF A VECTOR WITH CHANGE OF THE SIGN. */ /* S MXVCOP COPYING OF A VECTOR. */ /* S MXVSCL SCALING OF A VECTOR. */ /* S MXVSET INITIATINON OF A VECTOR. */ /* S MXVDIF DIFFERENCE OF TWO VECTORS. */ /* EXTERNAL SUBROUTINES : */ /* SE OBJ COMPUTATION OF THE VALUE OF THE OBJECTIVE FUNCTION. */ /* CALLING SEQUENCE: CALL OBJ(NF,X,FF) WHERE NF IS THE NUMBER */ /* OF VARIABLES, X(NF) IS THE VECTOR OF VARIABLES AND FF IS */ /* THE VALUE OF THE OBJECTIVE FUNCTION. */ /* SE DOBJ COMPUTATION OF THE GRADIENT OF THE OBJECTIVE FUNCTION. */ /* CALLING SEQUENCE: CALL DOBJ(NF,X,GF) WHERE NF IS THE NUMBER */ /* OF VARIABLES, X(NF) IS THE VECTOR OF VARIABLES AND GF(NF) */ /* IS THE GRADIENT OF THE OBJECTIVE FUNCTION. */ /* -- OBJ and DOBJ are replaced by a single function, objgrad, in NLopt */ /* METHOD : */ /* LIMITED MEMORY VARIABLE METRIC METHOD BASED ON THE STRANG */ /* RECURRENCES. 
*/ static void pnet_(int *nf, int *nb, double *x, int * ix, double *xl, double *xu, double *gf, double *gn, double *s, double *xo, double *go, double *xs, double *gs, double *xm, double *gm, double *u1, double *u2, double *xmax, double *tolx, double *tolf, double *tolb, double *tolg, nlopt_stopping *stop, double *minf_est, double * gmax, double *f, int *mit, int *mfv, int *mfg, int *iest, int *mos1, int *mos2, int *mf, int *iterm, stat_common *stat_1, nlopt_func objgrad, void *objgrad_data) { /* System generated locals */ int i__1; double d__1, d__2; /* Builtin functions */ /* Local variables */ double a = 0.0, b = 0.0; int i__, n; double p, r__; int kd, ld; double fo, fp, po, pp, ro, rp; int mx, kbf; double alf; double par; int mes, kit; double rho, eps; int mmx; double alf1, alf2, eta0, eta9, par1, par2; double rho1, rho2, eps8, eps9; int mred, iold, nred; double maxf, dmax__; int xstop = 0; int inew; double told; int ites; double rmin, rmax, umax, tolp, tols; int isys; int ires1, ires2; int iterd, mtesf, ntesf; double gnorm; int iters, irest, inits, kters, maxst; double snorm; int mtesx, ntesx; ps1l01_state state; (void) tolb; /* INITIATION */ /* Parameter adjustments */ --u2; --u1; --gm; --xm; --gs; --xs; --go; --xo; --s; --gn; --gf; --xu; --xl; --ix; --x; /* Function Body */ kbf = 0; if (*nb > 0) { kbf = 2; } stat_1->nres = 0; stat_1->ndec = 0; stat_1->nin = 0; stat_1->nit = 0; stat_1->nfg = 0; stat_1->nfh = 0; isys = 0; ites = 1; mtesx = 2; mtesf = 2; inits = 2; *iterm = 0; iterd = 0; iters = 2; kters = 3; irest = 0; ires1 = 999; ires2 = 0; mred = 10; mes = 4; eps = .8; eta0 = 1e-15; eta9 = 1e120; eps8 = 1.; eps9 = 1e-8; alf1 = 1e-10; alf2 = 1e10; rmax = eta9; dmax__ = eta9; maxf = 1e20; if (*iest <= 0) { *minf_est = -HUGE_VAL; /* changed from -1e60 by SGJ */ } if (*iest > 0) { *iest = 1; } if (*xmax <= 0.) { *xmax = 1e16; } if (*tolx <= 0.) { *tolx = 1e-16; } if (*tolf <= 0.) { *tolf = 1e-14; } if (*tolg <= 0.) { *tolg = 1e-8; /* SGJ: was 1e-6, but this sometimes stops too soon */ } #if 0 /* removed by SGJ: this check prevented us from using minf_max <= 0, which doesn't make sense. Instead, if you don't want to have a lower limit, you should set minf_max = -HUGE_VAL */ if (*tolb <= 0.) 
{ *tolb = *minf_est + 1e-16; } #endif told = 1e-4; tols = 1e-4; tolp = .9; /* changed by SGJ: default is no limit (INT_MAX) on # iterations/fevals */ if (*mit <= 0) { *mit = INT_MAX; } if (*mfv <= 0) { *mfv = INT_MAX; } if (*mfg <= 0) { *mfg = INT_MAX; } if (*mos1 <= 0) { *mos1 = 1; } if (*mos2 <= 0) { *mos2 = 1; } kd = 1; ld = -1; kit = -(ires1 * *nf + ires2); fo = *minf_est; /* INITIAL OPERATIONS WITH SIMPLE BOUNDS */ if (kbf > 0) { i__1 = *nf; for (i__ = 1; i__ <= i__1; ++i__) { if ((ix[i__] == 3 || ix[i__] == 4) && xu[i__] <= xl[i__]) { xu[i__] = xl[i__]; ix[i__] = 5; } else if (ix[i__] == 5 || ix[i__] == 6) { xl[i__] = x[i__]; xu[i__] = x[i__]; ix[i__] = 5; } /* L2: */ } luksan_pcbs04__(nf, &x[1], &ix[1], &xl[1], &xu[1], &eps9, &kbf); luksan_pyadc0__(nf, &n, &x[1], &ix[1], &xl[1], &xu[1], &inew); } *f = objgrad(*nf, &x[1], &gf[1], objgrad_data); ++(*stop->nevals_p); ++stat_1->nfg; if (nlopt_stop_time(stop)) { *iterm = 100; goto L11080; } ld = kd; L11020: luksan_pytrcg__(nf, nf, &ix[1], &gf[1], &umax, gmax, &kbf, &iold); luksan_mxvcop__(nf, &gf[1], &gn[1]); luksan_pyfut1__(nf, f, &fo, &umax, gmax, xstop, stop, tolg, &kd, &stat_1->nit, &kit, mit, &stat_1->nfg, mfg, & ntesx, &mtesx, &ntesf, &mtesf, &ites, &ires1, &ires2, &irest, & iters, iterm); if (*iterm != 0) { goto L11080; } if (nlopt_stop_time(stop)) { *iterm = 100; goto L11080; } if (kbf > 0) { luksan_pyrmc0__(nf, &n, &ix[1], &gn[1], &eps8, &umax, gmax, &rmax, & iold, &irest); if (umax > eps8 * *gmax) { irest = MAX2(irest,1); } } luksan_mxvcop__(nf, &x[1], &xo[1]); L11040: /* DIRECTION DETERMINATION */ if (irest != 0) { if (kit < stat_1->nit) { mx = 0; ++stat_1->nres; kit = stat_1->nit; } else { *iterm = -10; if (iters < 0) { *iterm = iters - 5; } goto L11080; } if (*mos1 > 1) { luksan_mxvneg__(nf, &gn[1], &s[1]); gnorm = sqrt(luksan_mxudot__(nf, &gn[1], &gn[1], &ix[1], &kbf)); snorm = gnorm; goto L12560; } } rho1 = luksan_mxudot__(nf, &gn[1], &gn[1], &ix[1], &kbf); gnorm = sqrt(rho1); /* Computing MIN */ d__1 = eps, d__2 = sqrt(gnorm); par = MIN2(d__1,d__2); if (par > .01) { /* Computing MIN */ d__1 = par, d__2 = 1. / (double) stat_1->nit; par = MIN2(d__1,d__2); } par *= par; /* CG INITIATION */ rho = rho1; snorm = 0.; luksan_mxvset__(nf, &c_b7, &s[1]); luksan_mxvneg__(nf, &gn[1], &gs[1]); luksan_mxvcop__(nf, &gs[1], &xs[1]); if (*mos2 > 1) { if (mx == 0) { b = 0.; } else { b = luksan_mxudot__(nf, &xm[1], &gm[1], &ix[1], &kbf); } if (b > 0.) { u1[1] = 1. / b; luksan_mxdrcb__(nf, &mx, &xm[1], &gm[1], &u1[1], &u2[1], &xs[1], & ix[1], &kbf); a = luksan_mxudot__(nf, &gm[1], &gm[1], &ix[1], &kbf); if (a > 0.) { d__1 = b / a; luksan_mxvscl__(nf, &d__1, &xs[1], &xs[1]); } luksan_mxdrcf__(nf, &mx, &xm[1], &gm[1], &u1[1], &u2[1], &xs[1], & ix[1], &kbf); } } rho = luksan_mxudot__(nf, &gs[1], &xs[1], &ix[1], &kbf); /* SIG=RHO */ mmx = *nf + 3; nred = 0; L12520: ++nred; if (nred > mmx) { goto L12550; } fo = *f; pp = sqrt(eta0 / luksan_mxudot__(nf, &xs[1], &xs[1], &ix[1], &kbf)); ld = 0; luksan_mxudir__(nf, &pp, &xs[1], &xo[1], &x[1], &ix[1], &kbf); objgrad(*nf, &x[1], &gf[1], objgrad_data); ++*(stop->nevals_p); ++stat_1->nfg; ld = kd; luksan_mxvdif__(nf, &gf[1], &gn[1], &go[1]); *f = fo; d__1 = 1. / pp; luksan_mxvscl__(nf, &d__1, &go[1], &go[1]); alf = luksan_mxudot__(nf, &xs[1], &go[1], &ix[1], &kbf); if (alf <= 1. 
/ eta9) { /* IF (ALF.LE.1.0D-8*SIG) THEN */ /* CG FAILS (THE MATRIX IS NOT POSITIVE DEFINITE) */ if (nred == 1) { luksan_mxvneg__(nf, &gn[1], &s[1]); snorm = gnorm; } iterd = 0; goto L12560; } else { iterd = 2; } /* CG STEP */ alf = rho / alf; luksan_mxudir__(nf, &alf, &xs[1], &s[1], &s[1], &ix[1], &kbf); d__1 = -alf; luksan_mxudir__(nf, &d__1, &go[1], &gs[1], &gs[1], &ix[1], &kbf); rho2 = luksan_mxudot__(nf, &gs[1], &gs[1], &ix[1], &kbf); snorm = sqrt(luksan_mxudot__(nf, &s[1], &s[1], &ix[1], &kbf)); if (rho2 <= par * rho1) { goto L12560; } if (nred >= mmx) { goto L12550; } if (*mos2 > 1) { if (b > 0.) { luksan_mxvcop__(nf, &gs[1], &go[1]); luksan_mxdrcb__(nf, &mx, &xm[1], &gm[1], &u1[1], &u2[1], &go[1], & ix[1], &kbf); if (a > 0.) { d__1 = b / a; luksan_mxvscl__(nf, &d__1, &go[1], &go[1]); } luksan_mxdrcf__(nf, &mx, &xm[1], &gm[1], &u1[1], &u2[1], &go[1], & ix[1], &kbf); rho2 = luksan_mxudot__(nf, &gs[1], &go[1], &ix[1], &kbf); alf = rho2 / rho; luksan_mxudir__(nf, &alf, &xs[1], &go[1], &xs[1], &ix[1], &kbf); } else { alf = rho2 / rho; luksan_mxudir__(nf, &alf, &xs[1], &gs[1], &xs[1], &ix[1], &kbf); } } else { alf = rho2 / rho; luksan_mxudir__(nf, &alf, &xs[1], &gs[1], &xs[1], &ix[1], &kbf); } rho = rho2; /* SIG=RHO2+ALF*ALF*SIG */ goto L12520; L12550: /* AN INEXACT SOLUTION IS OBTAINED */ L12560: /* ------------------------------ */ /* END OF DIRECTION DETERMINATION */ /* ------------------------------ */ luksan_mxvcop__(nf, &xo[1], &x[1]); luksan_mxvcop__(nf, &gn[1], &gf[1]); if (kd > 0) { p = luksan_mxudot__(nf, &gn[1], &s[1], &ix[1], &kbf); } if (iterd < 0) { *iterm = iterd; } else { /* TEST ON DESCENT DIRECTION */ if (snorm <= 0.) { irest = MAX2(irest,1); } else if (p + told * gnorm * snorm <= 0.) { irest = 0; } else { /* UNIFORM DESCENT CRITERION */ irest = MAX2(irest,1); } if (irest == 0) { /* PREPARATION OF LINE SEARCH */ nred = 0; rmin = alf1 * gnorm / snorm; /* Computing MIN */ d__1 = alf2 * gnorm / snorm, d__2 = *xmax / snorm; rmax = MIN2(d__1,d__2); } } ld = kd; if (*iterm != 0) { goto L11080; } if (nlopt_stop_time(stop)) { *iterm = 100; goto L11080; } if (irest != 0) { goto L11040; } luksan_pytrcs__(nf, &x[1], &ix[1], &xo[1], &xl[1], &xu[1], &gf[1], &go[1], &s[1], &ro, &fp, &fo, f, &po, &p, &rmax, &eta9, &kbf); if (rmax == 0.) 
{ goto L11075; } L11060: luksan_ps1l01__(&r__, &rp, f, &fo, &fp, &p, &po, &pp, minf_est, &maxf, &rmin, &rmax, &tols, &tolp, &par1, &par2, &kd, &ld, &stat_1->nit, &kit, & nred, &mred, &maxst, iest, &inits, &iters, &kters, &mes, &isys, &state); if (isys == 0) { goto L11064; } luksan_mxudir__(nf, &r__, &s[1], &xo[1], &x[1], &ix[1], &kbf); luksan_pcbs04__(nf, &x[1], &ix[1], &xl[1], &xu[1], &eps9, &kbf); *f = objgrad(*nf, &x[1], &gf[1], objgrad_data); ++*(stop->nevals_p); ++stat_1->nfg; ld = kd; p = luksan_mxudot__(nf, &gf[1], &s[1], &ix[1], &kbf); goto L11060; L11064: if (iters <= 0) { r__ = 0.; *f = fo; p = po; luksan_mxvcop__(nf, &xo[1], &x[1]); luksan_mxvcop__(nf, &go[1], &gf[1]); irest = MAX2(irest,1); ld = kd; goto L11040; } luksan_pytrcd__(nf, &x[1], &ix[1], &xo[1], &gf[1], &go[1], &r__, f, &fo, & p, &po, &dmax__, &kbf, &kd, &ld, &iters); xstop = nlopt_stop_dx(stop, &x[1], &xo[1]); if (*mos2 > 1) { /* Computing MIN */ i__1 = mx + 1; mx = MIN2(i__1,*mf); luksan_mxdrsu__(nf, &mx, &xm[1], &gm[1], &u1[1]); luksan_mxvcop__(nf, &xo[1], &xm[1]); luksan_mxvcop__(nf, &go[1], &gm[1]); } L11075: if (kbf > 0) { luksan_pyadc0__(nf, &n, &x[1], &ix[1], &xl[1], &xu[1], &inew); if (inew > 0) { irest = MAX2(irest,1); } } goto L11020; L11080: return; } /* pnet_ */ /* NLopt wrapper around pnet_, handling dynamic allocation etc. */ nlopt_result luksan_pnet(int n, nlopt_func f, void *f_data, const double *lb, const double *ub, /* bounds */ double *x, /* in: initial guess, out: minimizer */ double *minf, nlopt_stopping *stop, int mf, /* subspace dimension (0 for default) */ int mos1, int mos2) /* 1 or 2 */ { int i, *ix, nb = 1; double *work; double *xl, *xu, *gf, *gn, *s, *xo, *go, *xs, *gs, *xm, *gm, *u1, *u2; double gmax, minf_est; double xmax = 0; /* no maximum */ double tolg = 0; /* default gradient tolerance */ int iest = 0; /* we have no estimate of min function value */ int mit = 0, mfg = 0; /* default no limit on #iterations */ int mfv = stop->maxeval; stat_common stat; int iterm; ix = (int*) malloc(sizeof(int) * n); if (!ix) return NLOPT_OUT_OF_MEMORY; if (mf <= 0) { mf = MAX2(MEMAVAIL/n, 10); if (stop->maxeval && stop->maxeval <= mf) mf = MAX2(stop->maxeval, 1); } retry_alloc: work = (double*) malloc(sizeof(double) * (n * 9 + MAX2(n,n*mf)*2 + MAX2(n,mf)*2)); if (!work) { if (mf > 0) { mf = 0; /* allocate minimal memory */ goto retry_alloc; } free(ix); return NLOPT_OUT_OF_MEMORY; } xl = work; xu = xl + n; gf = xu + n; gn = gf + n; s = gn + n; xo = s + n; go = xo + n; xs = go + n; gs = xs + n; xm = gs + n; gm = xm + MAX2(n*mf,n); u1 = gm + MAX2(n*mf,n); u2 = u1 + MAX2(n,mf); for (i = 0; i < n; ++i) { int lbu = lb[i] <= -0.99 * HUGE_VAL; /* lb unbounded */ int ubu = ub[i] >= 0.99 * HUGE_VAL; /* ub unbounded */ ix[i] = lbu ? (ubu ? 0 : 2) : (ubu ? 1 : (lb[i] == ub[i] ? 5 : 3)); xl[i] = lb[i]; xu[i] = ub[i]; } /* ? xo does not seem to be initialized in the original Fortran code, but it is used upon input to pnet if mf > 0 ... perhaps ALLOCATE initializes arrays to zero by default? 
*/ memset(xo, 0, sizeof(double) * MAX2(n,n*mf)); pnet_(&n, &nb, x, ix, xl, xu, gf, gn, s, xo, go, xs, gs, xm, gm, u1, u2, &xmax, /* fixme: pass tol_rel and tol_abs and use NLopt check */ &stop->xtol_rel, &stop->ftol_rel, &stop->minf_max, &tolg, stop, &minf_est, &gmax, minf, &mit, &mfv, &mfg, &iest, &mos1, &mos2, &mf, &iterm, &stat, f, f_data); free(work); free(ix); switch (iterm) { case 1: return NLOPT_XTOL_REACHED; case 2: return NLOPT_FTOL_REACHED; case 3: return NLOPT_MINF_MAX_REACHED; case 4: return NLOPT_SUCCESS; /* gradient tolerance reached */ case 6: return NLOPT_SUCCESS; case 12: case 13: return NLOPT_MAXEVAL_REACHED; case 100: return NLOPT_MAXTIME_REACHED; case -999: return NLOPT_FORCED_STOP; default: return NLOPT_FAILURE; } }
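/* Hedged usage sketch (not part of the original file): within NLopt, luksan_pnet is normally
 * reached through the public C API via the truncated-Newton algorithms (for example
 * NLOPT_LD_TNEWTON_PRECOND_RESTART). The sphere objective below is purely illustrative. */

#include <stdio.h>
#include <nlopt.h>

static double sphere(unsigned n, const double *x, double *grad, void *data)
{
    double f = 0.0;
    (void) data;
    for (unsigned i = 0; i < n; ++i) {
        if (grad) grad[i] = 2.0 * x[i];   /* LD algorithms require the gradient */
        f += x[i] * x[i];
    }
    return f;
}

int main(void)
{
    double lb[2] = { -10.0, -10.0 }, ub[2] = { 10.0, 10.0 };
    double x[2] = { 3.0, -4.0 };          /* initial guess */
    double minf;

    nlopt_opt opt = nlopt_create(NLOPT_LD_TNEWTON_PRECOND_RESTART, 2);
    nlopt_set_lower_bounds(opt, lb);
    nlopt_set_upper_bounds(opt, ub);
    nlopt_set_min_objective(opt, sphere, NULL);
    nlopt_set_xtol_rel(opt, 1e-8);

    nlopt_result res = nlopt_optimize(opt, x, &minf);
    printf("status=%d, minf=%g at (%g, %g)\n", (int) res, minf, x[0], x[1]);

    nlopt_destroy(opt);
    return res < 0 ? 1 : 0;
}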
<reponame>kensho-technologies/graphql-compiler # Copyright 2019-present Kensho Technologies, LLC. from copy import copy from dataclasses import dataclass from typing import FrozenSet, List, Optional, Tuple, cast from graphql import print_ast from graphql.language.ast import ( ArgumentNode, DirectiveNode, DocumentNode, FieldNode, InlineFragmentNode, ListValueNode, NameNode, OperationDefinitionNode, SelectionSetNode, StringValueNode, ) from graphql.pyutils import FrozenList from ..ast_manipulation import get_only_query_definition from ..exceptions import GraphQLValidationError from ..schema import FilterDirective, OutputDirective from ..schema_transformation.split_query import AstType, SubQueryNode @dataclass class SubQueryPlan: """Query plan for a part of a larger query over a single schema.""" # Representing a piece of the overall query with directives added. query_ast: DocumentNode # Identifier for the schema that this query piece targets. schema_id: Optional[str] # The query that the current query depends on, or None if the current query does not # depend on another. parent_query_plan: Optional["SubQueryPlan"] # The queries that depend on the current query. child_query_plans: List["SubQueryPlan"] @dataclass(frozen=True) class OutputJoinDescriptor: """Description of what outputs should be joined and how.""" # (parent output name, child output name) # May be expanded to have more attributes, e.g. is_optional, describing how the join # should be made. output_names: Tuple[str, str] @dataclass(frozen=True) class QueryPlanDescriptor: """Describes a query plan including output join information and intermediate output names.""" # The root of the query plan. root_sub_query_plan: SubQueryPlan # Names of outputs to be removed at the end. intermediate_output_names: FrozenSet[str] # Describing which outputs should be joined and how. output_join_descriptors: List[OutputJoinDescriptor] def _make_query_plan_recursive( sub_query_node: SubQueryNode, sub_query_plan: SubQueryPlan, output_join_descriptors: List[OutputJoinDescriptor], ) -> None: """Recursively copy the structure of sub_query_node onto sub_query_plan. For each child connection contained in sub_query_node, create a new SubQueryPlan for the corresponding child SubQueryNode, add appropriate @filter directive to the child AST, and attach the new SubQueryPlan to the list of children of the input sub-query plan. Args: sub_query_node: SubQueryNode, whose child_query_connections are copied over onto sub_query_plan. It is not modified by this function. sub_query_plan: SubQueryPlan, whose list of child query plans and query AST are modified. output_join_descriptors: describing which outputs should be joined and how. 
""" # Iterate through child connections of query node for child_query_connection in sub_query_node.child_query_connections: child_sub_query_node = child_query_connection.sink_query_node parent_out_name = child_query_connection.source_field_out_name child_out_name = child_query_connection.sink_field_out_name child_query_type = get_only_query_definition( child_sub_query_node.query_ast, GraphQLValidationError ) child_query_type_with_filter = _add_filter_at_field_with_output( child_query_type, child_out_name, parent_out_name # @filter's local variable is named the same as the out_name of the parent's @output ) if child_query_type is child_query_type_with_filter: raise AssertionError( 'An @output directive with out_name "{}" is unexpectedly not found in the ' 'AST "{}".'.format(child_out_name, child_query_type) ) else: new_child_query_ast = DocumentNode(definitions=[child_query_type_with_filter]) # Create new SubQueryPlan for child child_sub_query_plan = SubQueryPlan( query_ast=new_child_query_ast, schema_id=child_sub_query_node.schema_id, parent_query_plan=sub_query_plan, child_query_plans=[], ) # Add new SubQueryPlan to parent's child list sub_query_plan.child_query_plans.append(child_sub_query_plan) # Add information about this edge new_output_join_descriptor = OutputJoinDescriptor( output_names=(parent_out_name, child_out_name), ) output_join_descriptors.append(new_output_join_descriptor) # Recursively repeat on child SubQueryPlans _make_query_plan_recursive( child_sub_query_node, child_sub_query_plan, output_join_descriptors ) def _add_filter_at_field_with_output( ast: AstType, field_out_name: str, input_filter_name: str ) -> AstType: """Return an AST with @filter added at the field with the specified @output, if found. Args: ast: AST Node type that occurs in the selections of a SelectionSet. It is not modified by this function. field_out_name: the out_name of an @output directive. This function will create a new @filter directive on the field that has an @output directive with this out_name. input_filter_name: the name of the local variable in the new @filter directive created. Returns: AST node identical to the input AST except with a @filter added at the specified field if such a field is found. If no changes were made, this is the same object as the input. """ if not isinstance(ast, (FieldNode, InlineFragmentNode, OperationDefinitionNode)): raise AssertionError( 'Input AST is of type "{}", which should not be a selection.' "".format(type(ast).__name__) ) if isinstance(ast, FieldNode): # Check whether this field has the expected directive, if so, modify and return if ast.directives is not None and any( _is_output_directive_with_name(directive, field_out_name) for directive in ast.directives ): new_directives = list(ast.directives) new_directives.append(_get_in_collection_filter_directive(input_filter_name)) new_ast = copy(ast) new_ast.directives = cast(FrozenList, new_directives) return new_ast if ast.selection_set is None: # Nothing to recurse on return ast # Otherwise, recurse and look for field with desired out_name made_changes = False new_selections = [] for selection in ast.selection_set.selections: # Make sure selection is a FieldNode of InlineFragment and cast to AST type # to make mypy happy. if not isinstance(selection, FieldNode) and not isinstance(selection, InlineFragmentNode): raise AssertionError( f"Unexpected selection type {type(selection)}. Only FieldNodes and " "InlineFragmentNodes are expected." 
) ast_type_selection = cast(AstType, selection) new_selection = _add_filter_at_field_with_output( ast_type_selection, field_out_name, input_filter_name ) if new_selection is not ast_type_selection: # Changes made somewhere down the line if not made_changes: made_changes = True else: # Change has already been made, but there is a new change. Implies that multiple # fields have the @output directive with the desired name raise GraphQLValidationError( 'There are multiple @output directives with the out_name "{}"'.format( field_out_name ) ) new_selections.append(new_selection) if made_changes: new_ast = copy(ast) new_ast.selection_set = SelectionSetNode(selections=new_selections) return new_ast else: return ast def _is_output_directive_with_name(directive: DirectiveNode, out_name: str) -> bool: """Return whether or not the input is an @output directive with the desired out_name.""" if not isinstance(directive, DirectiveNode): raise AssertionError('Input "{}" is not a directive.'.format(directive)) # Check whether or not this directive is an output directive. if directive.name.value != OutputDirective.name: return False # Ensure the output directive has arguments since @output takes an `out_name`. if not directive.arguments: raise AssertionError( "directive is an OutputDirective, but has no arguments. This should be impossible! " f"directive: {directive}" ) # Ensure he output directive argument is a string since output directives must have a # non-null string `out_name`. directive_out_name_value_node = directive.arguments[0].value if not isinstance(directive_out_name_value_node, StringValueNode): raise AssertionError( "directive is an OutputDirective, but has a non-string argument. " f"This should be impossible! directive: {directive}" ) return directive_out_name_value_node.value == out_name def _get_in_collection_filter_directive(input_filter_name: str) -> DirectiveNode: """Create a @filter directive with in_collection operation and the desired variable name.""" return DirectiveNode( name=NameNode(value=FilterDirective.name), arguments=[ ArgumentNode( name=NameNode(value="op_name"), value=StringValueNode(value="in_collection"), ), ArgumentNode( name=NameNode(value="value"), value=ListValueNode( values=[ StringValueNode(value="$" + input_filter_name), ], ), ), ], ) def _get_plan_and_depth_in_dfs_order(query_plan: SubQueryPlan) -> List[Tuple[SubQueryPlan, int]]: """Return a list of topologically sorted (query plan, depth) tuples.""" def _get_plan_and_depth_in_dfs_order_helper(query_plan, depth): plan_and_depth_in_dfs_order = [(query_plan, depth)] for child_query_plan in query_plan.child_query_plans: plan_and_depth_in_dfs_order.extend( _get_plan_and_depth_in_dfs_order_helper(child_query_plan, depth + 1) ) return plan_and_depth_in_dfs_order return _get_plan_and_depth_in_dfs_order_helper(query_plan, 0) ###### # Public API ###### def make_query_plan( root_sub_query_node: SubQueryNode, intermediate_output_names: FrozenSet[str] ) -> QueryPlanDescriptor: """Return a QueryPlanDescriptor, whose query ASTs have @filters added. For each parent of parent and child SubQueryNodes, a new @filter directive will be added in the child AST. It will be added on the field whose @output directive has the out_name equal to the child's out name as specified in the QueryConnection. The newly added @filter will be a 'in_collection' type filter, and the name of the local variable is guaranteed to be the same as the out_name of the @output on the parent. 
ASTs contained in the input node and its children nodes will not be modified. Args: root_sub_query_node: representing the base of a query split into pieces that we want to turn into a query plan. intermediate_output_names: names of outputs to be removed at the end. Returns: QueryPlanDescriptor containing a tree of SubQueryPlans that wrap around each individual query AST, the set of intermediate output names that are to be removed at the end, and information on which outputs are to be connect to which in what manner. """ output_join_descriptors: List[OutputJoinDescriptor] = [] root_sub_query_plan = SubQueryPlan( query_ast=root_sub_query_node.query_ast, schema_id=root_sub_query_node.schema_id, parent_query_plan=None, child_query_plans=[], ) _make_query_plan_recursive(root_sub_query_node, root_sub_query_plan, output_join_descriptors) return QueryPlanDescriptor( root_sub_query_plan=root_sub_query_plan, intermediate_output_names=intermediate_output_names, output_join_descriptors=output_join_descriptors, ) def print_query_plan(query_plan_descriptor: QueryPlanDescriptor, indentation_depth: int = 4) -> str: """Return a string describing query plan.""" query_plan_strings = [""] plan_and_depth = _get_plan_and_depth_in_dfs_order(query_plan_descriptor.root_sub_query_plan) for query_plan, depth in plan_and_depth: line_separation = "\n" + " " * indentation_depth * depth query_plan_strings.append(line_separation) query_str = 'Execute in schema named "{}":\n'.format(query_plan.schema_id) query_str += print_ast(query_plan.query_ast) query_str = query_str.replace("\n", line_separation) query_plan_strings.append(query_str) query_plan_strings.append("\n\nJoin together outputs as follows: ") query_plan_strings.append(str(query_plan_descriptor.output_join_descriptors)) query_plan_strings.append("\n\nRemove the following outputs at the end: ") query_plan_strings.append(str(query_plan_descriptor.intermediate_output_names) + "\n") return "".join(query_plan_strings)
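# Hedged usage sketch (not from the original sources): the usual flow is to split a
# cross-schema query with split_query() and then hand the resulting SubQueryNode tree to
# make_query_plan(). The import paths and the exact return shape of split_query() below are
# assumptions and may differ from the real package layout, so the sketch is left commented.
#
#     from graphql import parse
#     from graphql_compiler.schema_transformation.split_query import split_query
#
#     query_ast = parse(cross_schema_query_string)   # query against the merged schema
#     root_node, intermediate_outs = split_query(query_ast, merged_schema_descriptor)
#     plan = make_query_plan(root_node, frozenset(intermediate_outs))
#     print(print_query_plan(plan, indentation_depth=4))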
<filename>pytorch_toolbelt/modules/encoders/timm/resnet.py<gh_stars>1000+ from collections import OrderedDict from typing import List import torch from torch import nn from .common import GenericTimmEncoder from ..common import EncoderModule, make_n_channel_input __all__ = [ "SKResNet18Encoder", "SKResNeXt50Encoder", "SWSLResNeXt101Encoder", "TResNetMEncoder", "TimmResnet152D", "TimmSEResnet152D", "TimmResnet50D", "TimmResnet101D", "TimmResnet200D", ] from ... import ACT_RELU, get_activation_block class TResNetMEncoder(EncoderModule): def __init__(self, pretrained=True, layers=None, activation=ACT_RELU): if layers is None: layers = [1, 2, 3, 4] from timm.models import tresnet_m act_layer = get_activation_block(activation) encoder = tresnet_m(pretrained=pretrained, act_layer=act_layer) super().__init__([64, 64, 128, 1024, 2048], [4, 4, 8, 16, 32], layers) self.stem = nn.Sequential(encoder.body.SpaceToDepth, encoder.body.conv1) self.layer1 = encoder.body.layer1 self.layer2 = encoder.body.layer2 self.layer3 = encoder.body.layer3 self.layer4 = encoder.body.layer4 @property @torch.jit.unused def encoder_layers(self) -> List[nn.Module]: return [self.stem, self.layer1, self.layer2, self.layer3, self.layer4] class SKResNet18Encoder(EncoderModule): def __init__(self, pretrained=True, layers=None, no_first_max_pool=False, activation=ACT_RELU): if layers is None: layers = [1, 2, 3, 4] from timm.models import skresnet18 act_layer = get_activation_block(activation) encoder = skresnet18(pretrained=pretrained, features_only=True, act_layer=act_layer) super().__init__([64, 64, 128, 256, 512], [2, 4, 8, 16, 32], layers) self.stem = nn.Sequential( OrderedDict([("conv1", encoder.conv1), ("bn1", encoder.bn1), ("act1", encoder.act1)]) ) self.layer1 = nn.Sequential( nn.Conv2d(64, 64, kernel_size=3, padding=1, stride=2) if no_first_max_pool else encoder.maxpool, encoder.layer1, ) self.layer2 = encoder.layer2 self.layer3 = encoder.layer3 self.layer4 = encoder.layer4 @property def encoder_layers(self) -> List[nn.Module]: return [self.stem, self.layer1, self.layer2, self.layer3, self.layer4] def change_input_channels(self, input_channels: int, mode="auto", **kwargs): self.stem.conv1 = make_n_channel_input(self.stem.conv1, input_channels, mode, **kwargs) return self class SKResNeXt50Encoder(EncoderModule): def __init__(self, pretrained=True, layers=None, activation=ACT_RELU): if layers is None: layers = [1, 2, 3, 4] from timm.models import skresnext50_32x4d act_layer = get_activation_block(activation) encoder = skresnext50_32x4d(pretrained=pretrained, act_layer=act_layer) super().__init__([64, 256, 512, 1024, 2048], [2, 4, 8, 16, 32], layers) self.stem = nn.Sequential( OrderedDict([("conv1", encoder.conv1), ("bn1", encoder.bn1), ("act1", encoder.act1)]) ) self.layer1 = nn.Sequential(encoder.maxpool, encoder.layer1) self.layer2 = encoder.layer2 self.layer3 = encoder.layer3 self.layer4 = encoder.layer4 @property def encoder_layers(self) -> List[nn.Module]: return [self.stem, self.layer1, self.layer2, self.layer3, self.layer4] def change_input_channels(self, input_channels: int, mode="auto", **kwargs): self.stem.conv1 = make_n_channel_input(self.stem.conv1, input_channels, mode, **kwargs) return self class SWSLResNeXt101Encoder(EncoderModule): def __init__(self, pretrained=True, layers=None, activation=ACT_RELU): if layers is None: layers = [1, 2, 3, 4] from timm.models.resnet import swsl_resnext101_32x8d act_layer = get_activation_block(activation) encoder = swsl_resnext101_32x8d(pretrained=pretrained, 
act_layer=act_layer) super().__init__([64, 256, 512, 1024, 2048], [2, 4, 8, 16, 32], layers) self.stem = nn.Sequential( OrderedDict( [ ("conv1", encoder.conv1), ("bn1", encoder.bn1), ("act1", encoder.act1), ] ) ) self.layer1 = nn.Sequential(encoder.maxpool, encoder.layer1) self.layer2 = encoder.layer2 self.layer3 = encoder.layer3 self.layer4 = encoder.layer4 @property def encoder_layers(self) -> List[nn.Module]: return [self.stem, self.layer1, self.layer2, self.layer3, self.layer4] def change_input_channels(self, input_channels: int, mode="auto", **kwargs): self.stem.conv1 = make_n_channel_input(self.stem.conv1, input_channels, mode, **kwargs) return self class TimmResnet152D(GenericTimmEncoder): def __init__(self, pretrained=True, layers=None, activation=ACT_RELU): from timm.models.resnet import resnet152d act_layer = get_activation_block(activation) encoder = resnet152d(features_only=True, pretrained=pretrained, act_layer=act_layer) super().__init__(encoder, layers) def change_input_channels(self, input_channels: int, mode="auto"): self.encoder.conv1[0] = make_n_channel_input(self.encoder.conv1[0], input_channels, mode=mode) return self class TimmSEResnet152D(GenericTimmEncoder): def __init__(self, pretrained=True, layers=None, activation=ACT_RELU): from timm.models.resnet import seresnet152d act_layer = get_activation_block(activation) encoder = seresnet152d(features_only=True, pretrained=pretrained, act_layer=act_layer) super().__init__(encoder, layers) def change_input_channels(self, input_channels: int, mode="auto"): self.encoder.conv1[0] = make_n_channel_input(self.encoder.conv1[0], input_channels, mode=mode) return self class TimmResnet50D(GenericTimmEncoder): def __init__(self, pretrained=True, layers=None, activation=ACT_RELU): from timm.models.resnet import resnet50d act_layer = get_activation_block(activation) encoder = resnet50d(features_only=True, pretrained=pretrained, act_layer=act_layer) super().__init__(encoder, layers) def change_input_channels(self, input_channels: int, mode="auto"): self.encoder.conv1[0] = make_n_channel_input(self.encoder.conv1[0], input_channels, mode=mode) return self class TimmResnet101D(GenericTimmEncoder): def __init__(self, pretrained=True, layers=None, activation=ACT_RELU): from timm.models.resnet import resnet101d act_layer = get_activation_block(activation) encoder = resnet101d(features_only=True, pretrained=pretrained, act_layer=act_layer) super().__init__(encoder, layers) def change_input_channels(self, input_channels: int, mode="auto"): self.encoder.conv1[0] = make_n_channel_input(self.encoder.conv1[0], input_channels, mode=mode) return self class TimmResnet200D(GenericTimmEncoder): def __init__(self, pretrained=True, layers=None, activation=ACT_RELU): from timm.models.resnet import resnet200d act_layer = get_activation_block(activation) encoder = resnet200d(features_only=True, pretrained=pretrained, act_layer=act_layer) super().__init__(encoder, layers) def change_input_channels(self, input_channels: int, mode="auto"): self.encoder.conv1[0] = make_n_channel_input(self.encoder.conv1[0], input_channels, mode=mode) return self
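# Hedged usage sketch (not from the original sources): every class above follows the
# EncoderModule contract, so it can be instantiated directly and queried for multi-scale
# feature maps. Returning one tensor per selected layer is the usual pytorch_toolbelt
# convention and is assumed here; input sizes and channel counts are illustrative.
if __name__ == "__main__":
    import torch

    encoder = SKResNet18Encoder(pretrained=False, layers=[1, 2, 3, 4])
    encoder = encoder.change_input_channels(6)  # e.g. two stacked RGB frames

    x = torch.randn(1, 6, 256, 256)
    with torch.no_grad():
        feature_maps = encoder(x)

    for fm in feature_maps:
        print(tuple(fm.shape))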
<gh_stars>100-1000 /*========================================================================= Program: ParaView Module: pqEditScalarBarReaction.cxx Copyright (c) 2005,2006 Sandia Corporation, Kitware Inc. All rights reserved. ParaView is a free software; you can redistribute it and/or modify it under the terms of the ParaView license version 1.2. See License_v1.2.txt for the full ParaView license. A copy of this license can be obtained by contacting Kitware Inc. 28 Corporate Drive Clifton Park, NY 12065 USA THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ========================================================================*/ #include "pqEditScalarBarReaction.h" #include "pqDataRepresentation.h" #include "pqProxyWidgetDialog.h" #include "pqScalarBarVisibilityReaction.h" //----------------------------------------------------------------------------- pqEditScalarBarReaction::pqEditScalarBarReaction(QAction* parentObject, bool track_active_objects) : Superclass(parentObject) { QAction* tmp = new QAction(this); this->SBVReaction = new pqScalarBarVisibilityReaction(tmp, track_active_objects); this->connect(tmp, SIGNAL(changed()), SLOT(updateEnableState())); this->updateEnableState(); } //----------------------------------------------------------------------------- pqEditScalarBarReaction::~pqEditScalarBarReaction() { delete this->SBVReaction; } //----------------------------------------------------------------------------- void pqEditScalarBarReaction::setRepresentation(pqDataRepresentation* repr) { this->SBVReaction->setRepresentation(repr); } //----------------------------------------------------------------------------- void pqEditScalarBarReaction::updateEnableState() { this->parentAction()->setEnabled(this->SBVReaction->parentAction()->isEnabled() && this->SBVReaction->parentAction()->isChecked()); } //----------------------------------------------------------------------------- void pqEditScalarBarReaction::onTriggered() { this->editScalarBar(); } //----------------------------------------------------------------------------- bool pqEditScalarBarReaction::editScalarBar() { if (vtkSMProxy* sbProxy = this->SBVReaction->scalarBarProxy()) { pqRepresentation* repr = this->SBVReaction->representation(); pqProxyWidgetDialog dialog(sbProxy); dialog.setWindowTitle("Edit Color Legend Properties"); dialog.setObjectName("ColorLegendEditor"); dialog.setEnableSearchBar(true); dialog.setSettingsKey("ColorLegendEditor"); repr->connect(&dialog, SIGNAL(accepted()), SLOT(renderViewEventually())); return dialog.exec() == QDialog::Accepted; } return false; }
"""The neuralmonkey package is the root package of this project."""
<gh_stars>1000+ /* Copyright (c) 2013 <NAME> <<EMAIL>> * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. */ #include "ftnoir_tracker_linux_joystick.h" #include "api/plugin-api.hpp" #include "compat/math.hpp" #include <QMutexLocker> joystick::joystick() { QString device = getJoystickDevice(s.guid); joy_fd = open(device.toUtf8().data(), O_RDONLY | O_NONBLOCK); } joystick::~joystick() { if (joy_fd > 0) close(joy_fd); } module_status joystick::start_tracker(QFrame *) { if (joy_fd == -1) return error("Couldn't open joystick"); return status_ok(); } void joystick::data(double *data) { int map[6] = { s.joy_1 - 1, s.joy_2 - 1, s.joy_3 - 1, s.joy_4 - 1, s.joy_5 - 1, s.joy_6 - 1, }; const double limits[] = { 100, 100, 100, 180, 180, 180 }; const QString guid = s.guid; int axes[8]; struct js_event event; bool ret = true; if (read(joy_fd, &event, sizeof(event)) > 0) { switch (event.type) { case JS_EVENT_AXIS: if (event.number >= 8) break; axes_state[event.number] = event.value; break; default: /* Ignore init/button events. */ break; } } for (int i = 0; i < 6; i++) { axes[i] = axes_state[i]; } if (ret) { for (int i = 0; i < 6; i++) { int k = map[i]; if (k < 0 || k >= 8) data[i] = 0; else data[i] = std::clamp(axes[k] * limits[i] / AXIS_MAX, -limits[i], limits[i]); } } } OPENTRACK_DECLARE_TRACKER(joystick, dialog_joystick, joystickDll)
package org.allenai.scienceparse.pdfapi;

import lombok.Data;
import lombok.val;

import java.util.concurrent.ConcurrentHashMap;

@Data
public class PDFFontMetrics {

  private static final ConcurrentHashMap<String, PDFFontMetrics> canonical = new ConcurrentHashMap<>();

  /**
   * The special value for when the underlying font didn't have
   * an extractable family name.
   */
  public static String UNKNWON_FONT_FAMILY = "*UNKNOWN*";

  public final String name;
  public final float ptSize;
  public final float spaceWidth;

  /**
   * Ensures one font object per unique font name.
   *
   * @param name       the font's family name, or UNKNWON_FONT_FAMILY if none could be extracted
   * @param ptSize     the font's point size
   * @param spaceWidth the width of the space character in this font
   * @return the canonical PDFFontMetrics instance for the given name
   */
  public static PDFFontMetrics of(String name, float ptSize, float spaceWidth) {
    val fontMetrics = new PDFFontMetrics(name, ptSize, spaceWidth);
    val curValue = canonical.putIfAbsent(name, fontMetrics);
    return curValue != null ? curValue : fontMetrics;
  }

  public String stringRepresentation() {
    return String.format("%s-%f", name, ptSize);
  }
}
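// Hedged usage sketch (not from the original sources): of() canonicalizes on the font name,
// so repeated calls with the same name return a single shared instance (the first registered
// point size and space width win). The font name and numbers below are illustrative.
//
//     PDFFontMetrics a = PDFFontMetrics.of("Times-Roman", 10.0f, 2.5f);
//     PDFFontMetrics b = PDFFontMetrics.of("Times-Roman", 11.0f, 2.6f);
//     assert a == b;                                  // same canonical instance
//     System.out.println(a.stringRepresentation());   // prints "Times-Roman-10.000000"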
<filename>cocos2d/cocos/audio/android/jni/cddandroidAndroidJavaEngine.cpp /**************************************************************************** Copyright (c) 2010-2012 cocos2d-x.org Copyright (c) 2013-2017 Chukong Technologies Inc. http://www.cocos2d-x.org Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ****************************************************************************/ #define LOG_TAG "cocosdenshion::android::AndroidJavaEngine" #include "audio/android/jni/cddandroidAndroidJavaEngine.h" #include <stdlib.h> #include <sys/system_properties.h> #include "audio/android/ccdandroidUtils.h" #include "audio/android/utils/Utils.h" #include "audio/include/AudioEngine.h" #include "platform/android/jni/JniHelper.h" // logging #define LOGD(...) __android_log_print(ANDROID_LOG_DEBUG,LOG_TAG,__VA_ARGS__) // Java class static const std::string helperClassName = "org/cocos2dx/lib/Cocos2dxHelper"; using namespace cocos2d; using namespace cocos2d::experimental; using namespace CocosDenshion::android; AndroidJavaEngine::AndroidJavaEngine() : _implementBaseOnAudioEngine(false) , _effectVolume(1.f) { int sdkVer = getSDKVersion(); if (sdkVer > 0) { __android_log_print(ANDROID_LOG_DEBUG, "cocos2d", "android SDK version:%d", sdkVer); if (sdkVer == 21) { _implementBaseOnAudioEngine = true; } } else { __android_log_print(ANDROID_LOG_DEBUG, "cocos2d", "%s", "Fail to get android SDK version."); } } AndroidJavaEngine::~AndroidJavaEngine() { if (_implementBaseOnAudioEngine) { stopAllEffects(); } JniHelper::callStaticVoidMethod(helperClassName, "end"); } void AndroidJavaEngine::preloadBackgroundMusic(const char* filePath) { std::string fullPath = CocosDenshion::android::getFullPathWithoutAssetsPrefix(filePath); JniHelper::callStaticVoidMethod(helperClassName, "preloadBackgroundMusic", fullPath); } void AndroidJavaEngine::playBackgroundMusic(const char* filePath, bool loop) { std::string fullPath = CocosDenshion::android::getFullPathWithoutAssetsPrefix(filePath); JniHelper::callStaticVoidMethod(helperClassName, "playBackgroundMusic", fullPath, loop); } void AndroidJavaEngine::stopBackgroundMusic(bool releaseData) { JniHelper::callStaticVoidMethod(helperClassName, "stopBackgroundMusic"); } void AndroidJavaEngine::pauseBackgroundMusic() { JniHelper::callStaticVoidMethod(helperClassName, "pauseBackgroundMusic"); } void AndroidJavaEngine::resumeBackgroundMusic() { JniHelper::callStaticVoidMethod(helperClassName, "resumeBackgroundMusic"); } void AndroidJavaEngine::rewindBackgroundMusic() { JniHelper::callStaticVoidMethod(helperClassName, 
"rewindBackgroundMusic"); } bool AndroidJavaEngine::willPlayBackgroundMusic() { return JniHelper::callStaticBooleanMethod(helperClassName, "willPlayBackgroundMusic"); } bool AndroidJavaEngine::isBackgroundMusicPlaying() { return JniHelper::callStaticBooleanMethod(helperClassName, "isBackgroundMusicPlaying"); } float AndroidJavaEngine::getBackgroundMusicVolume() { return JniHelper::callStaticFloatMethod(helperClassName, "getBackgroundMusicVolume"); } void AndroidJavaEngine::setBackgroundMusicVolume(float volume) { JniHelper::callStaticVoidMethod(helperClassName, "setBackgroundMusicVolume", volume); } float AndroidJavaEngine::getEffectsVolume() { if (_implementBaseOnAudioEngine) { return _effectVolume; } else { return JniHelper::callStaticFloatMethod(helperClassName, "getEffectsVolume"); } } void AndroidJavaEngine::setEffectsVolume(float volume) { if (_implementBaseOnAudioEngine) { if (volume > 1.f) { volume = 1.f; } else if (volume < 0.f) { volume = 0.f; } if (_effectVolume != volume) { _effectVolume = volume; for (auto it : _soundIDs) { AudioEngine::setVolume(it, volume); } } } else { JniHelper::callStaticVoidMethod(helperClassName, "setEffectsVolume", volume); } } unsigned int AndroidJavaEngine::playEffect(const char* filePath, bool loop, float pitch, float pan, float gain) { if (_implementBaseOnAudioEngine) { auto soundID = AudioEngine::play2d(filePath, loop, _effectVolume); if (soundID != AudioEngine::INVALID_AUDIO_ID) { _soundIDs.push_back(soundID); AudioEngine::setFinishCallback(soundID, [this](int id, const std::string& filePath){ _soundIDs.remove(id); }); } return soundID; } else { std::string fullPath = CocosDenshion::android::getFullPathWithoutAssetsPrefix(filePath); int ret = JniHelper::callStaticIntMethod(helperClassName, "playEffect", fullPath, loop, pitch, pan, gain); return (unsigned int)ret; } } void AndroidJavaEngine::pauseEffect(unsigned int soundID) { if (_implementBaseOnAudioEngine) { AudioEngine::pause(soundID); } else { JniHelper::callStaticVoidMethod(helperClassName, "pauseEffect", (int)soundID); } } void AndroidJavaEngine::resumeEffect(unsigned int soundID) { if (_implementBaseOnAudioEngine) { AudioEngine::resume(soundID); } else { JniHelper::callStaticVoidMethod(helperClassName, "resumeEffect", (int)soundID); } } void AndroidJavaEngine::stopEffect(unsigned int soundID) { if (_implementBaseOnAudioEngine) { AudioEngine::stop(soundID); _soundIDs.remove(soundID); } else { JniHelper::callStaticVoidMethod(helperClassName, "stopEffect", (int)soundID); } } void AndroidJavaEngine::pauseAllEffects() { if (_implementBaseOnAudioEngine) { for (auto it : _soundIDs) { AudioEngine::pause(it); } } else { JniHelper::callStaticVoidMethod(helperClassName, "pauseAllEffects"); } } void AndroidJavaEngine::resumeAllEffects() { if (_implementBaseOnAudioEngine) { for (auto it : _soundIDs) { AudioEngine::resume(it); } } else { JniHelper::callStaticVoidMethod(helperClassName, "resumeAllEffects"); } } void AndroidJavaEngine::stopAllEffects() { if (_implementBaseOnAudioEngine) { for (auto it : _soundIDs) { AudioEngine::stop(it); } _soundIDs.clear(); } else { JniHelper::callStaticVoidMethod(helperClassName, "stopAllEffects"); } } void AndroidJavaEngine::preloadEffect(const char* filePath) { if (!_implementBaseOnAudioEngine) { std::string fullPath = CocosDenshion::android::getFullPathWithoutAssetsPrefix(filePath); JniHelper::callStaticVoidMethod(helperClassName, "preloadEffect", fullPath); } else { AudioEngine::preload(filePath); } } void AndroidJavaEngine::unloadEffect(const char* filePath) { if 
(!_implementBaseOnAudioEngine) { std::string fullPath = CocosDenshion::android::getFullPathWithoutAssetsPrefix(filePath); JniHelper::callStaticVoidMethod(helperClassName, "unloadEffect", fullPath); } else { AudioEngine::uncache(filePath); } }
3,171
965
CDHtmlDialog mydialog(IDD_MYDHTMLDLG); TCHAR szResID[] = _T("HTML_PAGE"); mydialog.m_szHtmlResID = szResID; mydialog.DoModal();
62
835
/* * Copyright (C) 2015 Twitter, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.twitter.sdk.android.tweetcomposer.internal.util; import android.annotation.TargetApi; import android.content.Context; import android.os.Build; import android.util.AttributeSet; import android.widget.ScrollView; public class ObservableScrollView extends ScrollView { ScrollViewListener scrollViewListener; public ObservableScrollView(Context context) { super(context); } public ObservableScrollView(Context context, AttributeSet attrs) { super(context, attrs); } public ObservableScrollView(Context context, AttributeSet attrs, int defStyleAttr) { super(context, attrs, defStyleAttr); } @TargetApi(Build.VERSION_CODES.LOLLIPOP) public ObservableScrollView(Context context, AttributeSet attrs, int defStyleAttr, int defStyleRes) { super(context, attrs, defStyleAttr, defStyleRes); } @Override protected void onScrollChanged(int currentX, int currentY, int oldX, int oldY) { super.onScrollChanged(currentX, currentY, oldX, oldY); if (scrollViewListener != null) { scrollViewListener.onScrollChanged(currentY); } } public void setScrollViewListener(ScrollViewListener scrollViewListener) { this.scrollViewListener = scrollViewListener; } public interface ScrollViewListener { void onScrollChanged(int scrollY); } }
656
10,225
package io.quarkus.spring.data.deployment.generate; import java.util.List; import java.util.Map; import java.util.function.Consumer; import org.jboss.jandex.ClassInfo; import org.jboss.jandex.DotName; import org.jboss.jandex.IndexView; import org.jboss.jandex.MethodInfo; import io.quarkus.gizmo.ClassCreator; import io.quarkus.gizmo.FieldDescriptor; import io.quarkus.gizmo.MethodCreator; import io.quarkus.gizmo.MethodDescriptor; import io.quarkus.gizmo.ResultHandle; public class FragmentMethodsAdder { private final Consumer<String> fragmentImplClassResolvedCallback; private final IndexView index; public FragmentMethodsAdder(Consumer<String> fragmentImplClassResolvedCallback, IndexView index) { this.fragmentImplClassResolvedCallback = fragmentImplClassResolvedCallback; this.index = index; } public void add(ClassCreator classCreator, String generatedClassName, List<DotName> customInterfaceNamesToImplement, Map<String, FieldDescriptor> customImplNameToHandle) { for (DotName customInterfaceToImplement : customInterfaceNamesToImplement) { String customImplementationClassName = FragmentMethodsUtil .getImplementationDotName(customInterfaceToImplement, index).toString(); fragmentImplClassResolvedCallback.accept(customImplementationClassName); ClassInfo customInterfaceToImplementClassInfo = index.getClassByName(customInterfaceToImplement); if (customInterfaceToImplementClassInfo == null) { throw new IllegalArgumentException("Unable to implement" + customInterfaceToImplement + " because it is not known - please make sure it's part of the Quarkus index"); } for (MethodInfo methodToImplement : customInterfaceToImplementClassInfo.methods()) { // methods defined on the interface are implemented by forwarding them to the bean that implements them Object[] methodParameterTypes = new Object[methodToImplement.parameters().size()]; for (int i = 0; i < methodToImplement.parameters().size(); i++) { methodParameterTypes[i] = methodToImplement.parameters().get(i).name().toString(); } String methodReturnType = methodToImplement.returnType().name().toString(); MethodDescriptor methodDescriptor = MethodDescriptor.ofMethod(generatedClassName, methodToImplement.name(), methodReturnType, methodParameterTypes); if (!classCreator.getExistingMethods().contains(methodDescriptor)) { try (MethodCreator methodCreator = classCreator.getMethodCreator(methodDescriptor)) { // obtain the bean from Arc ResultHandle bean = methodCreator.readInstanceField( customImplNameToHandle.get(customImplementationClassName), methodCreator.getThis()); ResultHandle[] methodParameterHandles = new ResultHandle[methodToImplement.parameters().size()]; for (int i = 0; i < methodToImplement.parameters().size(); i++) { methodParameterHandles[i] = methodCreator.getMethodParam(i); } // delegate call to bean ResultHandle result = methodCreator.invokeVirtualMethod( MethodDescriptor.ofMethod(customImplementationClassName, methodToImplement.name(), methodReturnType, methodParameterTypes), bean, methodParameterHandles); if (void.class.getName().equals(methodReturnType)) { methodCreator.returnValue(null); } else { methodCreator.returnValue(result); } } } } } } }
1,738
1,150
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import logging import socket import time from .log_helper import get_logger from threading import Thread from .lock import lock, unlock __all__ = ['ControllerServer'] _logger = get_logger(__name__, level=logging.INFO) class ControllerServer(object): """The controller wrapper with a socket server to handle the request of search agent. Args: controller(slim.searcher.Controller): The controller used to generate tokens. address(tuple): The address of current server binding with format (ip, port). Default: ('', 0). which means setting ip automatically max_client_num(int): The maximum number of clients connecting to current server simultaneously. Default: 100. search_steps(int|None): The total steps of searching. None means never stopping. Default: None key(str|None): Config information. Default: None. """ def __init__(self, controller=None, address=('', 0), max_client_num=100, search_steps=None, key=None): """ """ self._controller = controller self._address = address self._max_client_num = max_client_num self._search_steps = search_steps self._closed = False self._port = address[1] self._ip = address[0] self._key = key self._client_num = 0 self._client = dict() self._compare_time = 172800 ### 48 hours def start(self): self._socket_server = socket.socket(socket.AF_INET, socket.SOCK_STREAM) self._socket_server.bind(self._address) self._socket_server.listen(self._max_client_num) self._port = self._socket_server.getsockname()[1] self._ip = self._socket_server.getsockname()[0] _logger.info("ControllerServer Start!!!") _logger.debug("ControllerServer - listen on: [{}:{}]".format( self._ip, self._port)) thread = Thread(target=self.run) thread.setDaemon(True) thread.start() return str(thread) def close(self): """Close the server.""" self._closed = True _logger.info("server closed!") def port(self): """Get the port.""" return self._port def ip(self): """Get the ip.""" return self._ip def run(self): """Start the server. 
""" _logger.info("Controller Server run...") try: while ((self._search_steps is None) or (self._controller._iter < (self._search_steps))) and not self._closed: conn, addr = self._socket_server.accept() message = conn.recv(1024).decode() _logger.debug(message) if message.strip("\n") == "next_tokens": tokens = self._controller.next_tokens() tokens = ",".join([str(token) for token in tokens]) conn.send(tokens.encode()) elif message.strip("\n") == "current_info": current_info = dict() current_info['best_tokens'] = self._controller.best_tokens current_info['best_reward'] = self._controller.max_reward current_info[ 'current_tokens'] = self._controller.current_tokens conn.send(str(current_info).encode()) else: _logger.debug("recv message from {}: [{}]".format(addr, message)) messages = message.strip('\n').split("\t") if (len(messages) < 5) or (messages[0] != self._key): _logger.debug("recv noise from {}: [{}]".format( addr, message)) continue tokens = messages[1] reward = messages[2] iter = messages[3] client_name = messages[4] one_step_time = -1 if client_name in self._client.keys(): current_time = time.time() - self._client[client_name] if current_time > one_step_time: one_step_time = current_time self._compare_time = 2 * one_step_time if client_name not in self._client.keys(): self._client[client_name] = time.time() self._client_num += 1 self._client[client_name] = time.time() for key_client in self._client.keys(): ### if a client not request token in double train one tokens' time, we think this client was stoped. if ( time.time() - self._client[key_client] ) > self._compare_time and len(self._client.keys()) > 1: self._client.pop(key_client) self._client_num -= 1 _logger.debug( "client: {}, client_num: {}, compare_time: {}".format( self._client, self._client_num, self._compare_time)) tokens = [int(token) for token in tokens.split(",")] self._controller.update(tokens, float(reward), int(iter), int(self._client_num)) response = "ok" conn.send(response.encode()) _logger.debug("send message to {}: [{}]".format(addr, tokens)) conn.close() except Exception as err: _logger.error(err) finally: self._socket_server.close() self.close()
3,304
945
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.iotdb.consensus.multileader.service; import org.apache.iotdb.common.rpc.thrift.TSStatus; import org.apache.iotdb.commons.consensus.ConsensusGroupId; import org.apache.iotdb.consensus.common.request.ByteBufferConsensusRequest; import org.apache.iotdb.consensus.multileader.MultiLeaderConsensus; import org.apache.iotdb.consensus.multileader.MultiLeaderServerImpl; import org.apache.iotdb.consensus.multileader.thrift.MultiLeaderConsensusIService; import org.apache.iotdb.consensus.multileader.thrift.TLogBatch; import org.apache.iotdb.consensus.multileader.thrift.TSyncLogReq; import org.apache.iotdb.consensus.multileader.thrift.TSyncLogRes; import org.apache.iotdb.rpc.TSStatusCode; import org.apache.thrift.TException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.util.ArrayList; import java.util.Collections; import java.util.List; public class MultiLeaderRPCServiceProcessor implements MultiLeaderConsensusIService.Iface { private final Logger logger = LoggerFactory.getLogger(MultiLeaderRPCServiceProcessor.class); private final MultiLeaderConsensus consensus; public MultiLeaderRPCServiceProcessor(MultiLeaderConsensus consensus) { this.consensus = consensus; } @Override public TSyncLogRes syncLog(TSyncLogReq req) throws TException { ConsensusGroupId groupId = ConsensusGroupId.Factory.createFromTConsensusGroupId(req.getConsensusGroupId()); MultiLeaderServerImpl impl = consensus.getImpl(groupId); if (impl == null) { String message = String.format( "Unexpected consensusGroupId %s for TSyncLogReq which size is %s", groupId, req.getBatches().size()); logger.error(message); TSStatus status = new TSStatus(TSStatusCode.INTERNAL_SERVER_ERROR.getStatusCode()); status.setMessage(message); return new TSyncLogRes(Collections.singletonList(status)); } List<TSStatus> statuses = new ArrayList<>(); // We use synchronized to ensure atomicity of executing multiple logs synchronized (impl.getStateMachine()) { for (TLogBatch batch : req.getBatches()) { statuses.add( impl.getStateMachine() .write( impl.buildIndexedConsensusRequestForRemoteRequest( new ByteBufferConsensusRequest(batch.data)))); } } logger.debug("Execute TSyncLogReq for {} with result {}", req.consensusGroupId, statuses); return new TSyncLogRes(statuses); } public void handleClientExit() {} }
1,112
1,350
<reponame>archeuclid/sbt-native-packager class WithIgnore { public WithIgnore() { new foo.Foo(); } }
47
471
package com.dtflys.forest.backend.httpclient.response; import com.dtflys.forest.handler.LifeCycleHandler; import com.dtflys.forest.http.ForestRequest; import com.dtflys.forest.utils.ForestProgress; import org.apache.http.Header; import org.apache.http.HttpEntity; import java.io.*; public class HttpclientEntity implements HttpEntity { private final ForestRequest request; private final HttpEntity entity; private final LifeCycleHandler handler; private long contentLength = -1; private long readBytes; private final long progressStep; private long currentStep = 0; public HttpclientEntity(ForestRequest request, HttpEntity entity, LifeCycleHandler handler) { this.request = request; this.entity = entity; this.handler = handler; this.progressStep = request.getProgressStep(); } @Override public boolean isRepeatable() { return entity.isRepeatable(); } @Override public boolean isChunked() { return entity.isChunked(); } @Override public long getContentLength() { return entity.getContentLength(); } @Override public Header getContentType() { return entity.getContentType(); } @Override public Header getContentEncoding() { return entity.getContentEncoding(); } @Override public InputStream getContent() throws IOException, UnsupportedOperationException { if (isStreaming()) { InputStream in = entity.getContent(); ByteArrayOutputStream out = new ByteArrayOutputStream(); if (contentLength < 0) { contentLength = getContentLength(); } ForestProgress progress = new ForestProgress(request, contentLength); try { byte[] tmp = new byte[4096]; progress.setBegin(true); int len; while((len = in.read(tmp)) != -1) { // increment current length of written bytes readBytes += len; progress.setCurrentBytes(readBytes); if (contentLength >= 0) { currentStep += len; if (readBytes == contentLength) { // progress is done progress.setDone(true); handler.handleProgress(request, progress); } else { while (currentStep >= progressStep) { currentStep = currentStep - progressStep; progress.setDone(false); // invoke progress listener handler.handleProgress(request, progress); } } } progress.setBegin(false); out.write(tmp, 0, len); } out.flush(); } finally { in.close(); } ByteArrayInputStream stream = new ByteArrayInputStream(out.toByteArray()); return stream; } return entity.getContent(); } @Override public void writeTo(OutputStream outputStream) throws IOException { entity.writeTo(outputStream); } @Override public boolean isStreaming() { return entity.isStreaming(); } @Override public void consumeContent() throws IOException { entity.consumeContent(); } }
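getContent() above throttles progress callbacks: it accumulates the bytes read and invokes the handler once per progressStep bytes, plus a final call flagged as done once the whole content length has been read. The same throttling logic, sketched in Python purely for illustration and assuming the content length is known up front (the names are made up, not part of the Forest API):

def read_with_progress(chunks, content_length, progress_step, on_progress):
    read_bytes = 0
    current_step = 0
    for chunk in chunks:
        read_bytes += len(chunk)
        current_step += len(chunk)
        if read_bytes == content_length:
            on_progress(read_bytes, done=True)       # final callback
        else:
            while current_step >= progress_step:     # one callback per full step
                current_step -= progress_step
                on_progress(read_bytes, done=False)

# a 10240-byte body read in 4096-byte chunks with progress_step=4096
# fires callbacks at 4096, 8192 and a final one at 10240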
1,653
5,169
{ "name": "HeziSDK", "version": "1.2.1", "license": "MIT", "summary": "活动盒子SDK", "homepage": "http://www.huodonghezi.com", "authors": { "mcmore": "<EMAIL>" }, "source": { "git": "https://github.com/mcmore/hezi-sdk-for-ios.git", "tag": "1.2.1" }, "requires_arc": true, "platforms": { "ios": "7.0" }, "source_files": [ "HeziSDK/*.{h,m}", "HeziSDK/HeziSDK.a" ], "resources": "HeziSDK/HeziSDK.bundle", "frameworks": "UIKit" }
249
809
/** * @file * @brief fnmatch_function, now working with two flags - FNM_MATCH and FNM_PATHNAME * @date 20.12.18 * @author <NAME> */ #include <stdlib.h> #include <string.h> #include <errno.h> #include <fnmatch.h> static int bracket_check(const char *pattern) { while (*pattern != ']') { if (*pattern == '/') { return 0; } pattern++; } return 1; } static int fnmatch_string(const char *pattern, const char *string, int flags) { int flag; int denial_flag; while (*pattern != '\0') { if ((*string == '/') && (*pattern != '/') && (*pattern != '*') && (flags == FNM_PATHNAME)) { return FNM_NOMATCH; } if ((*string == '\0') && (*pattern != '*')) { return FNM_NOMATCH; } switch (*pattern) { case '?': break; case '*': pattern++; if (*pattern == '\0' && flags != FNM_PATHNAME) { return 0; } while (*string != '\0') { if (!fnmatch_string(pattern, string, flags)) { return 0; } if (*string == '/' && flags == FNM_PATHNAME) { return FNM_NOMATCH; } string++; } break; case '[': flag = 0; denial_flag = 0; if (flags == FNM_PATHNAME) { if (!bracket_check(pattern)) { if (*pattern != *string) { return FNM_NOMATCH; } return fnmatch_string(pattern + 1, string + 1, flags); } } pattern++; if (*pattern == '^') { denial_flag = 1; pattern++; } while (*pattern != ']') { if (*string == *pattern) { flag = 1; } pattern++; } if ((!flag && !denial_flag) || (flag && denial_flag)) { return FNM_NOMATCH; } break; default: if (*pattern != *string) { return FNM_NOMATCH; } } string++; pattern++; } if (*string != '\0') { return FNM_NOMATCH; } return 0; } int fnmatch(const char *pattern, const char *string, int flags) { if (pattern == NULL || string == NULL) { return EINVAL; } /* Begin of pattern-check */ char *bracket_open = strrchr(pattern, '['); char *bracket_close = strrchr(pattern, ']'); if ((bracket_open != NULL) && (bracket_close < bracket_open)) { return EINVAL; } /* End of pattern-check */ return fnmatch_string(pattern, string, flags); }
1,008
2,542
// ------------------------------------------------------------ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License (MIT). See License.txt in the repo root for license information. // ------------------------------------------------------------ #pragma once #define MEMORY_STREAM_TAG 'msTG' namespace Data { namespace TStore { // // Currently, this class implements the bare minimum to get StoreCopyStream to work // class MemoryBuffer : public KObject<MemoryBuffer> , public KShared<MemoryBuffer> , public ktl::io::KStream { K_FORCE_SHARED(MemoryBuffer) K_SHARED_INTERFACE_IMP(KStream) public: static NTSTATUS Create(__in KAllocator & allocator, __out SPtr & result); static NTSTATUS Create(__in ULONG capacity, __in KAllocator & allocator, __out SPtr & result); static NTSTATUS Create(__in KBuffer & buffer, __in KAllocator & allocator, __out SPtr & result); public: __declspec(property(get = GetPosition, put = SetPosition)) LONGLONG Position; LONGLONG GetPosition() const override; void SetPosition(__in LONGLONG Position) override; __declspec(property(get = GetLength)) LONGLONG Length; LONGLONG GetLength() const override; ktl::Awaitable<NTSTATUS> ReadAsync( __in KBuffer& buffer, __out ULONG & bytesRead, __in ULONG offsetIntoBuffer = 0, __in ULONG count = 0) override; ktl::Awaitable<NTSTATUS> WriteAsync( __in KBuffer const & buffer, __in ULONG offsetIntoBuffer = 0, __in ULONG count = 0) override; ktl::Awaitable<NTSTATUS> WriteAsync(__in byte value); ktl::Awaitable<NTSTATUS> WriteAsync(__in ULONG32 value); ktl::Awaitable<NTSTATUS> FlushAsync() override; ktl::Awaitable<NTSTATUS> CloseAsync() override; KBuffer::SPtr GetBuffer(); private: MemoryBuffer(__in ULONG capacity); MemoryBuffer(__in KBuffer & buffer); bool isResizable_; KBuffer::SPtr bufferSPtr_; SharedBinaryWriter::SPtr binaryWriterSPtr_; ULONG position_; }; } }
1,063
2,151
/* Copyright 2012 The Chromium Authors. All rights reserved. * Use of this source code is governed by a BSD-style license that can be * found in the LICENSE file. */ #ifndef PPAPI_SIMPLE_PS_MAIN_H_ #define PPAPI_SIMPLE_PS_MAIN_H_ #include "ppapi_simple/ps.h" #include "ppapi_simple/ps_event.h" EXTERN_C_BEGIN typedef int (*PSMainFunc_t)(int argc, char *argv[]); /** * PSUserMainGet * * Prototype for the user-provided function which retrieves the user's main * function. * This is normally defined using the PPAPI_SIMPLE_REGISTER_MAIN macro. */ PSMainFunc_t PSUserMainGet(); /** * PPAPI_SIMPLE_REGISTER_MAIN * * Constructs a PSInstance object and configures it to call the provided * 'main' function on its own thread once initialization is complete. * * The ps_entrypoint_*.o and ps_main.o objects will not be linked by default, * so we force them to be linked here. */ #define PPAPI_SIMPLE_REGISTER_MAIN(main_func) \ PSMainFunc_t PSUserMainGet() { return main_func; } EXTERN_C_END #endif /* PPAPI_SIMPLE_PS_MAIN_H_ */
361
4,573
<reponame>compose-x/troposphere<filename>examples/WaitObject.py # Converted from WaitObject.template located at: # http://aws.amazon.com/cloudformation/aws-cloudformation-templates/ from troposphere import GetAtt, Output, Ref, Template from troposphere.cloudformation import WaitCondition, WaitConditionHandle t = Template() t.set_description( "Example template showing how the WaitCondition and WaitConditionHandle " "are configured. With this template, the stack will not complete until " "either the WaitCondition timeout occurs, or you manually signal the " "WaitCondition object using the URL created by the WaitConditionHandle. " "You can use CURL or some other equivalent mechanism to signal the " "WaitCondition. To find the URL, use cfn-describe-stack-resources or " "the AWS Management Console to display the PhysicalResourceId of the " "WaitConditionHandle - this is the URL to use to signal. For details of " "the signal request see the AWS CloudFormation User Guide at " "http://docs.amazonwebservices.com/AWSCloudFormation/latest/UserGuide/" ) mywaithandle = t.add_resource(WaitConditionHandle("myWaitHandle")) mywaitcondition = t.add_resource( WaitCondition( "myWaitCondition", Handle=Ref(mywaithandle), Timeout="300", ) ) t.add_output( [ Output( "ApplicationData", Value=GetAtt(mywaitcondition, "Data"), Description="The data passed back as part of signalling the " "WaitCondition", ) ] ) print(t.to_json())
523
486
#ifndef CUBERT_BERTMGPU_H #define CUBERT_BERTMGPU_H #include <vector> #include <string> #include <atomic> #include <mutex> #include "cuBERT.h" #include "cuBERT/Bert.h" #include "cuBERT/tensorflow/Graph.h" namespace cuBERT { template <typename T> class BertM { public: explicit BertM(const char *model_file, size_t max_batch_size, size_t seq_length, size_t num_hidden_layers = 12, size_t num_attention_heads = 12); virtual ~BertM(); unsigned int compute(size_t batch_size, int *input_ids, int8_t *input_mask, int8_t *segment_ids, T *output, cuBERT_OutputType output_type = cuBERT_LOGITS); // output_to_float = true: // for half model, the output is always float, the method will convert half to float; // for float model, this flag is not used. unsigned int compute(size_t batch_size, int *input_ids, int8_t *input_mask, int8_t *segment_ids, cuBERT_Output *output, bool output_to_float = false); size_t seq_length; private: Graph<T> graph; std::vector<Bert<T> *> bert_instances; std::vector<std::mutex *> mutex_instances; std::atomic<uint8_t> rr; }; } #endif //CUBERT_BERTMGPU_H
769
1,761
<filename>scripts/examples/OpenMV/10-Color-Tracking/multi_color_blob_tracking.py<gh_stars>1000+ # Multi Color Blob Tracking Example # # This example shows off multi color blob tracking using the OpenMV Cam. import sensor, image, time, math # Color Tracking Thresholds (L Min, L Max, A Min, A Max, B Min, B Max) # The below thresholds track in general red/green things. You may wish to tune them... thresholds = [(30, 100, 15, 127, 15, 127), # generic_red_thresholds (30, 100, -64, -8, -32, 32), # generic_green_thresholds (0, 15, 0, 40, -80, -20)] # generic_blue_thresholds # You may pass up to 16 thresholds above. However, it's not really possible to segment any # scene with 16 thresholds before color thresholds start to overlap heavily. sensor.reset() sensor.set_pixformat(sensor.RGB565) sensor.set_framesize(sensor.QVGA) sensor.skip_frames(time = 2000) sensor.set_auto_gain(False) # must be turned off for color tracking sensor.set_auto_whitebal(False) # must be turned off for color tracking clock = time.clock() # Only blobs with more pixels than "pixels_threshold" and more area than "area_threshold" are # returned by "find_blobs" below. Change "pixels_threshold" and "area_threshold" if you change the # camera resolution. Don't set "merge=True" because that will merge blobs which we don't want here. while(True): clock.tick() img = sensor.snapshot() for blob in img.find_blobs(thresholds, pixels_threshold=200, area_threshold=200): # These values depend on the blob not being circular - otherwise they will be shaky. if blob.elongation() > 0.5: img.draw_edges(blob.min_corners(), color=(255,0,0)) img.draw_line(blob.major_axis_line(), color=(0,255,0)) img.draw_line(blob.minor_axis_line(), color=(0,0,255)) # These values are stable all the time. img.draw_rectangle(blob.rect()) img.draw_cross(blob.cx(), blob.cy()) # Note - the blob rotation is unique to 0-180 only. img.draw_keypoints([(blob.cx(), blob.cy(), int(math.degrees(blob.rotation())))], size=20) print(clock.fps())
789
892
{ "schema_version": "1.2.0", "id": "GHSA-p674-c6w9-wwgg", "modified": "2022-04-30T18:11:21Z", "published": "2022-04-30T18:11:21Z", "aliases": [ "CVE-1999-0862" ], "details": "Insecure directory permissions in RPM distribution for PostgreSQL allows local users to gain privileges by reading a plaintext password file.", "severity": [ ], "affected": [ ], "references": [ { "type": "ADVISORY", "url": "https://nvd.nist.gov/vuln/detail/CVE-1999-0862" } ], "database_specific": { "cwe_ids": [ ], "severity": "LOW", "github_reviewed": false } }
265
4,868
<reponame>cjkall/parking-system package com.cf.chat.domain; import lombok.AllArgsConstructor; import lombok.Data; import lombok.NoArgsConstructor; import lombok.ToString; import org.springframework.data.annotation.Id; import org.springframework.data.mongodb.core.mapping.Document; import java.io.Serializable; @Data @ToString @AllArgsConstructor @NoArgsConstructor @Document(collection = "cf_chat_group_message") public class CfUserGroupMessage implements Serializable { @Id private String id; private String uid; private String groupId; private String messageId; private Integer status; // status (0 - unread / 1 - read / 2 - recalled) private Long createTime; }
246
335
<filename>P/Proliferation.json { "word": "Proliferation", "definitions": [ "the growth or production of cells by multiplication of parts.", "a rapid and often excessive spread or increase." ], "parts-of-speech": "Noun" }
88
343
// Copyright 2013 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // #ifndef SYZYGY_BLOCK_GRAPH_TRANSFORMS_FUZZING_TRANSFORM_H_ #define SYZYGY_BLOCK_GRAPH_TRANSFORMS_FUZZING_TRANSFORM_H_ #include "syzygy/block_graph/iterate.h" #include "syzygy/block_graph/transforms/iterative_transform.h" #include "syzygy/block_graph/transforms/named_transform.h" namespace block_graph { namespace transforms { typedef block_graph::BlockGraph BlockGraph; typedef block_graph::BasicBlockSubGraph BasicBlockSubGraph; // This class applies the liveness fuzzing transformation to each basic block. // // At each program point where a register has been proven dead (i.e., it has // no downstream read dependency), an instruction is inserted which modifies // the contents of the register to contain a dummy value. class LivenessFuzzingBasicBlockTransform : public block_graph::transforms::NamedBasicBlockSubGraphTransformImpl< LivenessFuzzingBasicBlockTransform> { public: LivenessFuzzingBasicBlockTransform() {} // The transform name. static const char kTransformName[]; protected: // @name BasicBlockSubGraphTransformInterface method. virtual bool TransformBasicBlockSubGraph( const TransformPolicyInterface* policy, BlockGraph* block_graph, BasicBlockSubGraph* basic_block_subgraph) override; private: DISALLOW_COPY_AND_ASSIGN(LivenessFuzzingBasicBlockTransform); }; // This transformation applied some basic block transform to validate analysis // done on subgraph. The behavior must be the same with each transformation. class FuzzingTransform : public block_graph::transforms::IterativeTransformImpl<FuzzingTransform> { public: FuzzingTransform(); // @name IterativeTransformImpl implementation. // @{ bool OnBlock(const TransformPolicyInterface* policy, BlockGraph* block_graph, BlockGraph::Block* block); // @} // The transform name. static const char kTransformName[]; private: DISALLOW_COPY_AND_ASSIGN(FuzzingTransform); }; } // namespace transforms } // namespace block_graph #endif // SYZYGY_BLOCK_GRAPH_TRANSFORMS_FUZZING_TRANSFORM_H_
809
354
/*********************************************************************************************************************** * OpenStudio(R), Copyright (c) 2008-2021, Alliance for Sustainable Energy, LLC, and other contributors. All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, are permitted provided that the * following conditions are met: * * (1) Redistributions of source code must retain the above copyright notice, this list of conditions and the following * disclaimer. * * (2) Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials provided with the distribution. * * (3) Neither the name of the copyright holder nor the names of any contributors may be used to endorse or promote products * derived from this software without specific prior written permission from the respective party. * * (4) Other than as required in clauses (1) and (2), distributions in any form of modifications or other derivative works * may not use the "OpenStudio" trademark, "OS", "os", or any other confusingly similar designation without specific prior * written permission from Alliance for Sustainable Energy, LLC. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) AND ANY CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER(S), ANY CONTRIBUTORS, THE UNITED STATES GOVERNMENT, OR THE UNITED * STATES DEPARTMENT OF ENERGY, NOR ANY OF THEIR EMPLOYEES, BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
***********************************************************************************************************************/ #include <gtest/gtest.h> #include "ModelFixture.hpp" #include "../CurveTriquadratic.hpp" #include <cmath> using namespace openstudio; using namespace openstudio::model; TEST_F(ModelFixture, CurveTriquadratic_DefaultConstructors) { ::testing::FLAGS_gtest_death_test_style = "threadsafe"; ASSERT_EXIT( { Model m; CurveTriquadratic curve(m); exit(0); }, ::testing::ExitedWithCode(0), ""); } TEST_F(ModelFixture, CurveTriquadratic_GetterSetters_evaluate) { Model m; CurveTriquadratic curve(m); EXPECT_FALSE(curve.coefficient1Constant()); EXPECT_FALSE(curve.coefficient2xPOW2()); EXPECT_FALSE(curve.coefficient3x()); EXPECT_FALSE(curve.coefficient4yPOW2()); EXPECT_FALSE(curve.coefficient5y()); EXPECT_FALSE(curve.coefficient6zPOW2()); EXPECT_FALSE(curve.coefficient7z()); EXPECT_FALSE(curve.coefficient8xPOW2TIMESYPOW2()); EXPECT_FALSE(curve.coefficient9xTIMESY()); EXPECT_FALSE(curve.coefficient10xTIMESYPOW2()); EXPECT_FALSE(curve.coefficient11xPOW2TIMESY()); EXPECT_FALSE(curve.coefficient12xPOW2TIMESZPOW2()); EXPECT_FALSE(curve.coefficient13xTIMESZ()); EXPECT_FALSE(curve.coefficient14xTIMESZPOW2()); EXPECT_FALSE(curve.coefficient15xPOW2TIMESZ()); EXPECT_FALSE(curve.coefficient16yPOW2TIMESZPOW2()); EXPECT_FALSE(curve.coefficient17yTIMESZ()); EXPECT_FALSE(curve.coefficient18yTIMESZPOW2()); EXPECT_FALSE(curve.coefficient19yPOW2TIMESZ()); EXPECT_FALSE(curve.coefficient20xPOW2TIMESYPOW2TIMESZPOW2()); EXPECT_FALSE(curve.coefficient21xPOW2TIMESYPOW2TIMESZ()); EXPECT_FALSE(curve.coefficient22xPOW2TIMESYTIMESZPOW2()); EXPECT_FALSE(curve.coefficient23xTIMESYPOW2TIMESZPOW2()); EXPECT_FALSE(curve.coefficient24xPOW2TIMESYTIMESZ()); EXPECT_FALSE(curve.coefficient25xTIMESYPOW2TIMESZ()); EXPECT_FALSE(curve.coefficient26xTIMESYTIMESZPOW2()); EXPECT_FALSE(curve.coefficient27xTIMESYTIMESZ()); EXPECT_FALSE(curve.minimumValueofx()); EXPECT_FALSE(curve.maximumValueofx()); EXPECT_FALSE(curve.minimumValueofy()); EXPECT_FALSE(curve.maximumValueofy()); EXPECT_FALSE(curve.minimumValueofz()); EXPECT_FALSE(curve.maximumValueofz()); double c1 = 1.0; double c2 = 2.0; double c3 = 3.0; double c4 = 4.0; double c5 = 5.0; double c6 = 6.0; double c7 = 7.0; double c8 = 8.0; double c9 = 9.0; double c10 = 10.0; double c11 = 11.0; double c12 = 12.0; double c13 = 13.0; double c14 = 14.0; double c15 = 15.0; double c16 = 16.0; double c17 = 17.0; double c18 = 18.0; double c19 = 19.0; double c20 = 20.0; double c21 = 21.0; double c22 = 22.0; double c23 = 23.0; double c24 = 24.0; double c25 = 25.0; double c26 = 26.0; double c27 = 27.0; double min_x = 0.1; double max_x = 3.0; double min_y = 4.0; double max_y = 6.0; double min_z = 8.0; double max_z = 10.0; auto calc = [=](double x, double y, double z) { return c1 + c2 * std::pow(x, 2) + c3 * x + c4 * std::pow(y, 2) + c5 * y + c6 * std::pow(z, 2) + c7 * z + c8 * std::pow(x, 2) * std::pow(y, 2) + c9 * x * y + c10 * x * std::pow(y, 2) + c11 * std::pow(x, 2) * y + c12 * std::pow(x, 2) * std::pow(z, 2) + c13 * x * z + c14 * x * std::pow(z, 2) + c15 * std::pow(x, 2) * z + c16 * std::pow(y, 2) * std::pow(z, 2) + c17 * y * z + c18 * y * std::pow(z, 2) + c19 * std::pow(y, 2) * z + c20 * std::pow(x, 2) * std::pow(y, 2) * std::pow(z, 2) + c21 * std::pow(x, 2) * std::pow(y, 2) * z + c22 * std::pow(x, 2) * y * std::pow(z, 2) + c23 * x * std::pow(y, 2) * std::pow(z, 2) + c24 * std::pow(x, 2) * y * z + c25 * x * std::pow(y, 2) * z + c26 * x * y * std::pow(z, 2) + c27 * x * y * z; }; 
EXPECT_TRUE(curve.setCoefficient1Constant(c1)); EXPECT_TRUE(curve.setCoefficient2xPOW2(c2)); EXPECT_TRUE(curve.setCoefficient3x(c3)); EXPECT_TRUE(curve.setCoefficient4yPOW2(c4)); EXPECT_TRUE(curve.setCoefficient5y(c5)); EXPECT_TRUE(curve.setCoefficient6zPOW2(c6)); EXPECT_TRUE(curve.setCoefficient7z(c7)); EXPECT_TRUE(curve.setCoefficient8xPOW2TIMESYPOW2(c8)); EXPECT_TRUE(curve.setCoefficient9xTIMESY(c9)); EXPECT_TRUE(curve.setCoefficient10xTIMESYPOW2(c10)); EXPECT_TRUE(curve.setCoefficient11xPOW2TIMESY(c11)); EXPECT_TRUE(curve.setCoefficient12xPOW2TIMESZPOW2(c12)); EXPECT_TRUE(curve.setCoefficient13xTIMESZ(c13)); EXPECT_TRUE(curve.setCoefficient14xTIMESZPOW2(c14)); EXPECT_TRUE(curve.setCoefficient15xPOW2TIMESZ(c15)); EXPECT_TRUE(curve.setCoefficient16yPOW2TIMESZPOW2(c16)); EXPECT_TRUE(curve.setCoefficient17yTIMESZ(c17)); EXPECT_TRUE(curve.setCoefficient18yTIMESZPOW2(c18)); EXPECT_TRUE(curve.setCoefficient19yPOW2TIMESZ(c19)); EXPECT_TRUE(curve.setCoefficient20xPOW2TIMESYPOW2TIMESZPOW2(c20)); EXPECT_TRUE(curve.setCoefficient21xPOW2TIMESYPOW2TIMESZ(c21)); EXPECT_TRUE(curve.setCoefficient22xPOW2TIMESYTIMESZPOW2(c22)); EXPECT_TRUE(curve.setCoefficient23xTIMESYPOW2TIMESZPOW2(c23)); EXPECT_TRUE(curve.setCoefficient24xPOW2TIMESYTIMESZ(c24)); EXPECT_TRUE(curve.setCoefficient25xTIMESYPOW2TIMESZ(c25)); EXPECT_TRUE(curve.setCoefficient26xTIMESYTIMESZPOW2(c26)); EXPECT_TRUE(curve.setCoefficient27xTIMESYTIMESZ(c27)); ASSERT_TRUE(curve.coefficient1Constant()); ASSERT_TRUE(curve.coefficient2xPOW2()); ASSERT_TRUE(curve.coefficient3x()); ASSERT_TRUE(curve.coefficient4yPOW2()); ASSERT_TRUE(curve.coefficient5y()); ASSERT_TRUE(curve.coefficient6zPOW2()); ASSERT_TRUE(curve.coefficient7z()); ASSERT_TRUE(curve.coefficient8xPOW2TIMESYPOW2()); ASSERT_TRUE(curve.coefficient9xTIMESY()); ASSERT_TRUE(curve.coefficient10xTIMESYPOW2()); ASSERT_TRUE(curve.coefficient11xPOW2TIMESY()); ASSERT_TRUE(curve.coefficient12xPOW2TIMESZPOW2()); ASSERT_TRUE(curve.coefficient13xTIMESZ()); ASSERT_TRUE(curve.coefficient14xTIMESZPOW2()); ASSERT_TRUE(curve.coefficient15xPOW2TIMESZ()); ASSERT_TRUE(curve.coefficient16yPOW2TIMESZPOW2()); ASSERT_TRUE(curve.coefficient17yTIMESZ()); ASSERT_TRUE(curve.coefficient18yTIMESZPOW2()); ASSERT_TRUE(curve.coefficient19yPOW2TIMESZ()); ASSERT_TRUE(curve.coefficient20xPOW2TIMESYPOW2TIMESZPOW2()); ASSERT_TRUE(curve.coefficient21xPOW2TIMESYPOW2TIMESZ()); ASSERT_TRUE(curve.coefficient22xPOW2TIMESYTIMESZPOW2()); ASSERT_TRUE(curve.coefficient23xTIMESYPOW2TIMESZPOW2()); ASSERT_TRUE(curve.coefficient24xPOW2TIMESYTIMESZ()); ASSERT_TRUE(curve.coefficient25xTIMESYPOW2TIMESZ()); ASSERT_TRUE(curve.coefficient26xTIMESYTIMESZPOW2()); ASSERT_TRUE(curve.coefficient27xTIMESYTIMESZ()); EXPECT_EQ(c1, curve.coefficient1Constant().get()); EXPECT_EQ(c2, curve.coefficient2xPOW2().get()); EXPECT_EQ(c3, curve.coefficient3x().get()); EXPECT_EQ(c4, curve.coefficient4yPOW2().get()); EXPECT_EQ(c5, curve.coefficient5y().get()); EXPECT_EQ(c6, curve.coefficient6zPOW2().get()); EXPECT_EQ(c7, curve.coefficient7z().get()); EXPECT_EQ(c8, curve.coefficient8xPOW2TIMESYPOW2().get()); EXPECT_EQ(c9, curve.coefficient9xTIMESY().get()); EXPECT_EQ(c10, curve.coefficient10xTIMESYPOW2().get()); EXPECT_EQ(c11, curve.coefficient11xPOW2TIMESY().get()); EXPECT_EQ(c12, curve.coefficient12xPOW2TIMESZPOW2().get()); EXPECT_EQ(c13, curve.coefficient13xTIMESZ().get()); EXPECT_EQ(c14, curve.coefficient14xTIMESZPOW2().get()); EXPECT_EQ(c15, curve.coefficient15xPOW2TIMESZ().get()); EXPECT_EQ(c16, curve.coefficient16yPOW2TIMESZPOW2().get()); EXPECT_EQ(c17, 
curve.coefficient17yTIMESZ().get()); EXPECT_EQ(c18, curve.coefficient18yTIMESZPOW2().get()); EXPECT_EQ(c19, curve.coefficient19yPOW2TIMESZ().get()); EXPECT_EQ(c20, curve.coefficient20xPOW2TIMESYPOW2TIMESZPOW2().get()); EXPECT_EQ(c21, curve.coefficient21xPOW2TIMESYPOW2TIMESZ().get()); EXPECT_EQ(c22, curve.coefficient22xPOW2TIMESYTIMESZPOW2().get()); EXPECT_EQ(c23, curve.coefficient23xTIMESYPOW2TIMESZPOW2().get()); EXPECT_EQ(c24, curve.coefficient24xPOW2TIMESYTIMESZ().get()); EXPECT_EQ(c25, curve.coefficient25xTIMESYPOW2TIMESZ().get()); EXPECT_EQ(c26, curve.coefficient26xTIMESYTIMESZPOW2().get()); EXPECT_EQ(c27, curve.coefficient27xTIMESYTIMESZ().get()); // Lims EXPECT_TRUE(curve.setMinimumValueofx(min_x)); EXPECT_TRUE(curve.setMaximumValueofx(max_x)); ASSERT_TRUE(curve.minimumValueofx()); ASSERT_TRUE(curve.maximumValueofx()); EXPECT_EQ(min_x, curve.minimumValueofx().get()); EXPECT_EQ(max_x, curve.maximumValueofx().get()); EXPECT_TRUE(curve.setMinimumValueofy(min_y)); EXPECT_TRUE(curve.setMaximumValueofy(max_y)); ASSERT_TRUE(curve.minimumValueofy()); ASSERT_TRUE(curve.maximumValueofy()); EXPECT_EQ(min_y, curve.minimumValueofy().get()); EXPECT_EQ(max_y, curve.maximumValueofy().get()); EXPECT_TRUE(curve.setMinimumValueofz(min_z)); EXPECT_TRUE(curve.setMaximumValueofz(max_z)); ASSERT_TRUE(curve.minimumValueofz()); ASSERT_TRUE(curve.maximumValueofz()); EXPECT_EQ(min_z, curve.minimumValueofz().get()); EXPECT_EQ(max_z, curve.maximumValueofz().get()); EXPECT_FALSE(curve.minimumCurveOutput()); EXPECT_FALSE(curve.maximumCurveOutput()); // x, y and z in range, no output limit double x = 0.5; double y = 5.0; double z = 9.0; EXPECT_DOUBLE_EQ(calc(x, y, z), curve.evaluate(x, y, z)); EXPECT_DOUBLE_EQ(92296.75, curve.evaluate(x, y, z)); // x < min_x x = 0.05; EXPECT_DOUBLE_EQ(calc(min_x, y, z), curve.evaluate(x, y, z)); EXPECT_DOUBLE_EQ(52520.19, curve.evaluate(x, y, z)); // x > max_x x = 20.0; EXPECT_DOUBLE_EQ(calc(max_x, y, z), curve.evaluate(x, y, z)); EXPECT_DOUBLE_EQ(751098.0, curve.evaluate(x, y, z)); // y < min_y x = 0.5; y = 3.5; EXPECT_DOUBLE_EQ(calc(x, min_y, z), curve.evaluate(x, y, z)); EXPECT_DOUBLE_EQ(62231.25, curve.evaluate(x, y, z)); // y > max_y y = 40.0; EXPECT_DOUBLE_EQ(calc(x, max_y, z), curve.evaluate(x, y, z)); EXPECT_DOUBLE_EQ(128310.75, curve.evaluate(x, y, z)); // z < min_z x = 0.5; y = 5.0; z = 3.0; EXPECT_DOUBLE_EQ(calc(x, y, min_z), curve.evaluate(x, y, z)); EXPECT_DOUBLE_EQ(73991.25, curve.evaluate(x, y, z)); // z > max_z z = 40.0; EXPECT_DOUBLE_EQ(calc(x, y, max_z), curve.evaluate(x, y, z)); EXPECT_DOUBLE_EQ(112624.25, curve.evaluate(x, y, z)); // x < min_x, y < min_y, z < min_z x = -5.0; y = -5.0; z = -5.0; EXPECT_DOUBLE_EQ(calc(min_x, min_y, min_z), curve.evaluate(x, y, z)); EXPECT_DOUBLE_EQ(28346.4, curve.evaluate(x, y, z)); // x > max_x, y, z > max_y x = 10.0; y = 10.0; z = 10.0; EXPECT_DOUBLE_EQ(calc(max_x, max_y, max_z), curve.evaluate(x, y, z)); EXPECT_DOUBLE_EQ(1273160.0, curve.evaluate(x, y, z)); // Set output limits double min_output = 40000.0; double max_output = 100000.0; EXPECT_TRUE(curve.setMinimumCurveOutput(min_output)); EXPECT_TRUE(curve.setMaximumCurveOutput(max_output)); ASSERT_TRUE(curve.minimumCurveOutput()); ASSERT_TRUE(curve.maximumCurveOutput()); EXPECT_EQ(min_output, curve.minimumCurveOutput().get()); EXPECT_EQ(max_output, curve.maximumCurveOutput().get()); // out < min output EXPECT_DOUBLE_EQ(min_output, curve.evaluate(min_x, min_y, min_z)); // out > max output EXPECT_DOUBLE_EQ(max_output, curve.evaluate(max_x, max_y, max_z)); // Wrong number of 
arguments // EXPECT_THROW(curve.evaluate(1.0), openstudio::Exception); // EXPECT_THROW(curve.evaluate(1.0, 2.0), openstudio::Exception); }
5,906
2,092
<reponame>nabladev/ohmyform<filename>docker/nginx/start.py #!/usr/bin/python import os import subprocess #Set default port if not os.environ.get("PORT"): os.environ["PORT"] = "5000" #Set default sockets port if not os.environ.get("SOCKET_PORT"): os.environ["SOCKET_PORT"] = "20523" # Actual startup script if not os.path.exists("/certs/dhparam.pem") and os.environ["TLS_FLAVOR"] != "notls": os.system("openssl dhparam -out /certs/dhparam.pem 2048") if os.environ["TLS_FLAVOR"] == "letsencrypt": subprocess.Popen(["/letsencrypt.py"]) elif os.environ["TLS_FLAVOR"] == "cert": if not os.path.exists("/certs/cert.pem"): os.system("openssl req -newkey rsa:2048 -x509 -keyout /certs/key.pem -out /certs/cert.pem -days 365 -nodes -subj '/C=NA/ST=None/L=None/O=None/CN=" + os.environ["BASE_URL"] + "'") subprocess.call(["/config.py"]) os.execv("/usr/sbin/nginx", ["nginx", "-g", "daemon off;"])
386
561
/// Copyright 2021 Pinterest Inc. /// /// Licensed under the Apache License, Version 2.0 (the "License"); /// you may not use this file except in compliance with the License. /// You may obtain a copy of the License at /// /// http://www.apache.org/licenses/LICENSE-2.0 /// Unless required by applicable law or agreed to in writing, software /// distributed under the License is distributed on an "AS IS" BASIS, /// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. /// See the License for the specific language governing permissions and /// limitations under the License. // // @author <NAME> (<EMAIL>) // package com.pinterest.rocksplicator.codecs; import static org.junit.Assert.assertEquals; import com.pinterest.rocksplicator.thrift.eventhistory.LeaderEventsHistory; import org.apache.thrift.protocol.TBinaryProtocol; import org.apache.thrift.protocol.TCompactProtocol; import org.junit.Test; public class ThriftCodecTest extends CodecTestBase { @Test public void testBinaryCodec() throws Exception { ThriftCodec<LeaderEventsHistory> codec = new ThriftCodec<>(LeaderEventsHistory.class, new TBinaryProtocol.Factory()); byte[] binaryData = codec.encode(history); LeaderEventsHistory decodedHistory = codec.decode(binaryData); assertEquals(history, decodedHistory); } @Test public void testCompactCodec() throws Exception { ThriftCodec<LeaderEventsHistory> codec = new ThriftCodec<>(LeaderEventsHistory.class, new TCompactProtocol.Factory()); byte[] binaryData = codec.encode(history); LeaderEventsHistory decodedHistory = codec.decode(binaryData); assertEquals(history, decodedHistory); } }
504
1,819
<reponame>fakeNetflix/square-repo-mortar /* * Copyright 2013 Square Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.example.mortar.view; import android.content.Context; import android.util.AttributeSet; import android.widget.TextView; import mortar.dagger1support.ObjectGraphService; import com.example.mortar.screen.FriendScreen; import javax.inject.Inject; public class FriendView extends TextView { @Inject FriendScreen.Presenter presenter; public FriendView(Context context, AttributeSet attrs) { super(context, attrs); ObjectGraphService.inject(context, this); } @Override protected void onAttachedToWindow() { super.onAttachedToWindow(); presenter.takeView(this); } @Override protected void onDetachedFromWindow() { super.onDetachedFromWindow(); presenter.dropView(this); } }
395
1,521
/** * The MIT License (MIT) * * Copyright (c) 2014-2017 <NAME>, 2017-2021 Ta4j Organization & respective * authors (see AUTHORS) * * Permission is hereby granted, free of charge, to any person obtaining a copy of * this software and associated documentation files (the "Software"), to deal in * the Software without restriction, including without limitation the rights to * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of * the Software, and to permit persons to whom the Software is furnished to do so, * subject to the following conditions: * * The above copyright notice and this permission notice shall be included in all * copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */ package org.ta4j.core.indicators.statistics; import static org.ta4j.core.TestUtils.assertNumEquals; import java.util.function.Function; import org.junit.Before; import org.junit.Test; import org.ta4j.core.BarSeries; import org.ta4j.core.Indicator; import org.ta4j.core.indicators.AbstractIndicatorTest; import org.ta4j.core.indicators.helpers.ClosePriceIndicator; import org.ta4j.core.mocks.MockBarSeries; import org.ta4j.core.num.Num; public class VarianceIndicatorTest extends AbstractIndicatorTest<Indicator<Num>, Num> { private BarSeries data; public VarianceIndicatorTest(Function<Number, Num> numFunction) { super(numFunction); } @Before public void setUp() { data = new MockBarSeries(numFunction, 1, 2, 3, 4, 3, 4, 5, 4, 3, 0, 9); } @Test public void varianceUsingBarCount4UsingClosePrice() { VarianceIndicator var = new VarianceIndicator(new ClosePriceIndicator(data), 4); assertNumEquals(0, var.getValue(0)); assertNumEquals(0.25, var.getValue(1)); assertNumEquals(2.0 / 3, var.getValue(2)); assertNumEquals(1.25, var.getValue(3)); assertNumEquals(0.5, var.getValue(4)); assertNumEquals(0.25, var.getValue(5)); assertNumEquals(0.5, var.getValue(6)); assertNumEquals(0.5, var.getValue(7)); assertNumEquals(0.5, var.getValue(8)); assertNumEquals(3.5, var.getValue(9)); assertNumEquals(10.5, var.getValue(10)); } @Test public void firstValueShouldBeZero() { VarianceIndicator var = new VarianceIndicator(new ClosePriceIndicator(data), 4); assertNumEquals(0, var.getValue(0)); } @Test public void varianceShouldBeZeroWhenBarCountIs1() { VarianceIndicator var = new VarianceIndicator(new ClosePriceIndicator(data), 1); assertNumEquals(0, var.getValue(3)); assertNumEquals(0, var.getValue(8)); } @Test public void varianceUsingBarCount2UsingClosePrice() { VarianceIndicator var = new VarianceIndicator(new ClosePriceIndicator(data), 2); assertNumEquals(0, var.getValue(0)); assertNumEquals(0.25, var.getValue(1)); assertNumEquals(0.25, var.getValue(2)); assertNumEquals(0.25, var.getValue(3)); assertNumEquals(2.25, var.getValue(9)); assertNumEquals(20.25, var.getValue(10)); } }
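The expected values in varianceUsingBarCount4UsingClosePrice above follow from the population variance of the closes inside the trailing window (at most barCount bars, fewer near the start of the series). A quick numerical check of three of them, sketched in Python with numpy (np.var uses the population formula by default):

import numpy as np

closes = [1, 2, 3, 4, 3, 4, 5, 4, 3, 0, 9]
assert abs(np.var(closes[0:3]) - 2.0 / 3) < 1e-9   # getValue(2): only 3 bars fit in the window
assert abs(np.var(closes[0:4]) - 1.25) < 1e-9      # getValue(3): window [1, 2, 3, 4]
assert abs(np.var(closes[7:11]) - 10.5) < 1e-9     # getValue(10): window [4, 3, 0, 9]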
1,310
312
#include <occa/defines.hpp> #ifndef OCCA_INTERNAL_MODES_HIP_POLYFILL_HEADER #define OCCA_INTERNAL_MODES_HIP_POLYFILL_HEADER #if OCCA_HIP_ENABLED # include <hip/hip_runtime_api.h> #else #include <sys/types.h> #undef minor #undef major // Wrap in the occa namespace so as long as we don't use ::hipModule_t, the two // - hipModule_t // - occa::hipModule_t // are indistinguisable inside the occa namespace namespace occa { //---[ Types ]------------------------ typedef struct _hipCtx_t* hipCtx_t; typedef int hipDevice_t; typedef void* hipDeviceptr_t; typedef struct _hipEvent_t* hipEvent_t; typedef struct _hipFunction_t* hipFunction_t; typedef struct _hipFunctionAttribute_t* hipFunctionAttribute_t; typedef struct _hipModule_t* hipModule_t; typedef struct _hipStream_t* hipStream_t; //---[ Enums ]------------------------ static const int HIP_LAUNCH_PARAM_BUFFER_POINTER = 0; static const int HIP_LAUNCH_PARAM_BUFFER_SIZE = 0; static const int HIP_LAUNCH_PARAM_END = 0; class hipDeviceProp_t { public: char *name; size_t totalGlobalMem; int maxThreadsPerBlock; int gcnArch; char gcnArchName[256]; int major; int minor; inline hipDeviceProp_t() : name(NULL), totalGlobalMem(0), maxThreadsPerBlock(-1), gcnArch(-1), major(-1), minor(-1) {} }; enum hipError_t { hipSuccess = 0, hipErrorInvalidValue, hipErrorMemoryAllocation, hipErrorNotInitialized, hipErrorDeinitialized, hipErrorProfilerDisabled, hipErrorProfilerNotInitialized, hipErrorProfilerAlreadyStarted, hipErrorProfilerAlreadyStopped, hipErrorNoDevice, hipErrorInvalidDevice, hipErrorInvalidImage, hipErrorInvalidContext, hipErrorContextAlreadyCurrent, hipErrorMapFailed, hipErrorUnmapFailed, hipErrorArrayIsMapped, hipErrorAlreadyMapped, hipErrorNoBinaryForGpu, hipErrorAlreadyAcquired, hipErrorNotMapped, hipErrorNotMappedAsArray, hipErrorNotMappedAsPointer, hipErrorECCNotCorrectable, hipErrorUnsupportedLimit, hipErrorContextAlreadyInUse, hipErrorPeerAccessUnsupported, hipErrorInvalidSource, hipErrorFileNotFound, hipErrorSharedObjectSymbolNotFound, hipErrorSharedObjectInitFailed, hipErrorOperatingSystem, hipErrorInvalidHandle, hipErrorNotFound, hipErrorNotReady, hipErrorLaunchOutOfResources, hipErrorLaunchTimeOut, hipErrorPeerAccessAlreadyEnabled, hipErrorPeerAccessNotEnabled, hipErrorHostMemoryAlreadyRegistered, hipErrorHostMemoryNotRegistered, OCCA_HIP_IS_NOT_ENABLED }; //---[ Methods ]---------------------- inline hipError_t hipInit(unsigned int Flags) { return OCCA_HIP_IS_NOT_ENABLED; } inline hipError_t hipDriverGetVersion(int *pVersion) { return OCCA_HIP_IS_NOT_ENABLED; } inline hipError_t hipFuncGetAttribute(int *pi, hipFunctionAttribute_t attrib, hipFunction_t hfunc) { return OCCA_HIP_IS_NOT_ENABLED; } inline hipError_t hipModuleLaunchKernel(hipFunction_t f, unsigned int gridDimX, unsigned int gridDimY, unsigned int gridDimZ, unsigned int blockDimX, unsigned int blockDimY, unsigned int blockDimZ, unsigned int sharedMemBytes, hipStream_t hStream, void **kernelParams, void **extra) { return OCCA_HIP_IS_NOT_ENABLED; } // ---[ Context ]------------------- inline hipError_t hipCtxCreate(hipCtx_t *pctx, unsigned int flags, hipDevice_t dev) { return OCCA_HIP_IS_NOT_ENABLED; } inline hipError_t hipCtxDestroy(hipCtx_t ctx) { return OCCA_HIP_IS_NOT_ENABLED; } inline hipError_t hipCtxEnablePeerAccess(hipCtx_t peerContext, unsigned int Flags) { return OCCA_HIP_IS_NOT_ENABLED; } inline hipError_t hipCtxSetCurrent(hipCtx_t ctx) { return OCCA_HIP_IS_NOT_ENABLED; } // ---[ Device ]-------------------- inline hipError_t hipDeviceCanAccessPeer(int *canAccessPeer, 
hipDevice_t dev, hipDevice_t peerDev) { return OCCA_HIP_IS_NOT_ENABLED; } inline hipError_t hipDeviceComputeCapability(int *major, int *minor, hipDevice_t dev) { // [Deprecated] return OCCA_HIP_IS_NOT_ENABLED; } inline hipError_t hipDeviceGet(hipDevice_t *device, int ordinal) { return OCCA_HIP_IS_NOT_ENABLED; } inline hipError_t hipGetDeviceProperties(hipDeviceProp_t *deviceProps, hipDevice_t device) { return OCCA_HIP_IS_NOT_ENABLED; } inline hipError_t hipGetDeviceCount(int *count) { return OCCA_HIP_IS_NOT_ENABLED; } inline hipError_t hipDeviceGetName(char *name, int len, hipDevice_t dev) { return OCCA_HIP_IS_NOT_ENABLED; } inline hipError_t hipSetDevice(hipDevice_t device) { return OCCA_HIP_IS_NOT_ENABLED; } inline hipError_t hipDeviceTotalMem(size_t *bytes, hipDevice_t dev) { return OCCA_HIP_IS_NOT_ENABLED; } // ---[ Event ]--------------------- inline hipError_t hipEventCreate(hipEvent_t *phEvent) { return OCCA_HIP_IS_NOT_ENABLED; } inline hipError_t hipEventDestroy(hipEvent_t hEvent) { return OCCA_HIP_IS_NOT_ENABLED; } inline hipError_t hipEventElapsedTime(float *pMilliseconds, hipEvent_t hStart, hipEvent_t hEnd) { return OCCA_HIP_IS_NOT_ENABLED; } inline hipError_t hipEventRecord(hipEvent_t hEvent, hipStream_t hStream) { return OCCA_HIP_IS_NOT_ENABLED; } inline hipError_t hipEventSynchronize(hipEvent_t hEvent) { return OCCA_HIP_IS_NOT_ENABLED; } // ---[ Memory ]-------------------- inline hipError_t hipMalloc(hipDeviceptr_t *dptr, size_t bytesize) { return OCCA_HIP_IS_NOT_ENABLED; } inline hipError_t hipHostMalloc(void **pp, size_t bytesize) { return OCCA_HIP_IS_NOT_ENABLED; } inline hipError_t hipMemAllocManaged(hipDeviceptr_t *dptr, size_t bytesize, unsigned int flags) { return OCCA_HIP_IS_NOT_ENABLED; } inline hipError_t hipFree(hipDeviceptr_t dptr) { return OCCA_HIP_IS_NOT_ENABLED; } inline hipError_t hipHostFree(void *p) { return OCCA_HIP_IS_NOT_ENABLED; } inline hipError_t hipHostGetDevicePointer(hipDeviceptr_t *dptr, void *p, unsigned int Flags) { return OCCA_HIP_IS_NOT_ENABLED; } inline hipError_t hipMemPrefetchAsync(hipDeviceptr_t *dptr, size_t count, hipDevice_t dstDevice, hipStream_t hStream) { return OCCA_HIP_IS_NOT_ENABLED; } inline hipError_t hipMemcpyDtoD(hipDeviceptr_t dstDevice, const hipDeviceptr_t srcDevice, size_t ByteCount) { return OCCA_HIP_IS_NOT_ENABLED; } inline hipError_t hipMemcpyDtoDAsync(hipDeviceptr_t dstDevice, const hipDeviceptr_t srcDevice, size_t ByteCount, hipStream_t hstream) { return OCCA_HIP_IS_NOT_ENABLED; } inline hipError_t hipMemcpyDtoH(void *dstHost, const hipDeviceptr_t srcDevice, size_t ByteCount) { return OCCA_HIP_IS_NOT_ENABLED; } inline hipError_t hipMemcpyDtoHAsync(void *dstHost, const hipDeviceptr_t srcDevice, size_t ByteCount, hipStream_t hstream) { return OCCA_HIP_IS_NOT_ENABLED; } inline hipError_t hipMemcpyHtoD(hipDeviceptr_t dstDevice, const void *srcHost, size_t ByteCount) { return OCCA_HIP_IS_NOT_ENABLED; } inline hipError_t hipMemcpyHtoDAsync(hipDeviceptr_t dstDevice, const void *srcHost, size_t ByteCount, hipStream_t hstream) { return OCCA_HIP_IS_NOT_ENABLED; } inline hipError_t hipMemcpyPeer(hipDeviceptr_t dstDevice, hipCtx_t dstContext, hipDeviceptr_t srcDevice, hipCtx_t srcContext, size_t ByteCount) { return OCCA_HIP_IS_NOT_ENABLED; } inline hipError_t hipMemcpyPeerAsync(hipDeviceptr_t dstDevice, hipCtx_t dstContext, hipDeviceptr_t srcDevice, hipCtx_t srcContext, size_t ByteCount, hipStream_t hStream) { return OCCA_HIP_IS_NOT_ENABLED; } // ---[ Module ]-------------------- inline hipError_t 
hipModuleGetFunction(hipFunction_t *hfunc, hipModule_t hmod, const char *name) { return OCCA_HIP_IS_NOT_ENABLED; } inline hipError_t hipModuleLoad(hipModule_t *module, const char *fname) { return OCCA_HIP_IS_NOT_ENABLED; } inline hipError_t hipModuleUnload(hipModule_t hmod) { return OCCA_HIP_IS_NOT_ENABLED; } // ---[ Stream ]-------------------- inline hipError_t hipStreamCreate(hipStream_t *phStream) { return OCCA_HIP_IS_NOT_ENABLED; } inline hipError_t hipStreamDestroy(hipStream_t hStream) { return OCCA_HIP_IS_NOT_ENABLED; } inline hipError_t hipStreamSynchronize(hipStream_t hStream) { return OCCA_HIP_IS_NOT_ENABLED; } } #endif #endif
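A minimal usage sketch (not part of the polyfill itself): because the stubs mirror the real HIP driver-API signatures inside the occa namespace, backend code compiles unchanged and can detect a missing HIP runtime from the stub return code. The include path and the helper name below are assumptions based on the header guard.

#include <occa/internal/modes/hip/polyfill.hpp> // assumed path

namespace occa {
  // Returns false when built with OCCA_HIP_ENABLED=0, since every stub
  // above reports OCCA_HIP_IS_NOT_ENABLED instead of hipSuccess.
  inline bool hipRuntimeAvailable() {
    return hipInit(0) == hipSuccess;
  }
}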
{"nom":"Morganx","circ":"3ème circonscription","dpt":"Landes","inscrits":163,"abs":50,"votants":113,"blancs":6,"nuls":3,"exp":104,"res":[{"nuance":"SOC","nom":"<NAME>","voix":59},{"nuance":"REM","nom":"<NAME>","voix":45}]}
#ifndef BOOST_CONTRACT_DETAIL_NONE_HPP_
#define BOOST_CONTRACT_DETAIL_NONE_HPP_

// Copyright (C) 2008-2018 <NAME>
// Distributed under the Boost Software License, Version 1.0 (see accompanying
// file LICENSE_1_0.txt or a copy at http://www.boost.org/LICENSE_1_0.txt).
// See: http://www.boost.org/doc/libs/release/libs/contract/doc/html/index.html

namespace boost { namespace contract { namespace detail {

// Tag for "no type".
struct none {
    // Some library code uses this to avoid unused local variable warnings on #if, etc.
    static none& value() {
        static none none_value;
        return none_value;
    }
};

// Transform `void` to the `none` type (for convenience, instead of using MPL).
template<typename T>
struct none_if_void { typedef T type; };

template<>
struct none_if_void<void> { typedef none type; };

} } } // namespace

#endif // #include guard
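A small sketch of how the none_if_void trait above is typically exercised (illustrative only; the include path is inferred from the header guard):

#include <boost/contract/detail/none.hpp> // assumed path
#include <boost/static_assert.hpp>
#include <boost/type_traits/is_same.hpp>

// `void` maps to the `none` tag; every other type is left untouched.
typedef boost::contract::detail::none_if_void<void>::type void_result;
typedef boost::contract::detail::none_if_void<int>::type int_result;
BOOST_STATIC_ASSERT((boost::is_same<void_result, boost::contract::detail::none>::value));
BOOST_STATIC_ASSERT((boost::is_same<int_result, int>::value));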
import logging import re import sys import ssl from copy import deepcopy from time import sleep from collections import OrderedDict from requests.sessions import Session from requests.adapters import HTTPAdapter from requests.packages.urllib3.util.ssl_ import create_urllib3_context from .interpreters import JavaScriptInterpreter from .user_agent import User_Agent try: from requests_toolbelt.utils import dump except ImportError: pass try: import brotli except ImportError: pass try: from urlparse import urlparse from urlparse import urlunparse except ImportError: from urllib.parse import urlparse from urllib.parse import urlunparse ########################################################################################################################################################## __version__ = '1.1.9' BUG_REPORT = 'Cloudflare may have changed their technique, or there may be a bug in the script.' ########################################################################################################################################################## class CipherSuiteAdapter(HTTPAdapter): def __init__(self, cipherSuite=None, **kwargs): self.cipherSuite = cipherSuite if hasattr(ssl, 'PROTOCOL_TLS'): self.ssl_context = create_urllib3_context( ssl_version=getattr(ssl, 'PROTOCOL_TLSv1_3', ssl.PROTOCOL_TLSv1_2), ciphers=self.cipherSuite ) else: self.ssl_context = create_urllib3_context(ssl_version=ssl.PROTOCOL_TLSv1) super(CipherSuiteAdapter, self).__init__(**kwargs) ########################################################################################################################################################## def init_poolmanager(self, *args, **kwargs): kwargs['ssl_context'] = self.ssl_context return super(CipherSuiteAdapter, self).init_poolmanager(*args, **kwargs) ########################################################################################################################################################## def proxy_manager_for(self, *args, **kwargs): kwargs['ssl_context'] = self.ssl_context return super(CipherSuiteAdapter, self).proxy_manager_for(*args, **kwargs) ########################################################################################################################################################## class CloudScraper(Session): def __init__(self, *args, **kwargs): self.debug = kwargs.pop('debug', False) self.delay = kwargs.pop('delay', None) self.interpreter = kwargs.pop('interpreter', 'js2py') self.allow_brotli = kwargs.pop('allow_brotli', True if 'brotli' in sys.modules.keys() else False) self.cipherSuite = None super(CloudScraper, self).__init__(*args, **kwargs) if 'requests' in self.headers['User-Agent']: # Set a random User-Agent if no custom User-Agent has been set self.headers = User_Agent(allow_brotli=self.allow_brotli).headers self.mount('https://', CipherSuiteAdapter(self.loadCipherSuite())) ########################################################################################################################################################## @staticmethod def debugRequest(req): try: print(dump.dump_all(req).decode('utf-8')) except: # noqa pass ########################################################################################################################################################## def loadCipherSuite(self): if self.cipherSuite: return self.cipherSuite self.cipherSuite = '' if hasattr(ssl, 'PROTOCOL_TLS'): ciphers = [ 'ECDHE-ECDSA-AES128-GCM-SHA256', 'ECDHE-RSA-AES128-GCM-SHA256', 'ECDHE-ECDSA-AES256-GCM-SHA384', 
'ECDHE-RSA-AES256-GCM-SHA384', 'ECDHE-ECDSA-CHACHA20-POLY1305-SHA256', 'ECDHE-RSA-CHACHA20-POLY1305-SHA256', 'ECDHE-RSA-AES128-CBC-SHA', 'ECDHE-RSA-AES256-CBC-SHA', 'RSA-AES128-GCM-SHA256', 'RSA-AES256-GCM-SHA384', 'ECDHE-RSA-AES128-GCM-SHA256', 'RSA-AES256-SHA', '3DES-EDE-CBC' ] if hasattr(ssl, 'PROTOCOL_TLSv1_3'): ciphers.insert(0, ['GREASE_3A', 'GREASE_6A', 'AES128-GCM-SHA256', 'AES256-GCM-SHA256', 'AES256-GCM-SHA384', 'CHACHA20-POLY1305-SHA256']) ctx = ssl.SSLContext(getattr(ssl, 'PROTOCOL_TLSv1_3', ssl.PROTOCOL_TLSv1_2)) for cipher in ciphers: try: ctx.set_ciphers(cipher) self.cipherSuite = '{}:{}'.format(self.cipherSuite, cipher).rstrip(':') except ssl.SSLError: pass return self.cipherSuite ########################################################################################################################################################## def request(self, method, url, *args, **kwargs): ourSuper = super(CloudScraper, self) resp = ourSuper.request(method, url, *args, **kwargs) if resp.headers.get('Content-Encoding') == 'br': if self.allow_brotli and resp._content: resp._content = brotli.decompress(resp.content) else: logging.warning('Brotli content detected, But option is disabled, we will not continue.') return resp # Debug request if self.debug: self.debugRequest(resp) # Check if Cloudflare anti-bot is on if self.isChallengeRequest(resp): if resp.request.method != 'GET': # Work around if the initial request is not a GET, # Supersede with a GET then re-request the original METHOD. self.request('GET', resp.url) resp = ourSuper.request(method, url, *args, **kwargs) else: # Solve Challenge resp = self.sendChallengeResponse(resp, **kwargs) return resp ########################################################################################################################################################## @staticmethod def isChallengeRequest(resp): if resp.headers.get('Server', '').startswith('cloudflare'): if b'why_captcha' in resp.content or b'/cdn-cgi/l/chk_captcha' in resp.content: raise ValueError('Captcha') return ( resp.status_code in [429, 503] and all(s in resp.content for s in [b'jschl_vc', b'jschl_answer']) ) return False ########################################################################################################################################################## def sendChallengeResponse(self, resp, **original_kwargs): body = resp.text # Cloudflare requires a delay before solving the challenge if not self.delay: try: delay = float(re.search(r'submit\(\);\r?\n\s*},\s*([0-9]+)', body).group(1)) / float(1000) if isinstance(delay, (int, float)): self.delay = delay except: # noqa pass sleep(self.delay) parsed_url = urlparse(resp.url) domain = parsed_url.netloc submit_url = '{}://{}/cdn-cgi/l/chk_jschl'.format(parsed_url.scheme, domain) cloudflare_kwargs = deepcopy(original_kwargs) try: params = OrderedDict() s = re.search(r'name="s"\svalue="(?P<s_value>[^"]+)', body) if s: params['s'] = s.group('s_value') params.update( [ ('jschl_vc', re.search(r'name="jschl_vc" value="(\w+)"', body).group(1)), ('pass', re.search(r'name="pass" value="(.+?)"', body).group(1)) ] ) params = cloudflare_kwargs.setdefault('params', params) except Exception as e: raise ValueError('Unable to parse Cloudflare anti-bots page: {} {}'.format(e.message, BUG_REPORT)) # Solve the Javascript challenge params['jschl_answer'] = JavaScriptInterpreter.dynamicImport(self.interpreter).solveChallenge(body, domain) # Requests transforms any request into a GET after a redirect, # so the redirect has to be 
handled manually here to allow for # performing other types of requests even as the first request. cloudflare_kwargs['allow_redirects'] = False redirect = self.request(resp.request.method, submit_url, **cloudflare_kwargs) redirect_location = urlparse(redirect.headers['Location']) if not redirect_location.netloc: redirect_url = urlunparse( ( parsed_url.scheme, domain, redirect_location.path, redirect_location.params, redirect_location.query, redirect_location.fragment ) ) return self.request(resp.request.method, redirect_url, **original_kwargs) return self.request(resp.request.method, redirect.headers['Location'], **original_kwargs) ########################################################################################################################################################## @classmethod def create_scraper(cls, sess=None, **kwargs): """ Convenience function for creating a ready-to-go CloudScraper object. """ scraper = cls(**kwargs) if sess: attrs = ['auth', 'cert', 'cookies', 'headers', 'hooks', 'params', 'proxies', 'data'] for attr in attrs: val = getattr(sess, attr, None) if val: setattr(scraper, attr, val) return scraper ########################################################################################################################################################## # Functions for integrating cloudscraper with other applications and scripts @classmethod def get_tokens(cls, url, **kwargs): scraper = cls.create_scraper( debug=kwargs.pop('debug', False), delay=kwargs.pop('delay', None), interpreter=kwargs.pop('interpreter', 'js2py'), allow_brotli=kwargs.pop('allow_brotli', True), ) try: resp = scraper.get(url, **kwargs) resp.raise_for_status() except Exception: logging.error('"{}" returned an error. Could not collect tokens.'.format(url)) raise domain = urlparse(resp.url).netloc # noinspection PyUnusedLocal cookie_domain = None for d in scraper.cookies.list_domains(): if d.startswith('.') and d in ('.{}'.format(domain)): cookie_domain = d break else: raise ValueError('Unable to find Cloudflare cookies. Does the site actually have Cloudflare IUAM ("I\'m Under Attack Mode") enabled?') return ( { '__cfduid': scraper.cookies.get('__cfduid', '', domain=cookie_domain), 'cf_clearance': scraper.cookies.get('cf_clearance', '', domain=cookie_domain) }, scraper.headers['User-Agent'] ) ########################################################################################################################################################## @classmethod def get_cookie_string(cls, url, **kwargs): """ Convenience function for building a Cookie HTTP header value. """ tokens, user_agent = cls.get_tokens(url, **kwargs) return '; '.join('='.join(pair) for pair in tokens.items()), user_agent ########################################################################################################################################################## create_scraper = CloudScraper.create_scraper get_tokens = CloudScraper.get_tokens get_cookie_string = CloudScraper.get_cookie_string
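A hedged usage sketch, assuming this file is published as the cloudscraper package (the URL is a placeholder): the module-level aliases defined above mean callers normally only touch create_scraper, get_tokens and get_cookie_string.

import cloudscraper  # assumed package name

scraper = cloudscraper.create_scraper(interpreter='js2py', delay=5)
response = scraper.get('https://example.com')  # IUAM challenge solved transparently

tokens, user_agent = cloudscraper.get_tokens('https://example.com')
cookie_header, user_agent = cloudscraper.get_cookie_string('https://example.com')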
<reponame>kikislater/micmac /*Header-MicMac-eLiSe-25/06/2007 MicMac : Multi Image Correspondances par Methodes Automatiques de Correlation eLiSe : ELements of an Image Software Environnement www.micmac.ign.fr Copyright : Institut Geographique National Author : <NAME> Contributors : <NAME>, <NAME>. [1] <NAME>, <NAME>. "A multiresolution and optimization-based image matching approach: An application to surface reconstruction from SPOT5-HRS stereo imagery." In IAPRS vol XXXVI-1/W41 in ISPRS Workshop On Topographic Mapping From Space (With Special Emphasis on Small Satellites), Ankara, Turquie, 02-2006. [2] <NAME>, "MicMac, un lociel de mise en correspondance d'images, adapte au contexte geograhique" to appears in Bulletin d'information de l'Institut Geographique National, 2007. Francais : MicMac est un logiciel de mise en correspondance d'image adapte au contexte de recherche en information geographique. Il s'appuie sur la bibliotheque de manipulation d'image eLiSe. Il est distibue sous la licences Cecill-B. Voir en bas de fichier et http://www.cecill.info. English : MicMac is an open source software specialized in image matching for research in geographic information. MicMac is built on the eLiSe image library. MicMac is governed by the "Cecill-B licence". See below and http://www.cecill.info. Header-MicMac-eLiSe-25/06/2007*/ #ifndef _ELISE_GENERAL_CUBE_FLUX_H_ #define _ELISE_GENERAL_CUBE_FLUX_H_ class cNappe2DGen { public : INT Sz() const {return mSz;} INT YMin(INT anX) const {return mDataYMin[anX];} INT YMax(INT anX) const {return mDataYMax[anX];} INT NbObj () const {return mNbObj;} INT NbInCol(const INT & anX) const { return YMax(anX)-YMin(anX); } void Resize(INT aSz,Fonc_Num aYMin,Fonc_Num aYMax); cNappe2DGen(INT aSz,Fonc_Num aYMin,Fonc_Num aYMax); bool Inside(Pt2di aPt) const { return (aPt.x>=0) && (aPt.x<mSz) && (aPt.y>=mDataYMin[aPt.x]) && (aPt.y< mDataYMax[aPt.x]); } INT OffsetPt(Pt2di aPt) const { return mDataOffset[aPt.x] +aPt.y; } // Offset de la colone pour y=0 INT OffsetCol(INT anX) const { return mDataOffset[anX]; } private : INT mSz; Im1D_INT4 mIYMin; INT4 * mDataYMin; Im1D_INT4 mIYMax; INT4 * mDataYMax; Im1D_INT4 mIOffset; INT4 * mDataOffset; INT mNbObj; }; template <class Type> class cTplNape2D : public cNappe2DGen { public : typedef Type value_type; ~cTplNape2D() {delete [] mData;} cTplNape2D ( INT aSz, Fonc_Num aYMin, Fonc_Num aYMax, INT aNbObjMin=0 ) : cNappe2DGen(aSz,aYMin,aYMax), mNbObjMem (ElMax(NbObj(),aNbObjMin)), mData (new Type[mNbObjMem]) { } Type & El(Pt2di aPt) {return mData[OffsetPt(aPt)];} // Retourne l'adresse de la colonne pour y = 0 Type * Colum(INT anX) {return mData+OffsetCol(anX);} void Resize(INT aSz,Fonc_Num aZMin,Fonc_Num aZMax) { cNappe2DGen::Resize(aSz,aZMin,aZMax); if (NbObj() > mNbObjMem) { mNbObjMem = NbObj(); delete [] mData; mData =new Type[NbObj()]; } } private : INT mNbObjMem; Type * mData; }; class cNappe3DGen { public : Pt2di Sz() const {return mSz;} INT tx() const {return mSz.x;} INT ty() const {return mSz.y;} INT ZMin(Pt2di aP) const {return mDataZMin[aP.y][aP.x];} INT ZMax(Pt2di aP) const {return mDataZMax[aP.y][aP.x];} INT NbObj () const {return mNbObj;} Fonc_Num ZMin(){return mIZMin.in();} Fonc_Num ZMax(){return mIZMax.in();} Pt3di PMin(Pt2di aP) const {return Pt3di(aP.x,aP.y,ZMin(aP));} void Resize(Pt2di aSz,Fonc_Num aZMin,Fonc_Num aZMax); cNappe3DGen(Pt2di aSz,Fonc_Num aZMin,Fonc_Num aZMax); bool Inside(const Pt3di & aPt) const { return (aPt.x>=0) && (aPt.y>=0) && (aPt.x<mSz.x) && (aPt.y<mSz.y) && (aPt.z>=mDataZMin[aPt.y][aPt.x]) && 
(aPt.z< mDataZMax[aPt.y][aPt.x]); } bool ZInside(const Pt3di & aPt) const { return (aPt.z>=mDataZMin[aPt.y][aPt.x]) && (aPt.z< mDataZMax[aPt.y][aPt.x]); } bool Inside(const Pt2di & aPt) const { return (aPt.x>=0) && (aPt.y>=0) && (aPt.x<mSz.x) && (aPt.y<mSz.y); } INT OffsetPt(const Pt3di & aPt) const { return mDataOffset[aPt.y][aPt.x] +aPt.z; } INT OffsetPMin(const Pt2di & aPt) const { return mDataOffset[aPt.y][aPt.x] +ZMin(aPt); } INT OffsetPMax(const Pt2di & aPt) const { return mDataOffset[aPt.y][aPt.x] +ZMax(aPt)-1; } INT NbInCol(const Pt2di & aPt) const { return ZMax(aPt)-ZMin(aPt); } private : Pt2di mSz; Im2D_INT2 mIZMin; INT2 * mLineZMin; INT2 ** mDataZMin; Im2D_INT2 mIZMax; INT2 * mLineZMax; INT2 ** mDataZMax; Im2D_INT4 mIOffset; INT4 * mLineOffset; INT4 ** mDataOffset; INT mNbObj; }; template <class Type> class cTplNape3D : public cNappe3DGen { public : typedef Type value_type; ~cTplNape3D() {delete [] mData;} cTplNape3D ( Pt2di aSz, Fonc_Num aZMin, Fonc_Num aZMax, INT aNbObjMin=0 ) : cNappe3DGen(aSz,aZMin,aZMax), mNbObjMem (ElMax(NbObj(),aNbObjMin)), mData (new Type[mNbObjMem]) { } // const Type& El(Pt3di aPt) const {return *(mData+OffsetPt(aPt));} Type & El(const Pt3di & aPt) {return mData[OffsetPt(aPt)];} Type & El0(const Pt2di & aPt) {return mData[OffsetPMin(aPt)];} Type & El1(const Pt2di & aPt) {return mData[OffsetPMax(aPt)];} Type & El(INT aK) {return mData[aK];} void Resize(Pt2di aSz,Fonc_Num aZMin,Fonc_Num aZMax) { cNappe3DGen::Resize(aSz,aZMin,aZMax); if (NbObj() > mNbObjMem) { mNbObjMem = NbObj(); delete [] mData ; mData =new Type[NbObj()]; } } private : INT mNbObjMem; Type * mData; cTplNape3D(const cTplNape3D&); }; struct cCoxAlgoGenRelVois{ cCoxAlgoGenRelVois(Pt3di,bool,bool,INT,bool,INT); Pt3di mPt; bool mVert; bool mDirect; INT mNumFlow; bool mV4; INT mSym; } ; class cCoxAlgoGen { public : cCoxAlgoGen ( INT aCostRegul, INT aCostV8, Pt2di aSz ); // protected : static const cCoxAlgoGenRelVois TheVois[10]; void NewOrder(Pt2di aP0,Pt2di aP1); INT mCostV4; INT mCostV8; INT mOrder; INT mDx; INT mDy; INT mX0; INT mX1; INT mY0; INT mY1; }; class cLineMapRect { public : typedef std::vector<Pt2di> tContPts; const tContPts * Next(); ~cLineMapRect(); cLineMapRect(Pt2di SzMax); void Init(Pt2di u,Pt2di p0,Pt2di p1); private : cLineMapRect(const cLineMapRect &); // Non Def class Line_Map_Rect_Comp * mPLMRP; tContPts mCPts; }; class cInterfaceCoxAlgo { public : virtual void SetCost(Pt3di aP, INT aCost) = 0; virtual INT PccMaxFlow() =0; virtual INT NbChem() =0; virtual void Reinit() =0; virtual ~cInterfaceCoxAlgo(); virtual Im2D_INT2 Sol(INT aDef) =0; static cInterfaceCoxAlgo * StdNewOne ( Pt2di aSz, Fonc_Num aZMin, Fonc_Num aZMax, INT aCostRegul, bool V8 = false ) ; }; Im2D_INT2 TestCoxRoy ( INT aSzV, Im2D_U_INT1 imG1, Im2D_U_INT1 imG2, Pt2di aP0, Pt2di aP1, INT aParMin, INT aParMax ); #endif // _ELISE_GENERAL_CUBE_FLUX_H_ /*Footer-MicMac-eLiSe-25/06/2007 Ce logiciel est un programme informatique servant à la mise en correspondances d'images pour la reconstruction du relief. Ce logiciel est régi par la licence CeCILL-B soumise au droit français et respectant les principes de diffusion des logiciels libres. Vous pouvez utiliser, modifier et/ou redistribuer ce programme sous les conditions de la licence CeCILL-B telle que diffusée par le CEA, le CNRS et l'INRIA sur le site "http://www.cecill.info". 
En contrepartie de l'accessibilité au code source et des droits de copie, de modification et de redistribution accordés par cette licence, il n'est offert aux utilisateurs qu'une garantie limitée. Pour les mêmes raisons, seule une responsabilité restreinte pèse sur l'auteur du programme, le titulaire des droits patrimoniaux et les concédants successifs. A cet égard l'attention de l'utilisateur est attirée sur les risques associés au chargement, à l'utilisation, à la modification et/ou au développement et à la reproduction du logiciel par l'utilisateur étant donné sa spécificité de logiciel libre, qui peut le rendre complexe à manipuler et qui le réserve donc à des développeurs et des professionnels avertis possédant des connaissances informatiques approfondies. Les utilisateurs sont donc invités à charger et tester l'adéquation du logiciel à leurs besoins dans des conditions permettant d'assurer la sécurité de leurs systèmes et ou de leurs données et, plus généralement, à l'utiliser et l'exploiter dans les mêmes conditions de sécurité. Le fait que vous puissiez accéder à cet en-tête signifie que vous avez pris connaissance de la licence CeCILL-B, et que vous en avez accepté les termes. Footer-MicMac-eLiSe-25/06/2007*/
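An illustrative sketch of the cTplNape3D container declared above (not from the MicMac sources; it assumes eLiSe's implicit int-to-Fonc_Num conversion for the constant Z bounds):

// One INT2 cell per (x,y,z) with ZMin(x,y) <= z < ZMax(x,y); MicMac uses this
// to bound the per-pixel Z/disparity search range.
cTplNape3D<INT2> aNappe(Pt2di(100,100), 0, 32);
for (INT anX = 0; anX < aNappe.tx(); anX++)
    for (INT anY = 0; anY < aNappe.ty(); anY++)
    {
        Pt2di aP(anX,anY);
        for (INT aZ = aNappe.ZMin(aP); aZ < aNappe.ZMax(aP); aZ++)
            aNappe.El(Pt3di(anX,anY,aZ)) = 0;
    }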
/* * Copyright (C) 2012, Google Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH * DAMAGE. */ #include "third_party/blink/renderer/modules/webaudio/offline_audio_context.h" #include "base/metrics/histogram_functions.h" #include "third_party/blink/public/platform/platform.h" #include "third_party/blink/renderer/bindings/modules/v8/v8_offline_audio_context_options.h" #include "third_party/blink/renderer/core/dom/document.h" #include "third_party/blink/renderer/core/dom/dom_exception.h" #include "third_party/blink/renderer/core/execution_context/execution_context.h" #include "third_party/blink/renderer/core/frame/local_dom_window.h" #include "third_party/blink/renderer/modules/webaudio/audio_listener.h" #include "third_party/blink/renderer/modules/webaudio/deferred_task_handler.h" #include "third_party/blink/renderer/modules/webaudio/offline_audio_completion_event.h" #include "third_party/blink/renderer/modules/webaudio/offline_audio_destination_node.h" #include "third_party/blink/renderer/platform/audio/audio_utilities.h" #include "third_party/blink/renderer/platform/bindings/exception_messages.h" #include "third_party/blink/renderer/platform/bindings/exception_state.h" #include "third_party/blink/renderer/platform/bindings/script_state.h" #include "third_party/blink/renderer/platform/heap/heap.h" #include "third_party/blink/renderer/platform/wtf/cross_thread_functional.h" namespace blink { OfflineAudioContext* OfflineAudioContext::Create( ExecutionContext* context, unsigned number_of_channels, unsigned number_of_frames, float sample_rate, ExceptionState& exception_state) { // FIXME: add support for workers. 
auto* window = DynamicTo<LocalDOMWindow>(context); if (!window) { exception_state.ThrowDOMException(DOMExceptionCode::kNotSupportedError, "Workers are not supported."); return nullptr; } if (context->IsContextDestroyed()) { exception_state.ThrowDOMException( DOMExceptionCode::kNotSupportedError, "Cannot create OfflineAudioContext on a detached context."); return nullptr; } if (!number_of_frames) { exception_state.ThrowDOMException( DOMExceptionCode::kNotSupportedError, ExceptionMessages::IndexExceedsMinimumBound<unsigned>( "number of frames", number_of_frames, 1)); return nullptr; } if (number_of_channels == 0 || number_of_channels > BaseAudioContext::MaxNumberOfChannels()) { exception_state.ThrowDOMException( DOMExceptionCode::kNotSupportedError, ExceptionMessages::IndexOutsideRange<unsigned>( "number of channels", number_of_channels, 1, ExceptionMessages::kInclusiveBound, BaseAudioContext::MaxNumberOfChannels(), ExceptionMessages::kInclusiveBound)); return nullptr; } if (!audio_utilities::IsValidAudioBufferSampleRate(sample_rate)) { exception_state.ThrowDOMException( DOMExceptionCode::kNotSupportedError, ExceptionMessages::IndexOutsideRange( "sampleRate", sample_rate, audio_utilities::MinAudioBufferSampleRate(), ExceptionMessages::kInclusiveBound, audio_utilities::MaxAudioBufferSampleRate(), ExceptionMessages::kInclusiveBound)); return nullptr; } OfflineAudioContext* audio_context = MakeGarbageCollected<OfflineAudioContext>( window->document(), number_of_channels, number_of_frames, sample_rate, exception_state); audio_context->UpdateStateIfNeeded(); #if DEBUG_AUDIONODE_REFERENCES fprintf(stderr, "[%16p]: OfflineAudioContext::OfflineAudioContext()\n", audio_context); #endif return audio_context; } OfflineAudioContext* OfflineAudioContext::Create( ExecutionContext* context, const OfflineAudioContextOptions* options, ExceptionState& exception_state) { OfflineAudioContext* offline_context = Create(context, options->numberOfChannels(), options->length(), options->sampleRate(), exception_state); return offline_context; } OfflineAudioContext::OfflineAudioContext(Document* document, unsigned number_of_channels, uint32_t number_of_frames, float sample_rate, ExceptionState& exception_state) : BaseAudioContext(document, kOfflineContext), is_rendering_started_(false), total_render_frames_(number_of_frames) { destination_node_ = OfflineAudioDestinationNode::Create( this, number_of_channels, number_of_frames, sample_rate); Initialize(); } OfflineAudioContext::~OfflineAudioContext() { #if DEBUG_AUDIONODE_REFERENCES fprintf(stderr, "[%16p]: OfflineAudioContext::~OfflineAudioContext()\n", this); #endif } void OfflineAudioContext::Trace(Visitor* visitor) const { visitor->Trace(complete_resolver_); visitor->Trace(scheduled_suspends_); BaseAudioContext::Trace(visitor); } ScriptPromise OfflineAudioContext::startOfflineRendering( ScriptState* script_state, ExceptionState& exception_state) { DCHECK(IsMainThread()); // Calling close() on an OfflineAudioContext is not supported/allowed, // but it might well have been stopped by its execution context. // // See: crbug.com/435867 if (IsContextClosed()) { exception_state.ThrowDOMException( DOMExceptionCode::kInvalidStateError, "cannot call startRendering on an OfflineAudioContext in a stopped " "state."); return ScriptPromise(); } // If the context is not in the suspended state (i.e. running), reject the // promise. 
if (ContextState() != AudioContextState::kSuspended) { exception_state.ThrowDOMException( DOMExceptionCode::kInvalidStateError, "cannot startRendering when an OfflineAudioContext is " + state()); return ScriptPromise(); } // Can't call startRendering more than once. Return a rejected promise now. if (is_rendering_started_) { exception_state.ThrowDOMException( DOMExceptionCode::kInvalidStateError, "cannot call startRendering more than once"); return ScriptPromise(); } DCHECK(!is_rendering_started_); complete_resolver_ = MakeGarbageCollected<ScriptPromiseResolver>(script_state); // Allocate the AudioBuffer to hold the rendered result. float sample_rate = DestinationHandler().SampleRate(); unsigned number_of_channels = DestinationHandler().NumberOfChannels(); AudioBuffer* render_target = AudioBuffer::CreateUninitialized( number_of_channels, total_render_frames_, sample_rate); if (!render_target) { exception_state.ThrowDOMException( DOMExceptionCode::kNotSupportedError, "startRendering failed to create AudioBuffer(" + String::Number(number_of_channels) + ", " + String::Number(total_render_frames_) + ", " + String::Number(sample_rate) + ")"); return ScriptPromise(); } // Start rendering and return the promise. is_rendering_started_ = true; SetContextState(kRunning); static_cast<OfflineAudioDestinationNode*>(destination()) ->SetDestinationBuffer(render_target); DestinationHandler().InitializeOfflineRenderThread(render_target); DestinationHandler().StartRendering(); return complete_resolver_->Promise(); } ScriptPromise OfflineAudioContext::suspendContext(ScriptState* script_state, double when) { DCHECK(IsMainThread()); auto* resolver = MakeGarbageCollected<ScriptPromiseResolver>(script_state); ScriptPromise promise = resolver->Promise(); // If the rendering is finished, reject the promise. if (ContextState() == AudioContextState::kClosed) { resolver->Reject(MakeGarbageCollected<DOMException>( DOMExceptionCode::kInvalidStateError, "the rendering is already finished")); return promise; } // The specified suspend time is negative; reject the promise. if (when < 0) { resolver->Reject(MakeGarbageCollected<DOMException>( DOMExceptionCode::kInvalidStateError, "negative suspend time (" + String::Number(when) + ") is not allowed")); return promise; } // The suspend time should be earlier than the total render frame. If the // requested suspension time is equal to the total render frame, the promise // will be rejected. double total_render_duration = total_render_frames_ / sampleRate(); if (total_render_duration <= when) { resolver->Reject(MakeGarbageCollected<DOMException>( DOMExceptionCode::kInvalidStateError, "cannot schedule a suspend at " + String::NumberToStringECMAScript(when) + " seconds because it is greater than " "or equal to the total " "render duration of " + String::Number(total_render_frames_) + " frames (" + String::NumberToStringECMAScript(total_render_duration) + " seconds)")); return promise; } // Find the sample frame and round up to the nearest render quantum // boundary. This assumes the render quantum is a power of two. size_t frame = when * sampleRate(); frame = GetDeferredTaskHandler().RenderQuantumFrames() * ((frame + GetDeferredTaskHandler().RenderQuantumFrames() - 1) / GetDeferredTaskHandler().RenderQuantumFrames()); // The specified suspend time is in the past; reject the promise. 
if (frame < CurrentSampleFrame()) { size_t current_frame_clamped = std::min(CurrentSampleFrame(), static_cast<size_t>(length())); double current_time_clamped = std::min(currentTime(), length() / static_cast<double>(sampleRate())); resolver->Reject(MakeGarbageCollected<DOMException>( DOMExceptionCode::kInvalidStateError, "suspend(" + String::Number(when) + ") failed to suspend at frame " + String::Number(frame) + " because it is earlier than the current " + "frame of " + String::Number(current_frame_clamped) + " (" + String::Number(current_time_clamped) + " seconds)")); return promise; } // Wait until the suspend map is available for the insertion. Here we should // use GraphAutoLocker because it locks the graph from the main thread. GraphAutoLocker locker(this); // If there is a duplicate suspension at the same quantized frame, // reject the promise. if (scheduled_suspends_.Contains(frame)) { resolver->Reject(MakeGarbageCollected<DOMException>( DOMExceptionCode::kInvalidStateError, "cannot schedule more than one suspend at frame " + String::Number(frame) + " (" + String::Number(when) + " seconds)")); return promise; } scheduled_suspends_.insert(frame, resolver); return promise; } ScriptPromise OfflineAudioContext::resumeContext(ScriptState* script_state) { DCHECK(IsMainThread()); auto* resolver = MakeGarbageCollected<ScriptPromiseResolver>(script_state); ScriptPromise promise = resolver->Promise(); // If the rendering has not started, reject the promise. if (!is_rendering_started_) { resolver->Reject(MakeGarbageCollected<DOMException>( DOMExceptionCode::kInvalidStateError, "cannot resume an offline context that has not started")); return promise; } // If the context is in a closed state or it really is closed (cleared), // reject the promise. if (IsContextClosed()) { resolver->Reject(MakeGarbageCollected<DOMException>( DOMExceptionCode::kInvalidStateError, "cannot resume a closed offline context")); return promise; } // If the context is already running, resolve the promise without altering // the current state or starting the rendering loop. if (ContextState() == AudioContextState::kRunning) { resolver->Resolve(); return promise; } DCHECK_EQ(ContextState(), AudioContextState::kSuspended); // If the context is suspended, resume rendering by setting the state to // "Running". and calling startRendering(). Note that resuming is possible // only after the rendering started. SetContextState(kRunning); DestinationHandler().StartRendering(); // Resolve the promise immediately. resolver->Resolve(); return promise; } void OfflineAudioContext::FireCompletionEvent() { DCHECK(IsMainThread()); // Context is finished, so remove any tail processing nodes; there's nowhere // for the output to go. GetDeferredTaskHandler().FinishTailProcessing(); // We set the state to closed here so that the oncomplete event handler sees // that the context has been closed. SetContextState(kClosed); // Avoid firing the event if the document has already gone away. if (GetExecutionContext()) { AudioBuffer* rendered_buffer = static_cast<OfflineAudioDestinationNode*>(destination()) ->DestinationBuffer(); DCHECK(rendered_buffer); if (!rendered_buffer) return; // Call the offline rendering completion event listener and resolve the // promise too. DispatchEvent(*OfflineAudioCompletionEvent::Create(rendered_buffer)); complete_resolver_->Resolve(rendered_buffer); } else { // The resolver should be rejected when the execution context is gone. 
complete_resolver_->Reject(MakeGarbageCollected<DOMException>( DOMExceptionCode::kInvalidStateError, "the execution context does not exist")); } is_rendering_started_ = false; PerformCleanupOnMainThread(); } bool OfflineAudioContext::HandlePreRenderTasks( const AudioIOPosition* output_position, const AudioCallbackMetric* metric) { // TODO(hongchan, rtoy): passing |nullptr| as an argument is not a good // pattern. Consider rewriting this method/interface. DCHECK_EQ(output_position, nullptr); DCHECK_EQ(metric, nullptr); DCHECK(IsAudioThread()); // OfflineGraphAutoLocker here locks the audio graph for this scope. Note // that this locker does not use tryLock() inside because the timing of // suspension MUST NOT be delayed. OfflineGraphAutoLocker locker(this); // Update the dirty state of the listener. listener()->UpdateState(); GetDeferredTaskHandler().HandleDeferredTasks(); HandleStoppableSourceNodes(); return ShouldSuspend(); } void OfflineAudioContext::HandlePostRenderTasks() { DCHECK(IsAudioThread()); // OfflineGraphAutoLocker here locks the audio graph for the same reason // above in |handlePreOfflineRenderTasks|. { OfflineGraphAutoLocker locker(this); GetDeferredTaskHandler().BreakConnections(); GetDeferredTaskHandler().HandleDeferredTasks(); GetDeferredTaskHandler().RequestToDeleteHandlersOnMainThread(); } } OfflineAudioDestinationHandler& OfflineAudioContext::DestinationHandler() { return static_cast<OfflineAudioDestinationHandler&>( destination()->GetAudioDestinationHandler()); } void OfflineAudioContext::ResolveSuspendOnMainThread(size_t frame) { DCHECK(IsMainThread()); // Suspend the context first. This will fire onstatechange event. SetContextState(kSuspended); // Wait until the suspend map is available for the removal. GraphAutoLocker locker(this); // If the context is going away, m_scheduledSuspends could have had all its // entries removed. Check for that here. if (scheduled_suspends_.size()) { // |frame| must exist in the map. DCHECK(scheduled_suspends_.Contains(frame)); SuspendMap::iterator it = scheduled_suspends_.find(frame); it->value->Resolve(); scheduled_suspends_.erase(it); } } void OfflineAudioContext::RejectPendingResolvers() { DCHECK(IsMainThread()); // Wait until the suspend map is available for removal. GraphAutoLocker locker(this); // Offline context is going away so reject any promises that are still // pending. for (auto& pending_suspend_resolver : scheduled_suspends_) { pending_suspend_resolver.value->Reject(MakeGarbageCollected<DOMException>( DOMExceptionCode::kInvalidStateError, "Audio context is going away")); } scheduled_suspends_.clear(); DCHECK_EQ(resume_resolvers_.size(), 0u); RejectPendingDecodeAudioDataResolvers(); } bool OfflineAudioContext::IsPullingAudioGraph() const { DCHECK(IsMainThread()); // For an offline context, we're rendering only while the context is running. // Unlike an AudioContext, there's no audio device that keeps pulling on graph // after the context has finished rendering. return ContextState() == BaseAudioContext::kRunning; } bool OfflineAudioContext::ShouldSuspend() { DCHECK(IsAudioThread()); // Note that the GraphLock is required before this check. Since this needs // to run on the audio thread, OfflineGraphAutoLocker must be used. if (scheduled_suspends_.Contains(CurrentSampleFrame())) return true; return false; } bool OfflineAudioContext::HasPendingActivity() const { return is_rendering_started_; } } // namespace blink
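A small illustration of the suspend-frame quantization performed in suspendContext() above (an illustrative helper, not Blink code): the requested time is converted to a sample frame and rounded up to the next render-quantum boundary.

// E.g. with a 128-frame render quantum at 44100 Hz, suspendContext(0.5)
// gives frame 22050, which rounds up to 173 * 128 = 22144.
static size_t RoundUpToRenderQuantum(size_t frame, size_t quantum_frames) {
  return quantum_frames * ((frame + quantum_frames - 1) / quantum_frames);
}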
//////////////////////////////////////////////////////////////////////////////
//
// This file is part of the Corona game engine.
// For overview and more information on licensing please refer to README.md
// Home page: https://github.com/coronalabs/corona
// Contact: <EMAIL>
//
//////////////////////////////////////////////////////////////////////////////

#pragma once

#include "resource.h"

class CAboutDlg : public CDialog
{
	// DECLARE_DYNAMIC(CPreferencesDlg)

public:
	CAboutDlg(CWnd* pParent = NULL);
	~CAboutDlg();

	// Dialog Data
	enum { IDD = IDD_ABOUTBOX };

protected:
	virtual void DoDataExchange(CDataExchange* pDX);    // DDX/DDV support
	virtual BOOL OnInitDialog();

// Implementation
protected:
	DECLARE_MESSAGE_MAP()

public:
	afx_msg void OnClickLinkWebsite(NMHDR *pNMHDR, LRESULT *pResult);
};
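A minimal sketch of how an MFC dialog like this is typically shown (illustrative; the calling command handler is not part of this header):

// Somewhere in the application's command handling:
CAboutDlg aboutDlg;
aboutDlg.DoModal(); // requires the IDD_ABOUTBOX template from resource.h / the .rc file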
package org.broadinstitute.hellbender.utils.genotyper;

import org.broadinstitute.hellbender.utils.collections.IndexedSet;

import java.util.Collection;

/**
 * Simple implementation of a sample-list using an indexed-set.
 *
 * @author <NAME> &lt;<EMAIL>&gt;
 */
public final class IndexedSampleList implements SampleList {
    private final IndexedSet<String> samples;

    /**
     * Constructs an empty sample-list.
     */
    public IndexedSampleList() {
        samples = new IndexedSet<>(0);
    }

    /**
     * Constructs a sample-list from a collection of samples.
     *
     * <p>
     * Repeats in the input collection are ignored (just the first occurrence is kept).
     * Sample names are indexed in the traversal order of the original collection.
     * </p>
     *
     * @param samples input sample collection.
     *
     * @throws IllegalArgumentException if {@code samples} is {@code null} or it contains {@code nulls}.
     */
    public IndexedSampleList(final Collection<String> samples) {
        //note: no checking here - IndexedSet constructor does it
        this.samples = new IndexedSet<>(samples);
    }

    /**
     * Constructs a sample-list from an array of samples.
     *
     * <p>
     * Repeats in the input array are ignored (just the first occurrence is kept).
     * Sample names are indexed in the traversal order of the original array.
     * </p>
     *
     * @param samples input sample array.
     *
     * @throws IllegalArgumentException if {@code samples} is {@code null} or it contains {@code nulls}.
     */
    public IndexedSampleList(final String... samples) {
        //note: no checking here - IndexedSet constructor does it
        this.samples = new IndexedSet<>(samples);
    }

    @Override
    public int numberOfSamples() {
        return samples.size();
    }

    @Override
    public int indexOfSample(final String sample) {
        return samples.indexOf(sample);
    }

    @Override
    public String getSample(final int sampleIndex) {
        return samples.get(sampleIndex);
    }
}
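A short usage sketch (the sample names are made up): duplicates collapse to their first occurrence and indices follow the traversal order of the input.

SampleList samples = new IndexedSampleList("NA12878", "NA12891", "NA12878");
samples.numberOfSamples();        // 2 (duplicate ignored)
samples.indexOfSample("NA12891"); // 1
samples.getSample(0);             // "NA12878"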
<reponame>obrienben/openwayback<filename>wayback-core/src/main/java/org/archive/wayback/resourcestore/locationdb/ResourceFileLocationDBUpdater.java /* * This file is part of the Wayback archival access software * (http://archive-access.sourceforge.net/projects/wayback/). * * Licensed to the Internet Archive (IA) by one or more individual * contributors. * * The IA licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.archive.wayback.resourcestore.locationdb; import java.io.File; import java.io.IOException; import java.util.Iterator; import java.util.logging.Logger; import org.archive.wayback.Shutdownable; import org.archive.wayback.resourcestore.resourcefile.ResourceFileList; import org.archive.wayback.resourcestore.resourcefile.ResourceFileLocation; import org.archive.wayback.util.DirMaker; /** * Class which performs updates on a ResourceFileLocationDB, based on files * appearing in a incoming directory. When files are noticed in the "incoming" * directory, they are assumed to be in the format serialized by * org.archive.wayback.resourcestore.resourcefile.ResourceFileList * * These files are synchronized with the ResourceFileLocationDB, and deleted. * * Each file has a logical name, which is assumed to uniquely identify a * ResourceFileSource. As an optimization, the last state of each * ResouceFileSource is kept in a file under the "state" directory. * * This allows this class to compute a difference of the last state with the * new files in incoming, and only deltas: new files, removed files, * and possibly moved files, need to applied to the ResourceFileLocationDB. 
* * @author brad * @version $Date$, $Revision$ */ public class ResourceFileLocationDBUpdater implements Shutdownable { private static final Logger LOGGER = Logger.getLogger(ResourceFileLocationDBUpdater.class.getName()); private ResourceFileLocationDB db = null; private File stateDir = null; private File incomingDir = null; private UpdateThread thread = null; private long interval = 120000; public final static String TMP_SUFFIX = ".TMP"; public void init() { if(interval > 0) { thread = new UpdateThread(this,interval); thread.start(); } } public void shutdown() { if(thread != null) { thread.interrupt(); try { thread.join(1000); } catch (InterruptedException e) { e.printStackTrace(); } } } public int synchronizeIncoming() throws IOException { File[] updates = incomingDir.listFiles(); int updated = 0; for(File update : updates) { if(update.getName().endsWith(TMP_SUFFIX)) { continue; } if(synchronize(update)) { updated++; } } return updated; } public boolean synchronize(File update) throws IOException { String name = update.getName(); File current = new File(stateDir,name); if(!current.isFile()) { current.createNewFile(); } ResourceFileList updateFL = ResourceFileList.load(update); ResourceFileList currentFL = ResourceFileList.load(current); boolean updated = false; ResourceFileList removedFiles = currentFL.subtract(updateFL); ResourceFileList addedFiles = updateFL.subtract(currentFL); Iterator<ResourceFileLocation> addedItr = addedFiles.iterator(); Iterator<ResourceFileLocation> removedItr = removedFiles.iterator(); while(addedItr.hasNext()) { updated = true; ResourceFileLocation location = addedItr.next(); LOGGER.info("Added " + location.getName() + " " + location.getUrl()); db.addNameUrl(location.getName(), location.getUrl()); } while(removedItr.hasNext()) { updated = true; ResourceFileLocation location = removedItr.next(); LOGGER.info("Removed " + location.getName() + " " + location.getUrl()); db.removeNameUrl(location.getName(), location.getUrl()); } if(updated) { // lastly replace the state file with the new version: if(!current.delete()) { throw new IOException("Unable to delete " + current.getAbsolutePath()); } if(!update.renameTo(current)) { throw new IOException("Unable to rename " + update.getAbsolutePath() + " to " + current.getAbsolutePath()); } } else { if(!update.delete()) { throw new IOException("Unable to delete " + update.getAbsolutePath()); } } return updated; } private class UpdateThread extends Thread { private long runInterval = 120000; private ResourceFileLocationDBUpdater updater = null; public UpdateThread(ResourceFileLocationDBUpdater updater, long runInterval) { this.updater = updater; this.runInterval = runInterval; } public void run() { LOGGER.info("ResourceFileLocationDBUpdater.UpdateThread is alive."); long sleepInterval = runInterval; while (true) { try { int updated = updater.synchronizeIncoming(); if(updated > 0) { sleepInterval = runInterval; } else { sleepInterval += runInterval; } sleep(sleepInterval); } catch (InterruptedException e) { LOGGER.info("Shutting Down."); return; } catch (IOException e) { e.printStackTrace(); } } } } /** * @return the db */ public ResourceFileLocationDB getDb() { return db; } /** * @param db the db to set */ public void setDb(ResourceFileLocationDB db) { this.db = db; } /** * @return the stateDir */ public String getStateDir() { return DirMaker.getAbsolutePath(stateDir); } /** * @param stateDir the stateDir to set * @throws IOException */ public void setStateDir(String stateDir) throws IOException { this.stateDir = 
DirMaker.ensureDir(stateDir); } /** * @return the incomingDir */ public String getIncomingDir() { return DirMaker.getAbsolutePath(incomingDir); } /** * @param incomingDir the incomingDir to set * @throws IOException */ public void setIncomingDir(String incomingDir) throws IOException { this.incomingDir = DirMaker.ensureDir(incomingDir); } /** * @return the interval */ public long getInterval() { return interval; } /** * @param interval the interval to set */ public void setInterval(long interval) { this.interval = interval; } }
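A hedged wiring sketch (in Wayback this is normally done through Spring configuration; the directory paths below are placeholders):

ResourceFileLocationDBUpdater updater = new ResourceFileLocationDBUpdater();
updater.setDb(locationDB);                           // an already-configured ResourceFileLocationDB
updater.setStateDir("/wayback/file-db/state");       // throws IOException if the dir cannot be created
updater.setIncomingDir("/wayback/file-db/incoming");
updater.setInterval(120000);                         // poll every two minutes
updater.init();                                      // starts the background UpdateThread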
/************************************************************** * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. * *************************************************************/ #ifndef TOOLKIT_ANIMATEDIMAGES_HXX #define TOOLKIT_ANIMATEDIMAGES_HXX #include "toolkit/controls/unocontrolbase.hxx" #include "toolkit/controls/unocontrolmodel.hxx" /** === begin UNO includes === **/ #include <com/sun/star/awt/XAnimation.hpp> #include <com/sun/star/awt/XAnimatedImages.hpp> #include <com/sun/star/container/XContainerListener.hpp> /** === end UNO includes === **/ #include <cppuhelper/implbase1.hxx> #include <cppuhelper/implbase2.hxx> #include <boost/scoped_ptr.hpp> //...................................................................................................................... namespace toolkit { //...................................................................................................................... //================================================================================================================== //= //================================================================================================================== typedef ::cppu::AggImplInheritanceHelper2 < UnoControlBase , ::com::sun::star::awt::XAnimation , ::com::sun::star::container::XContainerListener > AnimatedImagesControl_Base; class AnimatedImagesControl : public AnimatedImagesControl_Base { public: AnimatedImagesControl( ::com::sun::star::uno::Reference< ::com::sun::star::lang::XMultiServiceFactory > const & i_factory ); ::rtl::OUString GetComponentServiceName(); // XAnimation virtual void SAL_CALL startAnimation( ) throw (::com::sun::star::uno::RuntimeException); virtual void SAL_CALL stopAnimation( ) throw (::com::sun::star::uno::RuntimeException); virtual ::sal_Bool SAL_CALL isAnimationRunning( ) throw (::com::sun::star::uno::RuntimeException); // XServiceInfo ::rtl::OUString SAL_CALL getImplementationName( ) throw(::com::sun::star::uno::RuntimeException); ::com::sun::star::uno::Sequence< ::rtl::OUString > SAL_CALL getSupportedServiceNames() throw(::com::sun::star::uno::RuntimeException); // XControl sal_Bool SAL_CALL setModel( const ::com::sun::star::uno::Reference< ::com::sun::star::awt::XControlModel >& i_rModel ) throw ( ::com::sun::star::uno::RuntimeException ); void SAL_CALL createPeer( const ::com::sun::star::uno::Reference< ::com::sun::star::awt::XToolkit >& i_toolkit, const ::com::sun::star::uno::Reference< ::com::sun::star::awt::XWindowPeer >& i_parentPeer ) throw(::com::sun::star::uno::RuntimeException); // XContainerListener virtual void SAL_CALL elementInserted( const ::com::sun::star::container::ContainerEvent& Event ) throw (::com::sun::star::uno::RuntimeException); virtual void SAL_CALL elementRemoved( const ::com::sun::star::container::ContainerEvent& Event ) 
throw (::com::sun::star::uno::RuntimeException); virtual void SAL_CALL elementReplaced( const ::com::sun::star::container::ContainerEvent& Event ) throw (::com::sun::star::uno::RuntimeException); // XEventListener virtual void SAL_CALL disposing( const ::com::sun::star::lang::EventObject& i_event ) throw (::com::sun::star::uno::RuntimeException); }; //================================================================================================================== //= AnimatedImagesControlModel //================================================================================================================== struct AnimatedImagesControlModel_Data; typedef ::cppu::AggImplInheritanceHelper1 < UnoControlModel , ::com::sun::star::awt::XAnimatedImages > AnimatedImagesControlModel_Base; class AnimatedImagesControlModel : public AnimatedImagesControlModel_Base { public: AnimatedImagesControlModel( ::com::sun::star::uno::Reference< ::com::sun::star::lang::XMultiServiceFactory > const & i_factory ); AnimatedImagesControlModel( const AnimatedImagesControlModel& i_copySource ); virtual UnoControlModel* Clone() const; // XPropertySet ::com::sun::star::uno::Reference< ::com::sun::star::beans::XPropertySetInfo > SAL_CALL getPropertySetInfo( ) throw(::com::sun::star::uno::RuntimeException); // XPersistObject ::rtl::OUString SAL_CALL getServiceName() throw(::com::sun::star::uno::RuntimeException); // XServiceInfo ::rtl::OUString SAL_CALL getImplementationName( ) throw(::com::sun::star::uno::RuntimeException); ::com::sun::star::uno::Sequence< ::rtl::OUString > SAL_CALL getSupportedServiceNames() throw(::com::sun::star::uno::RuntimeException); // XAnimatedImages virtual ::sal_Int32 SAL_CALL getStepTime() throw (::com::sun::star::uno::RuntimeException); virtual void SAL_CALL setStepTime( ::sal_Int32 _steptime ) throw (::com::sun::star::uno::RuntimeException); virtual ::sal_Bool SAL_CALL getAutoRepeat() throw (::com::sun::star::uno::RuntimeException); virtual void SAL_CALL setAutoRepeat( ::sal_Bool _autorepeat ) throw (::com::sun::star::uno::RuntimeException); virtual ::sal_Int16 SAL_CALL getScaleMode() throw (::com::sun::star::uno::RuntimeException); virtual void SAL_CALL setScaleMode( ::sal_Int16 _scalemode ) throw (::com::sun::star::lang::IllegalArgumentException, ::com::sun::star::uno::RuntimeException); virtual ::sal_Int32 SAL_CALL getImageSetCount( ) throw (::com::sun::star::uno::RuntimeException); virtual ::com::sun::star::uno::Sequence< ::rtl::OUString > SAL_CALL getImageSet( ::sal_Int32 i_index ) throw (::com::sun::star::lang::IndexOutOfBoundsException, ::com::sun::star::uno::RuntimeException); virtual void SAL_CALL insertImageSet( ::sal_Int32 i_index, const ::com::sun::star::uno::Sequence< ::rtl::OUString >& i_imageURLs ) throw (::com::sun::star::lang::IndexOutOfBoundsException, ::com::sun::star::uno::RuntimeException); virtual void SAL_CALL replaceImageSet( ::sal_Int32 i_index, const ::com::sun::star::uno::Sequence< ::rtl::OUString >& i_imageURLs ) throw (::com::sun::star::lang::IndexOutOfBoundsException, ::com::sun::star::uno::RuntimeException); virtual void SAL_CALL removeImageSet( ::sal_Int32 i_index ) throw (::com::sun::star::lang::IndexOutOfBoundsException, ::com::sun::star::uno::RuntimeException); // XAnimatedImages::XContainer virtual void SAL_CALL addContainerListener( const ::com::sun::star::uno::Reference< ::com::sun::star::container::XContainerListener >& i_listener ) throw (::com::sun::star::uno::RuntimeException); virtual void SAL_CALL removeContainerListener( const 
::com::sun::star::uno::Reference< ::com::sun::star::container::XContainerListener >& i_listener ) throw (::com::sun::star::uno::RuntimeException); protected: ~AnimatedImagesControlModel(); ::com::sun::star::uno::Any ImplGetDefaultValue( sal_uInt16 nPropId ) const; ::cppu::IPropertyArrayHelper& SAL_CALL getInfoHelper(); void SAL_CALL setFastPropertyValue_NoBroadcast( sal_Int32 nHandle, const ::com::sun::star::uno::Any& rValue ) throw (::com::sun::star::uno::Exception); private: ::boost::scoped_ptr< AnimatedImagesControlModel_Data > m_pData; }; //...................................................................................................................... } // namespace toolkit //...................................................................................................................... #endif // TOOLKIT_ANIMATEDIMAGES_HXX
//===-- atof_fuzz.cpp -----------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// Fuzzing test for llvm-libc atof implementation.
///
//===----------------------------------------------------------------------===//
#include "src/stdlib/atof.h"
#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>
#include "fuzzing/stdlib/StringParserOutputDiff.h"

extern "C" int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) {
  uint8_t *container = new uint8_t[size + 1];
  if (!container)
    __builtin_trap();
  size_t i;
  for (i = 0; i < size; ++i)
    container[i] = data[i];
  container[size] = '\0'; // Add null terminator to container.

  StringParserOutputDiff<double>(&__llvm_libc::atof, &::atof, container, size);
  delete[] container;
  return 0;
}
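A hypothetical standalone driver (not part of llvm-libc) for replaying a single input through the fuzz target without a fuzzing engine:

#include <stddef.h>
#include <stdint.h>

extern "C" int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size);

int main() {
  const char input[] = "3.14159e2";
  return LLVMFuzzerTestOneInput(reinterpret_cast<const uint8_t *>(input),
                                sizeof(input) - 1);
}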
// Copyright (C) 2010 <NAME> (<EMAIL>) // License: Boost Software License See LICENSE.txt for the full license. #ifndef DLIB_AnY_TRAINER_H_ #define DLIB_AnY_TRAINER_H_ #include "any.h" #include "../smart_pointers.h" #include "any_decision_function.h" #include "any_trainer_abstract.h" #include <vector> namespace dlib { // ---------------------------------------------------------------------------------------- template < typename sample_type_, typename scalar_type_ = double > class any_trainer { public: typedef sample_type_ sample_type; typedef scalar_type_ scalar_type; typedef default_memory_manager mem_manager_type; typedef any_decision_function<sample_type, scalar_type> trained_function_type; any_trainer() { } any_trainer ( const any_trainer& item ) { if (item.data) { item.data->copy_to(data); } } template <typename T> any_trainer ( const T& item ) { typedef typename basic_type<T>::type U; data.reset(new derived<U>(item)); } void clear ( ) { data.reset(); } template <typename T> bool contains ( ) const { typedef typename basic_type<T>::type U; return dynamic_cast<derived<U>*>(data.get()) != 0; } bool is_empty( ) const { return data.get() == 0; } trained_function_type train ( const std::vector<sample_type>& samples, const std::vector<scalar_type>& labels ) const { // make sure requires clause is not broken DLIB_ASSERT(is_empty() == false, "\t trained_function_type any_trainer::train()" << "\n\t You can't call train() on an empty any_trainer" << "\n\t this: " << this ); return data->train(samples, labels); } template <typename T> T& cast_to( ) { typedef typename basic_type<T>::type U; derived<U>* d = dynamic_cast<derived<U>*>(data.get()); if (d == 0) { throw bad_any_cast(); } return d->item; } template <typename T> const T& cast_to( ) const { typedef typename basic_type<T>::type U; derived<U>* d = dynamic_cast<derived<U>*>(data.get()); if (d == 0) { throw bad_any_cast(); } return d->item; } template <typename T> T& get( ) { typedef typename basic_type<T>::type U; derived<U>* d = dynamic_cast<derived<U>*>(data.get()); if (d == 0) { d = new derived<U>(); data.reset(d); } return d->item; } any_trainer& operator= ( const any_trainer& item ) { any_trainer(item).swap(*this); return *this; } void swap ( any_trainer& item ) { data.swap(item.data); } private: struct base { virtual ~base() {} virtual trained_function_type train ( const std::vector<sample_type>& samples, const std::vector<scalar_type>& labels ) const = 0; virtual void copy_to ( scoped_ptr<base>& dest ) const = 0; }; template <typename T> struct derived : public base { T item; derived() {} derived(const T& val) : item(val) {} virtual void copy_to ( scoped_ptr<base>& dest ) const { dest.reset(new derived<T>(item)); } virtual trained_function_type train ( const std::vector<sample_type>& samples, const std::vector<scalar_type>& labels ) const { return item.train(samples, labels); } }; scoped_ptr<base> data; }; // ---------------------------------------------------------------------------------------- template < typename sample_type, typename scalar_type > inline void swap ( any_trainer<sample_type,scalar_type>& a, any_trainer<sample_type,scalar_type>& b ) { a.swap(b); } // ---------------------------------------------------------------------------------------- template <typename T, typename U, typename V> T& any_cast(any_trainer<U,V>& a) { return a.template cast_to<T>(); } template <typename T, typename U, typename V> const T& any_cast(const any_trainer<U,V>& a) { return a.template cast_to<T>(); } // 
---------------------------------------------------------------------------------------- } #endif // DLIB_AnY_TRAINER_H_
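any_trainer is a type-erasure wrapper: the templated constructor stashes the concrete trainer inside a derived&lt;T&gt; node behind a non-template base interface, so heterogeneous trainers can be stored, copied, and invoked uniformly as long as they expose train(samples, labels). Below is a minimal, self-contained sketch of the same pattern; the any_toy_trainer class and the mean_trainer example are illustrative stand-ins written for this note, not dlib types.

#include <iostream>
#include <memory>
#include <vector>

// Miniature version of the type-erasure pattern used by any_trainer: a
// non-template wrapper holds any object exposing train(samples, labels)
// behind an internal base/derived pair.
class any_toy_trainer {
public:
    any_toy_trainer() = default;

    template <typename T>
    any_toy_trainer(const T &item) : data(new derived<T>(item)) {}

    bool is_empty() const { return data == nullptr; }

    double train(const std::vector<double> &samples,
                 const std::vector<double> &labels) const {
        return data->train(samples, labels); // precondition: !is_empty()
    }

private:
    struct base {
        virtual ~base() = default;
        virtual double train(const std::vector<double> &samples,
                             const std::vector<double> &labels) const = 0;
    };

    template <typename T>
    struct derived : base {
        T item;
        explicit derived(const T &val) : item(val) {}
        double train(const std::vector<double> &samples,
                     const std::vector<double> &labels) const override {
            return item.train(samples, labels);
        }
    };

    std::shared_ptr<base> data;
};

// A toy "trainer": any type with a matching train() member can be stored.
struct mean_trainer {
    double train(const std::vector<double> &,
                 const std::vector<double> &labels) const {
        double sum = 0;
        for (double l : labels) sum += l;
        return labels.empty() ? 0.0 : sum / labels.size();
    }
};

int main() {
    any_toy_trainer t = mean_trainer{};
    std::cout << t.train({1, 2, 3}, {0.0, 1.0, 1.0}) << "\n"; // prints 0.666...
    return 0;
}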
from .objects import MujocoObject, MujocoXMLObject, MujocoGeneratedObject
from .generated_objects import CompositeBodyObject, CompositeObject, PrimitiveObject

from .xml_objects import (
    BottleObject,
    CanObject,
    LemonObject,
    MilkObject,
    BreadObject,
    CerealObject,
    SquareNutObject,
    RoundNutObject,
    MilkVisualObject,
    BreadVisualObject,
    CerealVisualObject,
    CanVisualObject,
    PlateWithHoleObject,
    DoorObject,
)

from .primitive import *
from .composite import *
from .composite_body import *
/* * Copyright 2019 <NAME> * * See LICENCE for the full copyright terms. */ #include <assert.h> #include <stddef.h> #include <string.h> #include <stdio.h> /* If non-zero, use an experimental edge_set implementation * that collects edges leading to the same set in a group * and uses bitflags for individual labels. While there should * be some space savings vs. storing every <label, state> pair * individually, the bigger motivation is to do processing for * some operations (especially fsm_determinise) by group rather * than for every individual pair. */ #define USE_EDGE_BITSET 1 #if !USE_EDGE_BITSET #include "libfsm/internal.h" /* XXX: for allocating struct fsm_edge, and the edges array */ #include <print/esc.h> #include <adt/alloc.h> #include <adt/bitmap.h> #include <adt/set.h> #include <adt/stateset.h> #include <adt/edgeset.h> /* This is a simple linear-probing hash table, keyed by the edge symbol. * Since many edge sets only contain a single item, there is a special * case to box the symbol and state ID boxed in the edge_set pointer. * Otherwise, the hash table starts with SET_INITIAL buckets and grows * as necessary. */ #define SET_INITIAL 8 #define SINGLETON_MAX_STATE ((~ (uintptr_t) 0U) >> (CHAR_BIT + 1)) #define SINGLETON_ENCODE(symbol, state) ((void *) ( \ (((uintptr_t) (state)) << (CHAR_BIT + 1)) | \ (((uintptr_t) (symbol)) << 1) | \ 0x1)) #define SINGLETON_DECODE_SYMBOL(ptr) ((unsigned char) ((((uintptr_t) (ptr)) >> 1) & 0xFFU)) #define SINGLETON_DECODE_STATE(ptr) ((fsm_state_t) (((uintptr_t) (ptr)) >> (CHAR_BIT + 1))) #define IS_SINGLETON(ptr) (((uintptr_t) (ptr)) & 0x1) /* Used to represent buckets that are not currently used; * tombstone is for states that were removed (and can be replaced), * but may be followed by other elements due to previous collisions. */ #define BUCKET_UNUSED ((fsm_state_t)-1) #define BUCKET_TOMBSTONE ((fsm_state_t)-2) /* 32-bit approximation of the inverse golden ratio / UINT32_MAX: * (sqrt(5) - 1)/2 -> 0.618, so 0.618 * 0xffffffff. See Knuth 6.4. 
*/ #define PHI32 0x9e3779b9 #define EOI_DONE ((size_t)-1) #define EOI_SINGLETON_SET ((size_t)-2) struct edge_set { struct fsm_edge *b; /* buckets */ size_t count; size_t ceil; }; struct edge_set * edge_set_new(void) { return NULL; } static struct edge_set * edge_set_create(const struct fsm_alloc *a) { struct edge_set *set; set = f_malloc(a, sizeof *set); if (set == NULL) { return NULL; } set->b = f_malloc(a, SET_INITIAL * sizeof *set->b); if (set->b == NULL) { f_free(a, set); return NULL; } memset(set->b, 0xff, SET_INITIAL * sizeof *set->b); assert(set->b[0].state == BUCKET_UNUSED); set->count = 0; set->ceil = SET_INITIAL; return set; } void edge_set_free(const struct fsm_alloc *alloc, struct edge_set *set) { if (set == NULL) { return; } if (IS_SINGLETON(set)) { return; } assert(set->b != NULL); f_free(alloc, set->b); f_free(alloc, set); } static int grow_buckets(const struct fsm_alloc *alloc, struct edge_set *set) { struct fsm_edge *nb, *ob = set->b; /* new and old buckets */ const size_t oceil = set->ceil; const size_t nceil = 2 * oceil; const size_t nmask = nceil - 1; size_t o_i, n_i, added; /* assumed to be a power of 2 */ assert((nceil & nmask) == 0); nb = f_malloc(alloc, nceil * sizeof *nb); if (nb == NULL) { return 0; } memset(nb, 0xff, nceil * sizeof *nb); assert(nb[0].state == BUCKET_UNUSED); added = 0; for (o_i = 0; o_i < oceil; o_i++) { unsigned h; const fsm_state_t bs = ob[o_i].state; if (bs == BUCKET_UNUSED || bs == BUCKET_TOMBSTONE) { continue; } h = PHI32 * ob[o_i].symbol; for (n_i = 0; n_i < nceil; n_i++) { const size_t b_i = (h + n_i) & nmask; if (nb[b_i].state != BUCKET_UNUSED) { assert(nb[b_i].state != BUCKET_TOMBSTONE); continue; } nb[b_i].symbol = ob[o_i].symbol; nb[b_i].state = ob[o_i].state; added++; break; } } assert(added == set->count); f_free(alloc, ob); set->b = nb; set->ceil = nceil; return 1; } int edge_set_add(struct edge_set **setp, const struct fsm_alloc *alloc, unsigned char symbol, fsm_state_t state) { struct edge_set *set; assert(setp != NULL); /* XXX: assert(state <= SINGLETON_MAX_STATE); */ if (IS_SINGLETON(*setp)) { unsigned char prev_symbol; fsm_state_t prev_state; prev_symbol = SINGLETON_DECODE_SYMBOL(*setp); prev_state = SINGLETON_DECODE_STATE(*setp); *setp = edge_set_create(alloc); if (*setp == NULL) { return 0; } assert(!IS_SINGLETON(*setp)); /* TODO: bulk add */ if (!edge_set_add(setp, alloc, prev_symbol, prev_state)) { return 0; } if (!edge_set_add(setp, alloc, symbol, state)) { return 0; } return 1; } /* XXX: only if it fits */ if (*setp == NULL) { *setp = SINGLETON_ENCODE(symbol, state); assert(SINGLETON_DECODE_SYMBOL(*setp) == symbol); assert(SINGLETON_DECODE_STATE(*setp) == state); return 1; } set = *setp; /* Grow buckets at 50% capacity */ if (set->count == set->ceil/2) { if (!grow_buckets(alloc, set)) { return 0; } } { const size_t mask = set->ceil - 1; const unsigned h = PHI32 * symbol; int has_tombstone_candidate = 0; size_t tc_pos; size_t i; for (i = 0; i < set->ceil; i++) { const size_t b_i = (h + i) & mask; const fsm_state_t bs = set->b[b_i].state; /* Continue past a tombstone, but note where * it was -- as long as the value being added * isn't already present later, we can add it there. * This fills the first tombstone, if there are more * than one, because search will find it sooner. */ if (bs == BUCKET_TOMBSTONE) { if (!has_tombstone_candidate) { has_tombstone_candidate = 1; tc_pos = b_i; } continue; } else if (bs == BUCKET_UNUSED) { const size_t pos = (has_tombstone_candidate ? 
tc_pos : b_i); set->b[pos].state = state; set->b[pos].symbol = symbol; set->count++; assert(edge_set_contains(set, symbol)); return 1; } else if (bs == state && set->b[b_i].symbol == symbol) { return 1; /* already present */ } else { /* ignore other edges */ continue; } } if (has_tombstone_candidate) { set->b[tc_pos].state = state; set->b[tc_pos].symbol = symbol; set->count++; return 1; } assert(!"unreachable"); return 0; } } int edge_set_advise_growth(struct edge_set **pset, const struct fsm_alloc *alloc, size_t count) { /* not implemented */ (void)pset; (void)alloc; (void)count; return 1; } int edge_set_add_bulk(struct edge_set **pset, const struct fsm_alloc *alloc, uint64_t symbols[256/64], fsm_state_t state) { size_t i; for (i = 0; i < 256; i++) { if (SYMBOLS_GET(symbols, i)) { if (!edge_set_add(pset, alloc, i, state)) { return 0; } } } return 1; } int edge_set_add_state_set(struct edge_set **setp, const struct fsm_alloc *alloc, unsigned char symbol, const struct state_set *state_set) { struct state_iter it; fsm_state_t s; assert(setp != NULL); /* TODO: bulk add */ for (state_set_reset((void *) state_set, &it); state_set_next(&it, &s); ) { if (!edge_set_add(setp, alloc, symbol, s)) { return 0; } } return 1; } int edge_set_find(const struct edge_set *set, unsigned char symbol, struct fsm_edge *e) { assert(e != NULL); if (edge_set_empty(set)) { return 0; } assert(set != NULL); if (IS_SINGLETON(set)) { if (SINGLETON_DECODE_SYMBOL(set) == symbol) { e->symbol = symbol; e->state = SINGLETON_DECODE_STATE(set); return 1; } return 0; } else { const size_t mask = set->ceil - 1; const unsigned h = PHI32 * symbol; size_t i; for (i = 0; i < set->ceil; i++) { const size_t b_i = (h + i) & mask; const fsm_state_t bs = set->b[b_i].state; if (bs == BUCKET_UNUSED) { break; } else if (bs == BUCKET_TOMBSTONE) { continue; /* search past deleted */ } else if (set->b[b_i].symbol == symbol) { memcpy(e, &set->b[b_i], sizeof *e); return 1; /* found */ } } } /* not found */ return 0; } int edge_set_contains(const struct edge_set *set, unsigned char symbol) { struct fsm_edge unused; return edge_set_find(set, symbol, &unused); } int edge_set_hasnondeterminism(const struct edge_set *set, struct bm *bm) { size_t i; assert(bm != NULL); if (edge_set_empty(set)) { return 0; } /* * Instances of struct fsm_edge aren't unique, and are not ordered. * The bitmap here is to identify duplicate symbols between structs. * * The same bitmap is shared between all states in an epsilon closure. */ if (IS_SINGLETON(set)) { if (bm_get(bm, SINGLETON_DECODE_SYMBOL(set))) { return 1; } bm_set(bm, SINGLETON_DECODE_SYMBOL(set)); return 0; } for (i = 0; i < set->ceil; i++) { const fsm_state_t bs = set->b[i].state; if (bs == BUCKET_UNUSED || bs == BUCKET_TOMBSTONE) { continue; /* no element */ } if (bm_get(bm, set->b[i].symbol)) { return 1; } bm_set(bm, set->b[i].symbol); } return 0; } int edge_set_transition(const struct edge_set *set, unsigned char symbol, fsm_state_t *state) { /* * This function is meaningful for DFA only; we require a DFA * by contract in order to identify a single destination state * for a given symbol. 
*/ struct fsm_edge e; if (!edge_set_find(set, symbol, &e)) { return 0; } *state = e.state; return 1; } size_t edge_set_count(const struct edge_set *set) { if (set == NULL) { return 0; } if (IS_SINGLETON(set)) { return 1; } assert(set->b != NULL); return set->count; } int edge_set_copy(struct edge_set **dst, const struct fsm_alloc *alloc, const struct edge_set *src) { struct edge_iter jt; struct fsm_edge e; assert(dst != NULL); if (edge_set_empty(src)) { return 1; } if (IS_SINGLETON(src)) { if (!edge_set_add(dst, alloc, SINGLETON_DECODE_SYMBOL(src), SINGLETON_DECODE_STATE(src))) { return 0; } return 1; } for (edge_set_reset(src, &jt); edge_set_next(&jt, &e); ) { /* TODO: bulk add */ if (!edge_set_add(dst, alloc, e.symbol, e.state)) { return 0; } } return 1; } void edge_set_remove(struct edge_set **setp, unsigned char symbol) { struct edge_set *set; assert(setp != NULL); if (IS_SINGLETON(*setp)) { if (SINGLETON_DECODE_SYMBOL(*setp) == symbol) { *setp = NULL; } return; } set = *setp; if (edge_set_empty(set)) { return; } else { const size_t mask = set->ceil - 1; const unsigned h = PHI32 * symbol; size_t i; for (i = 0; i < set->ceil; i++) { const size_t b_i = (h + i) & mask; const fsm_state_t bs = set->b[b_i].state; if (bs == BUCKET_UNUSED) { break; /* not found */ } else if (set->b[b_i].symbol == symbol && set->b[b_i].state != BUCKET_TOMBSTONE) { /* Set to a distinct marker for a deleted * entry; there may be entries past this * due to collisions that still need to * be checked. */ set->b[b_i].state = BUCKET_TOMBSTONE; set->count--; } } } assert(!edge_set_contains(set, symbol)); } void edge_set_remove_state(struct edge_set **setp, fsm_state_t state) { struct edge_set *set; size_t i; assert(setp != NULL); assert(state != BUCKET_UNUSED && state != BUCKET_TOMBSTONE); if (IS_SINGLETON(*setp)) { if (SINGLETON_DECODE_STATE(*setp) == state) { *setp = NULL; } return; } set = *setp; if (edge_set_empty(set)) { return; } /* Remove all edges with that state */ for (i = 0; i < set->ceil; i++) { if (set->b[i].state == state) { set->b[i].state = BUCKET_TOMBSTONE; set->count--; } } } int edge_set_compact(struct edge_set **setp, const struct fsm_alloc *alloc, fsm_state_remap_fun *remap, const void *opaque) { struct edge_set *set; size_t i; assert(setp != NULL); if (IS_SINGLETON(*setp)) { const unsigned char symbol = SINGLETON_DECODE_SYMBOL(*setp); const fsm_state_t s = SINGLETON_DECODE_STATE(*setp); const fsm_state_t new_id = remap(s, opaque); if (new_id == FSM_STATE_REMAP_NO_STATE) { *setp = NULL; } else { *setp = SINGLETON_ENCODE(symbol, new_id); } return 1; } set = *setp; if (edge_set_empty(set)) { return 1; } i = 0; for (i = 0; i < set->ceil; i++) { const fsm_state_t to = set->b[i].state; fsm_state_t new_to; if (to == BUCKET_UNUSED || to == BUCKET_TOMBSTONE) { continue; } new_to = remap(to, opaque); if (new_to == FSM_STATE_REMAP_NO_STATE) { /* drop */ set->b[i].state = BUCKET_TOMBSTONE; set->count--; } else { /* keep */ set->b[i].state = new_to; } } /* todo: if set->count < set->ceil/2, shrink buckets */ return 1; } void edge_set_reset(const struct edge_set *set, struct edge_iter *it) { it->i = 0; it->set = set; } int edge_set_next(struct edge_iter *it, struct fsm_edge *e) { assert(it != NULL); assert(e != NULL); if (it->set == NULL) { return 0; } if (IS_SINGLETON(it->set)) { if (it->i >= 1) { return 0; } e->symbol = SINGLETON_DECODE_SYMBOL(it->set); e->state = SINGLETON_DECODE_STATE(it->set); it->i++; return 1; } while (it->i < it->set->ceil) { const fsm_state_t bs = it->set->b[it->i].state; if (bs == 
BUCKET_UNUSED || bs == BUCKET_TOMBSTONE) { it->i++; continue; } *e = it->set->b[it->i]; it->i++; return 1; } return 0; } void edge_set_rebase(struct edge_set **setp, fsm_state_t base) { struct edge_set *set; size_t i; assert(setp != NULL); if (IS_SINGLETON(*setp)) { fsm_state_t state; unsigned char symbol; state = SINGLETON_DECODE_STATE(*setp) + base; symbol = SINGLETON_DECODE_SYMBOL(*setp); *setp = SINGLETON_ENCODE(symbol, state); assert(SINGLETON_DECODE_SYMBOL(*setp) == symbol); assert(SINGLETON_DECODE_STATE(*setp) == state); return; } set = *setp; if (edge_set_empty(set)) { return; } for (i = 0; i < set->ceil; i++) { const fsm_state_t bs = set->b[i].state; if (bs == BUCKET_UNUSED || bs == BUCKET_TOMBSTONE) { continue; } set->b[i].state += base; } } int edge_set_replace_state(struct edge_set **setp, const struct fsm_alloc *alloc, fsm_state_t old, fsm_state_t new) { struct edge_set *set; size_t i; assert(setp != NULL); assert(old != BUCKET_UNUSED); assert(old != BUCKET_TOMBSTONE); (void)alloc; if (IS_SINGLETON(*setp)) { if (SINGLETON_DECODE_STATE(*setp) == old) { unsigned char symbol; symbol = SINGLETON_DECODE_SYMBOL(*setp); *setp = SINGLETON_ENCODE(symbol, new); assert(SINGLETON_DECODE_SYMBOL(*setp) == symbol); assert(SINGLETON_DECODE_STATE(*setp) == new); } return 1; } set = *setp; if (edge_set_empty(set)) { return 1; } for (i = 0; i < set->ceil; i++) { if (set->b[i].state == old) { set->b[i].state = new; } } /* If there is now more than one edge <label, new> for * any label, then the later ones need to be removed and * the count adjusted. */ { uint64_t seen[4]; memset(seen, 0x00, sizeof(seen)); for (i = 0; i < set->ceil; i++) { const fsm_state_t bs = set->b[i].state; unsigned char symbol; uint64_t bit; if (bs != new) { continue; } symbol = set->b[i].symbol; bit = (uint64_t)1 << (symbol & 63); if (seen[symbol/64] & bit) { /* remove duplicate, update count */ set->b[i].state = BUCKET_TOMBSTONE; set->count--; } else { seen[symbol/64] |= bit; } } } return 1; } int edge_set_empty(const struct edge_set *set) { if (set == NULL) { return 1; } if (IS_SINGLETON(set)) { return 0; } return set->count == 0; } void edge_set_ordered_iter_reset_to(const struct edge_set *set, struct edge_ordered_iter *eoi, unsigned char symbol) { /* Create an ordered iterator for the hash table by figuring * out which symbols are present (0x00 <= x <= 0xff, tracked * in a bit set) and either yielding the next bucket for the * current symbol or advancing to the next symbol present. */ size_t i, found, mask; memset(eoi, 0x00, sizeof(*eoi)); eoi->symbol = symbol; /* Check for special case unboxed sets first. */ if (IS_SINGLETON(set)) { eoi->set = set; eoi->pos = EOI_SINGLETON_SET; return; } else if (edge_set_empty(set)) { eoi->pos = EOI_DONE; return; } found = 0; for (i = 0; i < set->ceil; i++) { const fsm_state_t bs = set->b[i].state; unsigned char symbol; if (bs == BUCKET_UNUSED || bs == BUCKET_TOMBSTONE) { continue; } symbol = set->b[i].symbol; eoi->symbols_used[symbol/64] |= ((uint64_t)1 << (symbol & 63)); found++; } assert(found == set->count); mask = set->ceil - 1; /* Start out pointing to the first bucket with a matching symbol, * or the first unused bucket if not present. 
*/ { const unsigned h = PHI32 * eoi->symbol; for (i = 0; i < set->ceil; i++) { const size_t b_i = (h + i) & mask; const fsm_state_t bs = set->b[b_i].state; if (bs == BUCKET_TOMBSTONE) { continue; /* search past deleted */ } else if (bs == BUCKET_UNUSED) { eoi->pos = b_i; /* will advance to next symbol */ break; } else if (set->b[b_i].symbol == eoi->symbol) { eoi->pos = b_i; /* pointing at first bucket */ break; } else { continue; /* find first entry with symbol */ } } } eoi->set = set; } static int advance_symbol(struct edge_ordered_iter *eoi) { unsigned i = eoi->symbol + 1; while (i < 0x100) { if (eoi->symbols_used[i/64] & ((uint64_t)1 << (i & 63))) { eoi->symbol = i; return 1; } i++; } eoi->pos = EOI_DONE; eoi->steps = 0; return 0; } void edge_set_ordered_iter_reset(const struct edge_set *set, struct edge_ordered_iter *eoi) { edge_set_ordered_iter_reset_to(set, eoi, 0); } int edge_set_ordered_iter_next(struct edge_ordered_iter *eoi, struct fsm_edge *e) { fsm_state_t bs; unsigned char symbol; const struct edge_set *set = eoi->set; size_t mask; if (eoi->pos == EOI_DONE) { return 0; /* done */ } else if (eoi->pos == EOI_SINGLETON_SET) { e->state = SINGLETON_DECODE_STATE(eoi->set); e->symbol = SINGLETON_DECODE_SYMBOL(eoi->set); eoi->pos = EOI_DONE; return 1; } mask = set->ceil - 1; for (;;) { eoi->pos &= mask; bs = set->b[eoi->pos].state; symbol = set->b[eoi->pos].symbol; eoi->steps++; if (bs == BUCKET_UNUSED || eoi->steps == set->ceil) { size_t i; unsigned h; /* after current symbol's entries -- check next */ if (!advance_symbol(eoi)) { return 0; /* done */ } h = PHI32 * eoi->symbol; for (i = 0; i < set->ceil; i++) { const size_t b_i = (h + i) & mask; bs = set->b[b_i].state; if (bs == BUCKET_TOMBSTONE) { continue; /* search past deleted */ } else if (bs == BUCKET_UNUSED) { /* should never get here -- searching for * a symbol that isn't present, but we * already know what's present */ assert(!"internal error"); } else if (set->b[b_i].symbol != eoi->symbol) { continue; /* skip collision */ } else { assert(set->b[b_i].symbol == eoi->symbol); /* yield next match and then advance */ eoi->pos = b_i; eoi->steps = 0; memcpy(e, &set->b[eoi->pos], sizeof(*e)); eoi->pos++; return 1; } } /* should always find a match or an unused bucket */ assert(!"internal error"); return 0; } else if (bs == BUCKET_TOMBSTONE) { eoi->pos++; /* skip over */ continue; } else if (symbol != eoi->symbol) { eoi->pos++; continue; /* skip collision */ } else { /* if pointing at next bucket, yield it */ assert(symbol == eoi->symbol); memcpy(e, &set->b[eoi->pos], sizeof(*e)); eoi->pos++; return 1; } } } #else /* USE_EDGE_BITSET */ #define LOG_BITSET 0 #include "libfsm/internal.h" /* XXX: for allocating struct fsm_edge, and the edges array */ #include <print/esc.h> #include <adt/alloc.h> #include <adt/bitmap.h> #include <adt/set.h> #include <adt/stateset.h> #include <adt/edgeset.h> #define DEF_EDGE_GROUP_CEIL 1 /* Array of <to, symbols> tuples, sorted by to. * * Design assumption: It is significantly more likely in practice to * have to be more edges with different labels going to the same state * than the same symbol going to different states. This does not * include epsilon edges, which can be stored in a state_set. 
*/ struct edge_set { size_t ceil; /* nonzero */ size_t count; /* <= ceil */ struct edge_group { fsm_state_t to; /* distinct */ uint64_t symbols[256/64]; } *groups; /* sorted by .to */ }; #define SYMBOLS_SET(S, ID) (S[ID/64] |= (1ULL << (ID & 63))) #define SYMBOLS_GET(S, ID) (S[ID/64] & (1ULL << (ID & 63))) #define SYMBOLS_CLEAR(S, ID) (S[ID/64] &=~ (1ULL << (ID & 63))) struct edge_set * edge_set_new(void) { #if LOG_BITSET fprintf(stderr, " -- edge_set_new\n"); #endif return NULL; /* -> empty set */ } void edge_set_free(const struct fsm_alloc *a, struct edge_set *set) { #if LOG_BITSET fprintf(stderr, " -- edge_set_free %p\n", (void *)set); #endif if (set == NULL) { return; } f_free(a, set->groups); f_free(a, set); } static int grow_groups(struct edge_set *set, const struct fsm_alloc *alloc) { /* TODO: This could also squash out any groups where * none of the symbols are set anymore. */ const size_t nceil = 2 *set->ceil; struct edge_group *ng; assert(nceil > 0); #if LOG_BITSET fprintf(stderr, " -- edge_set grow_groups: %lu -> %lu\n", set->ceil, nceil); #endif ng = f_realloc(alloc, set->groups, nceil * sizeof(set->groups[0])); if (ng == NULL) { return 0; } set->ceil = nceil; set->groups = ng; return 1; } static void dump_edge_set(const struct edge_set *set) { const struct edge_group *eg; size_t i; #if LOG_BITSET < 2 return; #endif if (edge_set_empty(set)) { fprintf(stderr, "dump_edge_set: <empty>\n"); return; } fprintf(stderr, "dump_edge_set: %p\n", (void *)set); for (i = 0; i < set->count; i++) { eg = &set->groups[i]; fprintf(stderr, " -- %ld: [0x%lx, 0x%lx, 0x%lx, 0x%lx] -> %u\n", i, eg->symbols[0], eg->symbols[1], eg->symbols[2], eg->symbols[3], eg->to); } } static struct edge_set * init_empty(const struct fsm_alloc *alloc) { struct edge_set *set = f_calloc(alloc, 1, sizeof(*set)); if (set == NULL) { return NULL; } set->groups = f_malloc(alloc, DEF_EDGE_GROUP_CEIL * sizeof(set->groups[0])); if (set->groups == NULL) { f_free(alloc, set); return NULL; } set->ceil = DEF_EDGE_GROUP_CEIL; set->count = 0; return set; } int edge_set_add(struct edge_set **pset, const struct fsm_alloc *alloc, unsigned char symbol, fsm_state_t state) { uint64_t symbols[256/64] = { 0 }; SYMBOLS_SET(symbols, symbol); return edge_set_add_bulk(pset, alloc, symbols, state); } int edge_set_advise_growth(struct edge_set **pset, const struct fsm_alloc *alloc, size_t count) { struct edge_set *set = *pset; if (set == NULL) { set = init_empty(alloc); if (set == NULL) { return 0; } *pset = set; } const size_t oceil = set->ceil; size_t nceil = 1; while (nceil < oceil + count) { nceil *= 2; } assert(nceil > 0); #if LOG_BITSET fprintf(stderr, " -- edge_set advise_growth: %lu -> %lu\n", set->ceil, nceil); #endif struct edge_group *ng = f_realloc(alloc, set->groups, nceil * sizeof(set->groups[0])); if (ng == NULL) { return 0; } set->ceil = nceil; set->groups = ng; return 1; } int edge_set_add_bulk(struct edge_set **pset, const struct fsm_alloc *alloc, uint64_t symbols[256/64], fsm_state_t state) { struct edge_set *set; struct edge_group *eg; size_t i; assert(pset != NULL); set = *pset; if (set == NULL) { /* empty */ set = init_empty(alloc); if (set == NULL) { return 0; } eg = &set->groups[0]; eg->to = state; memcpy(eg->symbols, symbols, sizeof(eg->symbols)); set->count++; *pset = set; #if LOG_BITSET fprintf(stderr, " -- edge_set_add: symbols [0x%lx, 0x%lx, 0x%lx, 0x%lx] -> state %d on empty -> %p\n", symbols[0], symbols[1], symbols[2], symbols[3], state, (void *)set); #endif dump_edge_set(set); return 1; } assert(set->ceil > 0); 
assert(set->count <= set->ceil); #if LOG_BITSET fprintf(stderr, " -- edge_set_add: symbols [0x%lx, 0x%lx, 0x%lx, 0x%lx] -> state %d on %p\n", symbols[0], symbols[1], symbols[2], symbols[3], state, (void *)set); #endif /* Linear search for a group with the same destination * state, or the position where that group would go. */ for (i = 0; i < set->count; i++) { eg = &set->groups[i]; if (eg->to == state) { /* This API does not indicate whether that * symbol -> to edge was already present. */ size_t i; for (i = 0; i < 256/64; i++) { eg->symbols[i] |= symbols[i]; } dump_edge_set(set); return 1; } else if (eg->to > state) { break; /* will shift down and insert below */ } else { continue; } } /* insert/append at i */ if (set->count == set->ceil) { if (!grow_groups(set, alloc)) { return 0; } } eg = &set->groups[i]; if (i < set->count && eg->to != state) { /* shift down by one */ const size_t to_mv = set->count - i; #if LOG_BITSET fprintf(stderr, " --- shifting, count %ld, i %ld, to_mv %ld\n", set->count, i, to_mv); #endif if (to_mv > 0) { memmove(&set->groups[i + 1], &set->groups[i], to_mv * sizeof(set->groups[i])); } eg = &set->groups[i]; } eg->to = state; memset(eg->symbols, 0x00, sizeof(eg->symbols)); memcpy(eg->symbols, symbols, sizeof(eg->symbols)); set->count++; dump_edge_set(set); return 1; } int edge_set_add_state_set(struct edge_set **setp, const struct fsm_alloc *alloc, unsigned char symbol, const struct state_set *state_set) { struct state_iter si; fsm_state_t state; state_set_reset(state_set, &si); while (state_set_next(&si, &state)) { if (!edge_set_add(setp, alloc, symbol, state)) { return 0; } } return 1; } int edge_set_find(const struct edge_set *set, unsigned char symbol, struct fsm_edge *e) { const struct edge_group *eg; size_t i; if (set == NULL) { return 0; } #if LOG_BITSET fprintf(stderr, " -- edge_set_find: symbol 0x%x on %p\n", symbol, (void *)set); #endif for (i = 0; i < set->count; i++) { eg = &set->groups[i]; if (SYMBOLS_GET(eg->symbols, symbol)) { e->state = eg->to; e->symbol = symbol; return 1; } } return 0; /* not found */ } int edge_set_contains(const struct edge_set *set, unsigned char symbol) { const struct edge_group *eg; size_t i; if (edge_set_empty(set)) { return 0; } #if LOG_BITSET fprintf(stderr, " -- edge_set_contains: symbol 0x%x on %p\n", symbol, (void *)set); #endif for (i = 0; i < set->count; i++) { eg = &set->groups[i]; if (SYMBOLS_GET(eg->symbols, symbol)) { return 1; } } return 0; } int edge_set_hasnondeterminism(const struct edge_set *set, struct bm *bm) { size_t i, w_i; assert(bm != NULL); #if LOG_BITSET fprintf(stderr, " -- edge_set_hasnondeterminism: on %p\n", (void *)set); #endif dump_edge_set(set); if (edge_set_empty(set)) { return 0; } for (i = 0; i < set->count; i++) { const struct edge_group *eg = &set->groups[i]; for (w_i = 0; w_i < 4; w_i++) { const uint64_t cur = eg->symbols[w_i]; size_t b_i; if (cur == 0) { continue; } #if LOG_BITSET > 1 fprintf(stderr, " -- eshnd: [0x%lx, 0x%lx, 0x%lx, 0x%lx] + group %ld cur %ld: 0x%lx\n", eg->symbols[0], eg->symbols[1], eg->symbols[2], eg->symbols[3], i, w_i, cur); #endif for (b_i = 0; b_i < 64; b_i++) { if (cur & (1ULL << b_i)) { const size_t bit = 64*w_i + b_i; if (bm_get(bm, bit)) { #if LOG_BITSET > 1 fprintf(stderr, "-- eshnd: hit on bit %lu\n", bit); #endif return 1; } bm_set(bm, bit); } } } } return 0; } int edge_set_transition(const struct edge_set *set, unsigned char symbol, fsm_state_t *state) { /* * This function is meaningful for DFA only; we require a DFA * by contract in order to identify a 
single destination state * for a given symbol. */ struct fsm_edge e; if (!edge_set_find(set, symbol, &e)) { return 0; } *state = e.state; return 1; } size_t edge_set_count(const struct edge_set *set) { size_t res = 0; size_t i; #if LOG_BITSET fprintf(stderr, " -- edge_set_count: on %p\n", (void *)set); #endif /* This does not call edge_set_empty directly * here, because that does the same scan as below, * it just exits at the first label found. */ if (set == NULL) { return 0; } for (i = 0; i < set->count; i++) { size_t w_i; const struct edge_group *eg = &set->groups[i]; for (w_i = 0; w_i < 4; w_i++) { /* TODO: res += popcount64(w) */ const uint64_t w = eg->symbols[w_i]; uint64_t bit; if (w == 0) { continue; } for (bit = (uint64_t)1; bit; bit <<= 1) { if (w & bit) { res++; } } } } #if LOG_BITSET fprintf(stderr, " -- edge_set_count: %zu\n", res); #endif return res; } static int find_first_group_label(const struct edge_group *g, unsigned char *label) { size_t i, bit; for (i = 0; i < 4; i++) { if (g->symbols[i] == 0) { continue; } for (bit = 0; bit < 64; bit++) { if (g->symbols[i] & ((uint64_t)1 << bit)) { *label = i*64 + bit; return 1; } } } return 0; } static int edge_set_copy_into(struct edge_set **pdst, const struct fsm_alloc *alloc, const struct edge_set *src) { /* Because the source and destination edge_set groups are * sorted by .to, we can pairwise bulk merge them. If the * to label appears in src, we can just bitwise-or the * labels over in parallel; if not, we need to add it * first, but it will be added at the current offset. */ size_t sg_i, dg_i, w_i; /* src/dst group index, word index */ struct edge_set *dst = *pdst; dg_i = 0; sg_i = 0; while (sg_i < src->count) { const struct edge_group *src_g = &src->groups[sg_i]; struct edge_group *dst_g = (dg_i < dst->count ? &dst->groups[dg_i] : NULL); if (dst_g == NULL || dst_g->to > src_g->to) { unsigned char label; /* If the src group is empty, skip it (that can * happen as labels are added and removed, * we don't currently prune empty ones), * otherwise get the first label present for * the edge_set_add below. */ if (!find_first_group_label(src_g, &label)) { sg_i++; continue; } /* Insert the first label, so a group with * that .to will exist at the current offset. */ if (!edge_set_add(&dst, alloc, label, src_g->to)) { return 0; } dst_g = &dst->groups[dg_i]; } assert(dst_g != NULL); /* exists now */ if (dst_g->to < src_g->to) { dg_i++; continue; } assert(dst_g->to == src_g->to); for (w_i = 0; w_i < 4; w_i++) { /* bit-parallel union */ dst_g->symbols[w_i] |= src_g->symbols[w_i]; } sg_i++; dg_i++; } return 1; } int edge_set_copy(struct edge_set **dst, const struct fsm_alloc *alloc, const struct edge_set *src) { struct edge_set *set; assert(dst != NULL); #if LOG_BITSET fprintf(stderr, " -- edge_set_copy: on %p -> %p\n", (void *)src, (void *)*dst); #endif if (*dst != NULL) { /* The other `edge_set_copy` copies in the * edges from src if *dst already exists. 
*/ return edge_set_copy_into(dst, alloc, src); } if (edge_set_empty(src)) { *dst = NULL; return 1; } set = f_calloc(alloc, 1, sizeof(*set)); if (set == NULL) { return 0; } set->groups = f_malloc(alloc, src->ceil * sizeof(set->groups[0])); if (set->groups == NULL) { f_free(alloc, set); return 0; } set->ceil = src->ceil; memcpy(set->groups, src->groups, src->count * sizeof(src->groups[0])); set->count = src->count; #if LOG_BITSET { size_t i; for (i = 0; i < set->count; i++) { fprintf(stderr, "edge_set[%zd]: to %d, [0x%lx, 0x%lx, 0x%lx, 0x%lx]\n", i, set->groups[i].to, set->groups[i].symbols[0], set->groups[i].symbols[1], set->groups[i].symbols[2], set->groups[i].symbols[3]); } } #endif *dst = set; return 1; } void edge_set_remove(struct edge_set **pset, unsigned char symbol) { size_t i; struct edge_set *set; assert(pset != NULL); set = *pset; #if LOG_BITSET fprintf(stderr, " -- edge_set_remove: symbol 0x%x on %p\n", symbol, (void *)set); #endif if (edge_set_empty(set)) { return; } /* This does not track whether edge(s) were actually removed. * It also does not remove edge groups where none of the * symbols are set anymore. */ for (i = 0; i < set->count; i++) { struct edge_group *eg = &set->groups[i]; SYMBOLS_CLEAR(eg->symbols, symbol); } } void edge_set_remove_state(struct edge_set **pset, fsm_state_t state) { size_t i; struct edge_set *set; assert(pset != NULL); set = *pset; #if LOG_BITSET fprintf(stderr, " -- edge_set_remove_state: state %d on %p\n", state, (void *)set); #endif if (edge_set_empty(set)) { return; } i = 0; while (i < set->count) { const struct edge_group *eg = &set->groups[i]; if (eg->to == state) { const size_t to_mv = set->count - i; memmove(&set->groups[i], &set->groups[i + 1], to_mv * sizeof(*eg)); set->count--; } else { i++; } } } struct to_info { fsm_state_t old_to; fsm_state_t new_to; fsm_state_t assignment; }; static int collate_info_by_new_to(const void *pa, const void *pb) { const struct to_info *a = (const struct to_info *)pa; const struct to_info *b = (const struct to_info *)pb; if (a->new_to < b->new_to) { return -1; } else if (a->new_to > b->new_to) { return 1; } else { return 0; } } static int collate_info_by_old_to(const void *pa, const void *pb) { const struct to_info *a = (const struct to_info *)pa; const struct to_info *b = (const struct to_info *)pb; if (a->old_to < b->old_to) { return -1; } else if (a->old_to > b->old_to) { return 1; } else { assert(!"violated uniqueness invariant"); return 0; } } #define LOG_COMPACT 0 int edge_set_compact(struct edge_set **pset, const struct fsm_alloc *alloc, fsm_state_remap_fun *remap, const void *opaque) { struct edge_set *set; size_t i; struct to_info *info; struct edge_group *ngroups; size_t ncount = 0; assert(pset != NULL); set = *pset; #if LOG_BITSET || LOG_COMPACT fprintf(stderr, " -- edge_set_compact: set %p\n", (void *)set); #endif if (edge_set_empty(set)) { return 1; } assert(set->count > 0); info = f_malloc(alloc, set->count * sizeof(info[0])); if (info == NULL) { return 0; } ngroups = f_calloc(alloc, set->ceil, sizeof(ngroups[0])); if (ngroups == NULL) { f_free(alloc, info); return 0; } /* first pass, construct mapping */ for (i = 0; i < set->count; i++) { struct edge_group *eg = &set->groups[i]; const fsm_state_t new_to = remap(eg->to, opaque); #if LOG_BITSET > 1 || LOG_COMPACT fprintf(stderr, "compact: %ld, old_to %d -> new_to %d\n", i, eg->to, new_to); #endif info[i].old_to = eg->to; info[i].new_to = new_to; info[i].assignment = (fsm_state_t)-1; /* not yet assigned */ } /* sort info by new_state */ 
qsort(info, set->count, sizeof(info[0]), collate_info_by_new_to); #if LOG_BITSET > 1 || LOG_COMPACT fprintf(stderr, "== after sort by new_state\n"); for (i = 0; i < set->count; i++) { fprintf(stderr, " -- %lu: old_to: %d, new_to: %d, assignment: %d\n", i, info[i].old_to, info[i].new_to, info[i].assignment); } #endif info[0].assignment = 0; ncount++; for (i = 1; i < set->count; i++) { const fsm_state_t prev_new_to = info[i - 1].new_to; const fsm_state_t prev_assignment = info[i - 1].assignment; assert(info[i].new_to >= prev_new_to); if (info[i].new_to == FSM_STATE_REMAP_NO_STATE) { break; } if (info[i].new_to == prev_new_to) { info[i].assignment = prev_assignment; } else { info[i].assignment = prev_assignment + 1; ncount++; } } /* sort again, by old_state */ qsort(info, set->count, sizeof(info[0]), collate_info_by_old_to); #if LOG_BITSET > 1 || LOG_COMPACT fprintf(stderr, "== after sort by old_state\n"); for (i = 0; i < set->count; i++) { fprintf(stderr, " -- %lu: old_to: %d, new_to: %d, assignment: %d\n", i, info[i].old_to, info[i].new_to, info[i].assignment); } #endif /* second pass, copy/condense */ for (i = 0; i < set->count; i++) { struct edge_group *g; size_t w_i; if (info[i].new_to == FSM_STATE_REMAP_NO_STATE) { continue; } g = &ngroups[info[i].assignment]; g->to = info[i].new_to; for (w_i = 0; w_i < 256/64; w_i++) { g->symbols[w_i] |= set->groups[i].symbols[w_i]; } } f_free(alloc, info); f_free(alloc, set->groups); set->groups = ngroups; set->count = ncount; #if LOG_BITSET > 1 || LOG_COMPACT for (i = 0; i < set->count; i++) { fprintf(stderr, "ngroups[%zu]: to %d\n", i, set->groups[i].to); } #endif return 1; } void edge_set_reset(const struct edge_set *set, struct edge_iter *it) { assert(it != NULL); #if LOG_BITSET fprintf(stderr, " -- edge_set_reset: set %p\n", (void *)set); #endif it->i = 0; it->j = 0; it->set = set; } int edge_set_next(struct edge_iter *it, struct fsm_edge *e) { const struct edge_set *set; assert(it != NULL); set = it->set; set = it->set; #if LOG_BITSET > 1 fprintf(stderr, " -- edge_set_next: set %p, i %ld, j 0x%x\n", (void *)set, it->i, (unsigned)it->j); #endif if (set == NULL) { return 0; } while (it->i < set->count) { const struct edge_group *eg = &set->groups[it->i]; while (it->j < 256) { if ((it->j & 63) == 0 && 0 == eg->symbols[it->j/64]) { it->j += 64; } else { if (SYMBOLS_GET(eg->symbols, it->j)) { e->symbol = it->j; e->state = eg->to; it->j++; return 1; } it->j++; } } it->i++; it->j = 0; } return 0; } void edge_set_rebase(struct edge_set **pset, fsm_state_t base) { size_t i; struct edge_set *set; assert(pset != NULL); set = *pset; #if LOG_BITSET fprintf(stderr, " -- edge_set_rebase: set %p, base %d\n", (void *)set, base); #endif if (edge_set_empty(set)) { return; } for (i = 0; i < set->count; i++) { struct edge_group *eg = &set->groups[i]; eg->to += base; } } int edge_set_replace_state(struct edge_set **pset, const struct fsm_alloc *alloc, fsm_state_t old, fsm_state_t new) { size_t i; struct edge_set *set; struct edge_group cp; assert(pset != NULL); set = *pset; #if LOG_BITSET fprintf(stderr, " -- edge_set_replace_state: set %p, state %d -> %d\n", (void *)set, old, new); #endif if (edge_set_empty(set)) { return 1; } /* Invariants: if a group with .to == old appears in the group, * it should only appear once. Replacing .to may lead to * duplicates, so duplicates may need to be merged after. 
*/ for (i = 0; i < set->count; i++) { struct edge_group *eg = &set->groups[i]; if (eg->to == old) { const size_t to_mv = set->count - i; unsigned char label; if (!find_first_group_label(eg, &label)) { return 1; /* ignore empty group */ } #if LOG_BITSET fprintf(stderr, " -- edge_set_replace_state: removing group at %ld with .to=%d\n", i, old); #endif /* Remove group */ memcpy(&cp, eg, sizeof(cp)); memmove(&set->groups[i], &set->groups[i + 1], to_mv * sizeof(set->groups[i])); set->count--; #if LOG_BITSET dump_edge_set(set); fprintf(stderr, " -- edge_set_replace_state: reinserting group with .to=%d and label 0x%x\n", new, (unsigned)label); #endif /* Realistically, this shouldn't fail, because * edge_set_add only fails on allocation failure * when it needs to grow the backing array, but * we're removing a group and then adding the * group again so add's bookkeeping puts the * group in the appropriate place. */ if (!edge_set_add(&set, alloc, label, new)) { return 0; } dump_edge_set(set); for (i = 0; i < set->count; i++) { eg = &set->groups[i]; if (eg->to == new) { size_t w_i; #if LOG_BITSET fprintf(stderr, " -- edge_set_replace_state: found new group at %ld, setting other labels from copy\n", i); #endif for (w_i = 0; w_i < 4; w_i++) { eg->symbols[w_i] |= cp.symbols[w_i]; } dump_edge_set(set); return 1; } } assert(!"internal error: just added, but not found"); } } return 1; } int edge_set_empty(const struct edge_set *s) { size_t i; if (s == NULL || s->count == 0) { return 1; } for (i = 0; i < s->count; i++) { unsigned char label; if (find_first_group_label(&s->groups[i], &label)) { return 0; } } return 1; } void edge_set_ordered_iter_reset_to(const struct edge_set *set, struct edge_ordered_iter *eoi, unsigned char symbol) { eoi->symbol = symbol; /* stride by character */ eoi->pos = 0; eoi->set = set; #if LOG_BITSET fprintf(stderr, " -- edge_set_ordered_iter_reset_to: set %p, symbol 0x%x\n", (void *)set, symbol); #endif } /* Reset an ordered iterator, equivalent to * edge_set_ordered_iter_reset_to(set, eoi, '\0'). */ void edge_set_ordered_iter_reset(const struct edge_set *set, struct edge_ordered_iter *eoi) { edge_set_ordered_iter_reset_to(set, eoi, 0x00); } /* Get the next edge from an ordered iterator and return 1, * or return 0 when no more are available. 
*/ int edge_set_ordered_iter_next(struct edge_ordered_iter *eoi, struct fsm_edge *e) { const struct edge_set *set; assert(eoi != NULL); set = eoi->set; #if LOG_BITSET fprintf(stderr, " -- edge_set_ordered_iter_next: set %p, pos %ld, symbol 0x%x\n", (void *)set, eoi->pos, eoi->symbol); #endif if (set == NULL) { return 0; } for (;;) { while (eoi->pos < set->count) { struct edge_group *eg = &set->groups[eoi->pos++]; if (SYMBOLS_GET(eg->symbols, eoi->symbol)) { e->symbol = eoi->symbol; e->state = eg->to; return 1; } } if (eoi->symbol == 255) { /* done */ eoi->set = NULL; return 0; } else { eoi->symbol++; eoi->pos = 0; } } return 0; } void edge_set_group_iter_reset(const struct edge_set *set, enum edge_group_iter_type iter_type, struct edge_group_iter *egi) { memset(egi, 0x00, sizeof(*egi)); egi->set = set; egi->flag = iter_type; #if LOG_BITSET > 1 fprintf(stderr, " -- edge_set_group_iter_reset: set %p, type %d\n", (void *)set, iter_type); #endif if (iter_type == EDGE_GROUP_ITER_UNIQUE && set != NULL) { struct edge_group *g; size_t g_i, i; uint64_t seen[256/64] = { 0 }; for (g_i = 0; g_i < set->count; g_i++) { g = &set->groups[g_i]; for (i = 0; i < 256; i++) { if ((i & 63) == 0 && g->symbols[i/64] == 0) { i += 63; /* skip empty word */ continue; } if (SYMBOLS_GET(g->symbols, i)) { if (SYMBOLS_GET(seen, i)) { SYMBOLS_SET(egi->internal, i); } else { SYMBOLS_SET(seen, i); } } } } } } int edge_set_group_iter_next(struct edge_group_iter *egi, struct edge_group_iter_info *eg) { struct edge_group *g; int any = 0; size_t i; advance: if (egi->set == NULL || egi->i == egi->set->count) { #if LOG_BITSET > 1 fprintf(stderr, " -- edge_set_group_iter_next: set %p, count %lu, done\n", (void *)egi->set, egi->i); #endif return 0; } g = &egi->set->groups[egi->i]; eg->to = g->to; #if LOG_BITSET > 1 fprintf(stderr, " -- edge_set_group_iter_next: flag %d, i %zu, to %d\n", egi->flag, egi->i, g->to); #endif if (egi->flag == EDGE_GROUP_ITER_ALL) { egi->i++; for (i = 0; i < 4; i++) { eg->symbols[i] = g->symbols[i]; if (eg->symbols[i] != 0) { any = 1; } } if (!any) { goto advance; } return 1; } else if (egi->flag == EDGE_GROUP_ITER_UNIQUE) { /* uniques first */ for (i = 0; i < 4; i++) { eg->symbols[i] = g->symbols[i] &~ egi->internal[i]; if (eg->symbols[i] != 0) { any = 1; } } /* next time, yield non-uniques */ egi->flag = EDGE_GROUP_ITER_UNIQUE + 1; /* if there are any uniques, yield them, otherwise * continue to the non-unique branch below. */ if (any) { eg->unique = 1; return 1; } } if (egi->flag == EDGE_GROUP_ITER_UNIQUE + 1) { for (i = 0; i < 4; i++) { eg->symbols[i] = g->symbols[i] & egi->internal[i]; if (eg->symbols[i]) { any = 1; } } eg->unique = 0; egi->flag = EDGE_GROUP_ITER_UNIQUE; egi->i++; if (!any) { goto advance; } return 1; } else { assert("match fail"); return 0; } } #endif
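One detail worth calling out from the hash-table variant above is the singleton case: a set holding a single (symbol, state) edge is never allocated at all, but packed straight into the edge_set pointer, with the low bit marking the tagged form. Below is a small, self-contained sketch of that pointer-tagging trick, mirroring the SINGLETON_* macros; the typedef and main() are illustrative scaffolding for this note, not part of libfsm.

#include <assert.h>
#include <limits.h>
#include <stdint.h>
#include <stdio.h>

/* Low bit = tag, bits 1..8 = symbol, remaining bits = state ID. This relies
 * on real heap pointers being at least 2-byte aligned, so a genuine pointer
 * never has its low bit set and the two cases can be told apart. */
typedef unsigned fsm_state_t;

#define IS_SINGLETON(p)           (((uintptr_t) (p)) & 0x1)
#define SINGLETON_ENCODE(sym, st) ((void *) (            \
        (((uintptr_t) (st)) << (CHAR_BIT + 1)) |         \
        (((uintptr_t) (sym)) << 1) |                     \
        0x1))
#define SINGLETON_SYMBOL(p)       ((unsigned char) ((((uintptr_t) (p)) >> 1) & 0xFFU))
#define SINGLETON_STATE(p)        ((fsm_state_t) (((uintptr_t) (p)) >> (CHAR_BIT + 1)))

int main(void) {
	void *boxed = SINGLETON_ENCODE('a', 42);
	assert(IS_SINGLETON(boxed));
	printf("symbol=%c state=%u\n",
	    SINGLETON_SYMBOL(boxed), SINGLETON_STATE(boxed)); /* symbol=a state=42 */
	return 0;
}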
"""
Write to stdout the command line args it received, one per line.
"""

import sys

for x in sys.argv[1:]:
    print(x)
package test.testng109;

import org.testng.annotations.BeforeClass;
import org.testng.annotations.Test;

/** This class/interface */
public class SkippedTestWithExpectedExceptionTest {

  @BeforeClass
  public void setup() {
    throw new RuntimeException("test-exception");
  }

  @Test
  public void test1() {
    // empty
  }

  @Test(expectedExceptions = {OutOfMemoryError.class})
  public void test2() {
    // empty
  }
}
/* * Copyright 2010-2021 Australian Signals Directorate * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package au.gov.asd.tac.constellation.plugins.arrangements.uncollide.experimental; import au.gov.asd.tac.constellation.graph.GraphElementType; import au.gov.asd.tac.constellation.graph.GraphReadMethods; import au.gov.asd.tac.constellation.graph.schema.visual.concept.VisualConcept; import java.util.ArrayList; import java.util.List; /** * This class provides an abstract representation of a Tree designed to help * detect collisions quickly by dividing an X-dimensional space into 2^X * sub-spaces. The minimum value of X is 2. * * @author Nova */ abstract class AbstractTree { static final int MAX_OBJECTS = 50; static final int MAX_LEVELS = 4; final int xId; final int yId; final int rId; final GraphReadMethods wg; final int level; AbstractBoundingBox box; List<Integer> objects; AbstractTree[] nodes; /** * Constructor creates QuadTree and inserts all nodes * * @param graph The graph the QuadTree should be based on */ AbstractTree(final GraphReadMethods graph, final Dimensions d) { this.level = 0; this.objects = new ArrayList<>(); this.nodes = null; this.box = BoxFactory.create(graph, d); this.wg = graph; this.xId = wg.getAttribute(GraphElementType.VERTEX, VisualConcept.VertexAttribute.X.getName()); this.yId = wg.getAttribute(GraphElementType.VERTEX, VisualConcept.VertexAttribute.Y.getName()); this.rId = wg.getAttribute(GraphElementType.VERTEX, VisualConcept.VertexAttribute.NODE_RADIUS.getName()); } /** * Create a subtree of the current tree * * @param parent * @param box */ AbstractTree(AbstractTree parent, final AbstractBoundingBox box) { this.level = parent.level + 1; this.box = box; objects = new ArrayList<>(); nodes = null; // Inherit parent values for graph based variables. wg = parent.wg; xId = parent.xId; yId = parent.yId; rId = parent.rId; } /* * Splits the node into 2^X subnodes. * <p> * Divide the node into 2^X equal parts and initialise the 2^X subnodes with the new bounds. */ abstract void split(); /* * Determine which node the object belongs to. * <p> * -1 means object cannot completely fit within a child node and is part of the parent node. * <p> * Determine where an object belongs in the quadtree by determining which node the object can fit into. */ abstract int getIndex(final int vxId); abstract double getDelta(final int vertex1, final int vertex2); abstract double getCollisionDistance(final int vertex1, final int vertex2); /* * Insert the object into the tree. If the node exceeds the capacity, it will split and add * objects that fit to their corresponding nodes. 
*/ private void insert(final int vxId) { if (nodes != null) { // if their are subnodes final int index = getIndex(vxId); // find the correct subnode if (index != -1) { // if it fits neatly in a subnode nodes[index].insert(vxId); // insert into that subnode return; } } // if it fits in this node objects.add(vxId); // add to list of objects if (objects.size() > MAX_OBJECTS && level < MAX_LEVELS) { if (nodes == null) { // if no subnodes then split split(); } int i = 0; while (i < objects.size()) { // For each object get the index and insert it into the subnode if it fits in one. If it fits in a subnode remove it from this list of objects. final int index = getIndex(objects.get(i)); if (index != -1) { nodes[index].insert(objects.remove(i)); } else { i++; } } } } /** * Insert all verticies in the graph into the tree. */ final void insertAll() { for (int position = 0; position < wg.getVertexCount(); position++) { insert(wg.getVertex(position)); } } /* * Return all objects that could collide with the given object. */ final List<Integer> getPossibleColliders(final List<Integer> colliders, final int vxId) { // Recursively find all child colliders... final int index = getIndex(vxId); if (index != -1 && nodes != null) { nodes[index].getPossibleColliders(colliders, vxId); } // ...and colliders at this level. colliders.addAll(objects); return colliders; } /** * Check the entire graph for collisions. * * @return boolean indicating whether the graph contains colliding verticies */ public final boolean hasCollision() { for (int position = 0; position < wg.getVertexCount(); position++) { if (nodeCollides(wg.getVertex(position))) { return true; } } return false; } /** * Returns boolean indicating whether or not the vertex collides with any * other verticies. Two verticies in exactly the same spot are not counted * as overlapping. * * @param subject The vertex to check for collisions. * @param padding The minimum distance between the vertex's edge and the * edges of each neighbor. * @return the number of collisions. */ final boolean nodeCollides(final int subject) { final List<Integer> possibles = new ArrayList<>(); getPossibleColliders(possibles, subject); // We need to deal with pathological cases such as everything at the same x,y point, // or everything co-linear. // We add a perturbation so points go different ways at different stages. for (final int possible : possibles) { if (subject != possible) { final double delta = getDelta(subject, possible); final double collisionDistance = getCollisionDistance(subject, possible); if (delta < collisionDistance) { return true; } } } return false; } /** * Check the subject for "twin" verticies * * A twin verticie is defined as a verticie that falls within twinThreshold * x (subject radius + twin radius + padding) of the subject. The average * radius is the average of the subject verticies radius and the potential * twins radius. * * @param subject The id of the vertex you wish to check for twins. * @param twinThreshold A scaling factor for the collision distance within * which the two noes are considered to be "twins". That is the distance * between them is so insignificant that we consider them in the same spot. 
* * @return A set of vertex ideas for verticies that are twins with the * subject */ public List<Integer> getTwins(final int subject, final double twinThreshold) { final List<Integer> possibles = new ArrayList<>(); getPossibleColliders(possibles, subject); final List<Integer> twins = new ArrayList<>(); for (final int possible : possibles) { if (subject != possible) { final double delta = getDelta(subject, possible); final double collisionDistance = getCollisionDistance(subject, possible); final double twinDistance = collisionDistance * twinThreshold; // The required distance for the nodes to be uncollided if (delta < twinDistance) { twins.add(possible); } } } return twins; } }
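The key step the comments above describe is getIndex(): a vertex is pushed down into one of the 2^X child regions only if it fits entirely inside that region, and otherwise stays at the parent level (index -1). The short 2-D sketch below illustrates that test; the Box/Node structs and the particular quadrant numbering are assumptions made for this note, not Constellation's actual layout.

#include <iostream>

// Sketch of the quadrant-index test: given a square region and a circle
// (vertex position + radius), return 0..3 for the child quadrant that fully
// contains the circle, or -1 if it straddles a midline and must stay at the
// parent level. Field names are illustrative only.
struct Box { double cx, cy, half; };   // centre and half-width of the region
struct Node { double x, y, r; };       // vertex position and radius

int quadrantIndex(const Box &b, const Node &n) {
    const bool left  = n.x + n.r < b.cx;
    const bool right = n.x - n.r > b.cx;
    const bool below = n.y + n.r < b.cy;
    const bool above = n.y - n.r > b.cy;
    if (left  && below) return 0;
    if (right && below) return 1;
    if (left  && above) return 2;
    if (right && above) return 3;
    return -1; // overlaps a midline: belongs to this (parent) node
}

int main() {
    Box root{0.0, 0.0, 10.0};
    std::cout << quadrantIndex(root, {3.0, 4.0, 1.0}) << "\n"; // 3 (right, above)
    std::cout << quadrantIndex(root, {0.5, 4.0, 1.0}) << "\n"; // -1 (straddles the x midline)
    return 0;
}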
// to see it compile
#include "CondFormats/CastorObjects/interface/CastorPedestalWidths.h"
package io.vertx.example.core.execblocking;

import io.vertx.core.DeploymentOptions;
import io.vertx.example.util.Runner;

/*
 * @author <a href="http://tfox.org"><NAME></a>
 */
public class ExecBlockingDedicatedPoolExample {

  // Convenience method so you can run it in your IDE
  public static void main(String[] args) {
    Runner.runExample(ExecBlockingExample.class, new DeploymentOptions()
      .setWorkerPoolName("dedicated-pool")
      .setMaxWorkerExecuteTime(120000)
      .setWorkerPoolSize(5));
  }
}
# Copyright 2020 Makani Technologies LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Functions to write controller gains into output files.""" import json import pprint from makani.control import system_types from makani.lib.python import c_helpers _WING_SERIAL_HELPER = c_helpers.EnumHelper('WingSerial', system_types) def WriteControllers(generating_file, filename, controllers): """Writes gain matrices to configuration file. Args: generating_file: File calling this function. filename: Full path to the gain matrix file to write. controllers: OrderedDict of airspeed values and gain matrices. """ docstring = (' """Returns control gain matrices' 'for any kite serial number."""') with open(filename, 'w') as f: f.write('"""Automatically generated controllers.' '\n' '\nThis file was generated by: ' + generating_file + '.' '\n"""' '\n' '\nfrom makani.control import control_types as m' '\n' '\n' '\ndef GetControllers(wing_serial):' '\n' + docstring) for i, c in enumerate(controllers): if i == 0: f.write('\n if wing_serial == m.%s:' % c['wing_serial']) else: f.write('\n' '\n elif wing_serial == m.%s:' % c['wing_serial']) for k, v in c.iteritems(): pyexcept = '' if k == 'B_flaps_to_pqr_min_airspeed': pyexcept = ' # pylint: disable=invalid-name' if k != 'wing_serial': f.write('\n %s = (%s' % (k, pyexcept)) f.write('\n ') f.write('\n '.join(pprint.pformat(v, width=74).splitlines())) f.write('\n )') f.write('\n' '\n else:') f.write('\n assert False, ') f.write("'wing_serial %d was not recognized' % wing_serial") f.write('\n' '\n return {') for k in controllers[0].iterkeys(): if k != 'wing_serial': f.write("\n '%s': (" "\n %s)," % (k, k)) f.write('\n }\n') def WriteControllersToJson(filename, controllers): """Writes gain matrices to json file. Args: filename: Full path to the json file to write. controllers: OrderedDict of airspeed values and gain matrices. """ with open(filename, 'w') as f: output_string = json.dumps(controllers, indent=2) f.write(output_string)
package com.sleekbyte.tailor.grammar;

import static org.junit.Assert.assertThat;

import com.sleekbyte.tailor.Tailor;
import org.hamcrest.text.IsEmptyString;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.runners.MockitoJUnitRunner;

import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.PrintStream;
import java.io.UnsupportedEncodingException;
import java.nio.charset.Charset;

@RunWith(MockitoJUnitRunner.class)
public class GrammarTest {

    private static final String TEST_INPUT_DIR = "src/test/swift/com/sleekbyte/tailor/grammar/";
    private File[] swiftFiles;

    @Before
    public void setUp() throws UnsupportedEncodingException {
        File inputDir = new File(TEST_INPUT_DIR);
        swiftFiles = inputDir.listFiles((File file, String name) -> name.endsWith(".swift"));
        ByteArrayOutputStream outContent = new ByteArrayOutputStream();
        System.setOut(new PrintStream(outContent, false, Charset.defaultCharset().name()));
    }

    @After
    public void tearDown() {
        System.setOut(null);
    }

    @Test
    public void testGrammar() throws UnsupportedEncodingException {
        for (File swiftFile : swiftFiles) {
            ByteArrayOutputStream errContent = new ByteArrayOutputStream();
            System.setErr(new PrintStream(errContent, false, Charset.defaultCharset().name()));
            String[] command = { "--debug", (TEST_INPUT_DIR + swiftFile.getName()) };
            Tailor.main(command);
            assertThat(errContent.toString(Charset.defaultCharset().name()), IsEmptyString.isEmptyString());
            System.setErr(null);
        }
    }
}
<filename>chrome/browser/ash/policy/reporting/arc_app_install_event_log_manager.h // Copyright 2018 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef CHROME_BROWSER_ASH_POLICY_REPORTING_ARC_APP_INSTALL_EVENT_LOG_MANAGER_H_ #define CHROME_BROWSER_ASH_POLICY_REPORTING_ARC_APP_INSTALL_EVENT_LOG_MANAGER_H_ #include <memory> #include <set> #include <string> #include "chrome/browser/ash/policy/reporting/arc_app_install_event_log.h" #include "chrome/browser/ash/policy/reporting/arc_app_install_event_log_uploader.h" #include "chrome/browser/ash/policy/reporting/arc_app_install_event_logger.h" #include "chrome/browser/ash/policy/reporting/install_event_log_manager.h" #include "components/policy/proto/device_management_backend.pb.h" class Profile; namespace policy { // Owns an |ArcAppInstallEventLog| for log storage and an // |ArcAppInstallEventLogger| for log collection. The // |ArcAppInstallEventUploader| is passed to the constructor and must outlive // |this|. class ArcAppInstallEventLogManager : public InstallEventLogManagerBase, public ArcAppInstallEventLogger::Delegate, public ArcAppInstallEventLogUploader::Delegate { public: // All accesses to the |profile|'s app push-install event log file must use // the same |log_task_runner_wrapper| to ensure correct I/O serialization. // |uploader| must outlive |this|. ArcAppInstallEventLogManager(LogTaskRunnerWrapper* log_task_runner_wrapper, ArcAppInstallEventLogUploader* uploader, Profile* profile); // Posts a task to |log_task_runner_| that stores the log to file and destroys // |log_|. |log_| thus outlives |this| but any pending callbacks are canceled // by invalidating weak pointers. ~ArcAppInstallEventLogManager() override; // Clears all data related to the app-install event log for |profile|. Must // not be called while an |ArcAppInstallEventLogManager| exists for |profile|. // This method and any other accesses to the |profile|'s app push-install // event log must use the same |log_task_runner_wrapper| to ensure correct I/O // serialization. static void Clear(LogTaskRunnerWrapper* log_task_runner_wrapper, Profile* profile); // ArcAppInstallEventLogger::Delegate: void Add( const std::set<std::string>& packages, const enterprise_management::AppInstallReportLogEvent& event) override; void GetAndroidId( ArcAppInstallEventLogger::Delegate::AndroidIdCallback) const override; // ArcAppInstallEventLogUploader::Delegate: void SerializeForUpload( ArcAppInstallEventLogUploader::Delegate::SerializationCallback callback) override; void OnUploadSuccess() override; private: // Once created, |ArcLog| runs in the background and must be accessed and // eventually destroyed via |log_task_runner_|. |ArcLog| outlives its parent // and stores the current log to disk in its destructor. // TODO(crbub/1092387): Remove this class to handle sequence checking in // ArcAppInstallEventLog. class ArcLog : public InstallEventLogManagerBase::InstallLog< enterprise_management::AppInstallReportLogEvent, ArcAppInstallEventLog> { public: ArcLog(); // Stores the current log to disk. ~ArcLog() override; // Serializes the log to a protobuf for upload. std::unique_ptr<enterprise_management::AppInstallReportRequest> Serialize(); private: // Ensures that methods are not called from the wrong thread. SEQUENCE_CHECKER(sequence_checker_); }; // |AppLogUpload| is owned by |owner_| and |owner_| outlives it. 
class AppLogUpload : public InstallEventLogManagerBase::LogUpload { public: explicit AppLogUpload(ArcAppInstallEventLogManager* owner); ~AppLogUpload() override; void StoreLog() override; void RequestUploadForUploader() override; private: ArcAppInstallEventLogManager* owner_; }; // Uploads logs to the server. ArcAppInstallEventLogUploader* const uploader_; // Helper that owns the log store. Once created, must only be accessed via // |log_task_runner_|. Outlives |this| and ensures the extension log is stored // to disk in its destructor. std::unique_ptr<ArcLog> log_; // Handles storing the logs and preparing them for upload. std::unique_ptr<AppLogUpload> app_log_upload_; // Collects log events and passes them to |this|. std::unique_ptr<ArcAppInstallEventLogger> logger_; }; } // namespace policy #endif // CHROME_BROWSER_ASH_POLICY_REPORTING_ARC_APP_INSTALL_EVENT_LOG_MANAGER_H_
1,546
380
/* * oxAuth is available under the MIT License (2008). See http://opensource.org/licenses/MIT for full text. * * Copyright (c) 2014, Gluu */ package org.gluu.oxauth.model.clientinfo; import org.gluu.oxauth.model.error.IErrorType; /** * @author <NAME> Date: 07.19.2012 */ public enum ClientInfoErrorResponseType implements IErrorType { INVALID_REQUEST("invalid_request"), INVALID_TOKEN("invalid_token"); private final String paramName; private ClientInfoErrorResponseType(String paramName) { this.paramName = paramName; } public static ClientInfoErrorResponseType fromString(String param) { if (param != null) { for (ClientInfoErrorResponseType err : ClientInfoErrorResponseType.values()) { if (param.equals(err.paramName)) { return err; } } } return null; } @Override public String toString() { return paramName; } @Override public String getParameter() { return paramName; } }
431
310
{ "name": "Hot Rod Deville III 410", "description": "A guitar amp.", "url": "https://www.fender.com/guitar-amplifiers/contemporary/hot-rod-deville-iii-410/2230100000.html" }
69
864
/********************************************************************************************************************** This file is part of the Control Toolbox (https://github.com/ethz-adrl/control-toolbox), copyright by ETH Zurich. Licensed under the BSD-2 license (see LICENSE file in main directory) **********************************************************************************************************************/ #pragma once #include <ct/core/types/StateVector.h> #include <ct/core/types/ControlVector.h> namespace ct { namespace core { //! interface class for a general switched linear system or linearized system /*! * Defines the interface for a switched linear system * * \tparam STATE_DIM size of state vector * \tparam CONTROL_DIM size of input vector */ template <size_t STATE_DIM, size_t CONTROL_DIM, typename SCALAR = double> class SwitchedLinearSystem : public LinearSystem<STATE_DIM, CONTROL_DIM, SCALAR> { public: EIGEN_MAKE_ALIGNED_OPERATOR_NEW typedef typename std::shared_ptr<LinearSystem<STATE_DIM, CONTROL_DIM, SCALAR>> LinearSystemPtr; typedef Switched<LinearSystemPtr> SwitchedLinearSystems; typedef ControlledSystem<STATE_DIM, CONTROL_DIM, SCALAR> Base; typedef typename Base::time_t time_t; typedef StateVector<STATE_DIM, SCALAR> state_vector_t; //!< state vector type typedef ControlVector<CONTROL_DIM, SCALAR> control_vector_t; //!< input vector type typedef StateMatrix<STATE_DIM, SCALAR> state_matrix_t; //!< state Jacobian type typedef StateControlMatrix<STATE_DIM, CONTROL_DIM, SCALAR> state_control_matrix_t; //!< input Jacobian type //! default constructor /*! * @param type system type */ SwitchedLinearSystem(const SwitchedLinearSystems& switchedLinearSystems, const ContinuousModeSequence& continuousModeSequence, const ct::core::SYSTEM_TYPE& type = ct::core::SYSTEM_TYPE::GENERAL) : LinearSystem<STATE_DIM, CONTROL_DIM, SCALAR>(type), switchedLinearSystems_(switchedLinearSystems), continuousModeSequence_(continuousModeSequence) { } //! copy constructor SwitchedLinearSystem(const SwitchedLinearSystem& arg) : LinearSystem<STATE_DIM, CONTROL_DIM, SCALAR>(arg), continuousModeSequence_(arg.continuousModeSequence_) { switchedLinearSystems_.clear(); for (auto& subSystem : arg.switchedLinearSystems_) { switchedLinearSystems_.emplace_back(subSystem->clone()); } } //! destructor virtual ~SwitchedLinearSystem(){}; //! deep cloning virtual SwitchedLinearSystem<STATE_DIM, CONTROL_DIM, SCALAR>* clone() const override { return new SwitchedLinearSystem(*this); }; using LinearSystem<STATE_DIM, CONTROL_DIM, SCALAR>::computeControlledDynamics; virtual const state_matrix_t& getDerivativeState(const state_vector_t& x, const control_vector_t& u, const time_t t = time_t(0.0)) override { auto mode = continuousModeSequence_.getPhaseFromTime(t); return switchedLinearSystems_[mode]->getDerivativeState(x, u, t); }; virtual const state_control_matrix_t& getDerivativeControl(const state_vector_t& x, const control_vector_t& u, const time_t t = time_t(0.0)) override { auto mode = continuousModeSequence_.getPhaseFromTime(t); return switchedLinearSystems_[mode]->getDerivativeControl(x, u, t); }; private: SwitchedLinearSystems switchedLinearSystems_; //!< Switched linear system container ContinuousModeSequence continuousModeSequence_; //!< the prespecified mode sequence }; } }
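To make the dispatch described in the comments above concrete, here is a small Python sketch of the same idea. The names (SwitchedLinearSketch, switch_times, derivative_state) are invented for illustration and are not part of ct::core; the sketch only shows how the mode active at time t selects which sub-system's Jacobians are returned.

import numpy as np

class SwitchedLinearSketch:
    """Toy illustration: return the (A, B) pair of the sub-system active at time t."""
    def __init__(self, subsystems, switch_times):
        self.subsystems = subsystems      # list of (A, B) matrix pairs, one per mode
        self.switch_times = switch_times  # ascending times at which the mode changes

    def _phase_from_time(self, t):
        # Number of switch times that have already passed gives the active mode.
        return int(np.searchsorted(self.switch_times, t, side='right'))

    def derivative_state(self, t):
        A, _ = self.subsystems[self._phase_from_time(t)]
        return A

    def derivative_control(self, t):
        _, B = self.subsystems[self._phase_from_time(t)]
        return B

# Two modes switching at t = 1.0.
sys0 = (np.eye(2), np.ones((2, 1)))
sys1 = (2 * np.eye(2), np.zeros((2, 1)))
switched = SwitchedLinearSketch([sys0, sys1], switch_times=[1.0])
print(switched.derivative_state(0.5))   # A of mode 0
print(switched.derivative_state(1.5))   # A of mode 1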
1,328
3,508
<reponame>Anshul1507/Leetcode package com.fishercoder.solutions; import java.util.HashMap; import java.util.Map; public class _677 { public static class Solution1 { public static class MapSum { Map<String, Integer> map; /** * Initialize your data structure here. */ public MapSum() { map = new HashMap<>(); } public void insert(String key, int val) { map.put(key, val); } public int sum(String prefix) { int sum = 0; for (String key : map.keySet()) { if (key.startsWith(prefix)) { sum += map.get(key); } } return sum; } } } }
475
348
{"nom":"Rhinau","circ":"5ème circonscription","dpt":"Bas-Rhin","inscrits":2197,"abs":1434,"votants":763,"blancs":63,"nuls":22,"exp":678,"res":[{"nuance":"LR","nom":"<NAME>","voix":342},{"nuance":"REG","nom":"<NAME>","voix":336}]}
92
3,172
# Third party code
#
# The following code is copied or modified from:
# https://github.com/ray-project/ray/blob/master/python/ray/rllib/utils/filter.py
import numpy as np


class SharedNoiseTable(object):
    """Shared noise table used by the learner and the actors.

    The learner and the actors build identical noise tables by passing the
    same seed. With identical tables, they can communicate noise by sending
    an index into the table instead of a numpy array of noise values.
    """

    def __init__(self, noise_size, seed=1024):
        self.noise_size = noise_size
        self.seed = seed
        self.noise = self._create_noise()

    def _create_noise(self):
        noise = np.random.RandomState(self.seed).randn(self.noise_size).astype(
            np.float32)
        return noise

    def get(self, i, dim):
        return self.noise[i:i + dim]

    def sample_index(self, dim):
        return np.random.randint(0, len(self.noise) - dim + 1)
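A minimal usage sketch of the class above, assuming SharedNoiseTable is importable as defined here; the table size, seed and dimension are illustrative values only.

import numpy as np

# Learner and actor build identical tables from the same size and seed.
learner_table = SharedNoiseTable(noise_size=1000, seed=1024)
actor_table = SharedNoiseTable(noise_size=1000, seed=1024)

dim = 10                             # perturbation dimension
idx = actor_table.sample_index(dim)  # actor samples a slice locally
# Only the integer index needs to be communicated; the learner recovers
# exactly the same noise slice from its own copy of the table.
assert np.allclose(learner_table.get(idx, dim), actor_table.get(idx, dim))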
363
708
from __future__ import division
import random

import numpy as np
from scipy.cluster.vq import kmeans2, whiten

'''
scipy already provides a k-means implementation (scipy.cluster.vq.kmeans2),
so strictly speaking there is no need to write another one; this class is
packaged here purely for convenience of use.
Whitening of the input space is not handled here; if you need it, apply
scipy.cluster.vq.whiten to the data first.
Reference: http://blog.pluskid.org/?p=17
'''


class KMEANSC:
    def __init__(self, X, K):
        self.X = np.array(X)
        self.K = int(K)
        self.N = self.X.shape[0]
        self.labels = np.zeros(self.N, dtype=int)
        # Initialize centroids with K distinct points sampled from the data.
        init_idx = random.sample(range(self.N), self.K)
        self.centroids = self.X[init_idx].astype(float)
        self.J = self.calcJ()

    def calcJ(self):
        # Sum of squared distances of every point to its assigned centroid.
        total = 0
        for i in range(self.N):
            total += np.sum((self.X[i] - self.centroids[self.labels[i]]) ** 2)
        return total

    def distmat(self, X, Y):
        '''Return the matrix of squared distances between points and centroids.'''
        dm = np.zeros((X.shape[0], Y.shape[0]))
        for i in range(X.shape[0]):
            for j in range(Y.shape[0]):
                dm[i][j] = np.sum((X[i] - Y[j]) ** 2)
        return dm

    def train(self, maxiter=100, threshold=0.1):
        '''Re-run the assignment/update loop until the decrease in J falls
        below threshold or maxiter iterations are reached.'''
        iter = 0
        while True:
            distmats = self.distmat(self.X, self.centroids)
            self.labels = distmats.argmin(axis=1)
            for j in range(self.K):
                idx_j = (self.labels == j).nonzero()
                self.centroids[j] = self.X[idx_j].mean(axis=0)
            nJ = self.calcJ()
            cJ = self.J - nJ
            self.J = nJ
            if cJ < threshold and iter != 0:
                break
            iter += 1
            if iter > maxiter:
                break

    def result(self):
        return self.centroids, self.labels, self.J
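A short usage sketch for the class above; the toy data and cluster count are made up for illustration.

import numpy as np

# Two well separated 2-D blobs, clustered into K = 2 groups.
data = np.vstack([np.random.randn(50, 2), np.random.randn(50, 2) + 5.0])
km = KMEANSC(data, 2)
km.train(maxiter=100, threshold=0.1)
centroids, labels, J = km.result()
print(centroids)  # one centroid near (0, 0), the other near (5, 5)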
715
14,668
<filename>media/midi/message_util.h<gh_stars>1000+ // Copyright 2013 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef MEDIA_MIDI_MESSAGE_UTIL_H_ #define MEDIA_MIDI_MESSAGE_UTIL_H_ #include <stddef.h> #include <stdint.h> #include <vector> #include "media/midi/midi_export.h" namespace midi { // Returns the length of a MIDI message in bytes. Never returns 4 or greater. // Returns 0 if |status_byte| is: // - not a valid status byte, namely data byte. // - MIDI System Exclusive message. // - End of System Exclusive message. // - Reserved System Common Message (0xf4, 0xf5) MIDI_EXPORT size_t GetMessageLength(uint8_t status_byte); // Checks if the specified byte is a valid data byte. MIDI_EXPORT bool IsDataByte(uint8_t data); // Checks if the specified byte is a valid system real time message. MIDI_EXPORT bool IsSystemRealTimeMessage(uint8_t data); // Checks if the specified byte is a valid system message. MIDI_EXPORT bool IsSystemMessage(uint8_t data); // Checks if |data| fulfills the requirements of MidiOutput.send API that is // defined in the Web MIDI spec. // - |data| must be any number of complete MIDI messages (data abbreviation // called "running status" is disallowed). // - 1-byte MIDI realtime messages can be placed at any position of |data|. MIDI_EXPORT bool IsValidWebMIDIData(const std::vector<uint8_t>& data); const uint8_t kSysExByte = 0xf0; const uint8_t kEndOfSysExByte = 0xf7; const uint8_t kSysMessageBitMask = 0xf0; const uint8_t kSysMessageBitPattern = 0xf0; const uint8_t kSysRTMessageBitMask = 0xf8; const uint8_t kSysRTMessageBitPattern = 0xf8; } // namespace midi #endif // MEDIA_MIDI_MESSAGE_UTIL_H_
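The comments above describe how the length of a MIDI message follows from its status byte. The Python sketch below restates that rule for illustration only; it is based on the general MIDI specification rather than the Chromium implementation, so treat the exact table as an assumption.

def get_message_length(status_byte):
    """Length in bytes implied by a MIDI status byte; 0 for data bytes,
    SysEx start/end and the reserved 0xF4/0xF5 system common messages."""
    if status_byte < 0x80:           # data byte, not a status byte
        return 0
    if status_byte < 0xF0:           # channel voice messages
        # Program change (0xC0-0xCF) and channel pressure (0xD0-0xDF)
        # carry one data byte; the other voice messages carry two.
        return 2 if 0xC0 <= status_byte <= 0xDF else 3
    # System common / real-time messages.
    return {
        0xF0: 0,                     # SysEx start: variable length, handled separately
        0xF1: 2, 0xF2: 3, 0xF3: 2,
        0xF4: 0, 0xF5: 0,            # reserved system common messages
        0xF6: 1,
        0xF7: 0,                     # end of SysEx
    }.get(status_byte, 1)            # 0xF8-0xFF: real-time, single byte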
598
390
/* * Copyright 2021 <NAME> * * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. */ package org.tinylog.util; import java.nio.charset.Charset; import java.nio.charset.spi.CharsetProvider; import java.util.Iterator; import java.util.List; import java.util.Objects; import java.util.stream.Collectors; import java.util.stream.Stream; import org.tinylog.writers.JsonWriterTest; /** * Service factory for {@link CustomTestCharset}. */ public class CustomTestCharsetProvider extends CharsetProvider { /** * All required charsets for {@link JsonWriterTest.IllegalCharsetTest}. */ public static final List<Charset> CHARSETS = Stream.of('\n', '\r', ' ', '\t', ',', '[', ']') .map(CustomTestCharset::new) .collect(Collectors.toList()); /** */ public CustomTestCharsetProvider() { } @Override public Iterator<Charset> charsets() { return CHARSETS.iterator(); } @Override public Charset charsetForName(final String charsetName) { return CHARSETS.stream() .filter(charset -> Objects.equals(charsetName, charset.name())) .findFirst() .orElse(null); } }
514
369
<filename>module-gui/gui/widgets/status-bar/Lock.cpp // Copyright (c) 2017-2021, Mudita <NAME>.o.o. All rights reserved. // For licensing, see https://github.com/mudita/MuditaOS/LICENSE.md #include "Lock.hpp" #include "Style.hpp" namespace gui::status_bar { constexpr auto lock = "locked_status"; Lock::Lock(Item *parent, uint32_t x, uint32_t y) : StatusBarWidgetBase(parent, x, y, 0, 0) { set(lock, style::status_bar::imageTypeSpecifier); } }; // namespace gui::status_bar
195
575
// Copyright 2019 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef CHROME_BROWSER_AUTOFILL_MOCK_ADDRESS_ACCESSORY_CONTROLLER_H_ #define CHROME_BROWSER_AUTOFILL_MOCK_ADDRESS_ACCESSORY_CONTROLLER_H_ #include "base/macros.h" #include "chrome/browser/autofill/address_accessory_controller.h" #include "components/autofill/core/browser/ui/accessory_sheet_data.h" #include "components/autofill/core/browser/ui/accessory_sheet_enums.h" #include "testing/gmock/include/gmock/gmock.h" class MockAddressAccessoryController : public autofill::AddressAccessoryController { public: MockAddressAccessoryController(); ~MockAddressAccessoryController() override; MOCK_METHOD(void, RegisterFillingSourceObserver, (FillingSourceObserver), (override)); MOCK_METHOD(base::Optional<autofill::AccessorySheetData>, GetSheetData, (), (const, override)); MOCK_METHOD(void, OnFillingTriggered, (autofill::FieldGlobalId, const autofill::UserInfo::Field&), (override)); MOCK_METHOD(void, OnOptionSelected, (autofill::AccessoryAction selected_action), (override)); MOCK_METHOD(void, OnToggleChanged, (autofill::AccessoryAction toggled_action, bool enabled), (override)); MOCK_METHOD(void, RefreshSuggestions, (), (override)); private: DISALLOW_COPY_AND_ASSIGN(MockAddressAccessoryController); }; #endif // CHROME_BROWSER_AUTOFILL_MOCK_ADDRESS_ACCESSORY_CONTROLLER_H_
708
367
# This module has an external summary
8