max_stars_count
int64
301
224k
text
stringlengths
6
1.05M
token_count
int64
3
727k
764
{"symbol": "AWX","address": "0x1EFdfC6146cAd8909817284AE99325EF1cAF623e","overview":{"en": ""},"email": "<EMAIL>","website": "https://aurus.io/","state": "NORMAL","links": {"blog": "https://medium.com/aurus-blog","twitter": "https://twitter.com/AurusOfficial","telegram": "https://t.me/AurusOfficial","github": ""}}
114
1,056
<gh_stars>1000+ /* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /* * DepResolverFactoryImplTest.java * JUnit based test * * Created on January 18, 2006, 7:28 PM */ package org.netbeans.modules.xml.retriever.catalog.impl; import java.io.File; import java.io.IOException; import java.io.OutputStream; import java.net.URI; import java.net.URISyntaxException; import java.net.URL; import java.util.logging.Formatter; import java.util.logging.Level; import javax.swing.text.Document; import junit.framework.*; import java.util.logging.Logger; import java.util.logging.SimpleFormatter; import java.util.logging.StreamHandler; import org.netbeans.editor.BaseDocument; import org.netbeans.modules.xml.retriever.catalog.Utilities; import org.netbeans.modules.xml.xam.locator.CatalogModelException; import org.netbeans.modules.xml.retriever.catalog.CatalogWriteModel; import org.netbeans.modules.xml.retriever.catalog.model.TestUtil; import org.netbeans.modules.xml.xam.ModelSource; import org.netbeans.modules.xml.xam.locator.CatalogModel; import org.openide.cookies.EditorCookie; import org.openide.filesystems.FileObject; import org.openide.filesystems.FileUtil; import org.openide.loaders.DataObject; import org.openide.loaders.DataObjectNotFoundException; import org.openide.util.Lookup; import org.openide.util.lookup.Lookups; /** * * @author girix */ public class CatalogModelTest extends TestCase { static { TestUtil.registerXMLKit(); } public CatalogModelTest(String testName) { super(testName); } protected void setUp() throws Exception { } protected void tearDown() throws Exception { } public static Test suite() { TestSuite suite = new TestSuite(CatalogModelTest.class); return suite; } public void testDepResolver() throws URISyntaxException, CatalogModelException, IOException { Logger logger = Logger.getLogger(CatalogModelTest.class.getName()); logger.setLevel(Level.ALL); StreamHandler sh = new MyHandler(System.out, new SimpleFormatter()); sh.setLevel(logger.getLevel()); //logger.addHandler(sh); CatalogFileWrapperDOMImpl.TEST_ENVIRONMENT = true; File catFile = new File(System.getProperty("java.io.tmpdir")+File.separator+CatalogWriteModel.PUBLIC_CATALOG_FILE_NAME+CatalogWriteModel.CATALOG_FILE_EXTENSION+".girish"); catFile.delete(); catFile.createNewFile(); FileObject catFO = FileUtil.toFileObject(FileUtil.normalizeFile(catFile)); URL url = getClass().getResource("dummyFile.txt"); FileObject peerfo = FileUtil.toFileObject(new File(url.toURI()).getAbsoluteFile()); System.out.println(catFile); CatalogWriteModel drz = new MyCatalogWriteModel(catFO); //CatalogWriteModel drz = new MyCatalogWriteModel(new File(System.getProperty("java.io.tmpdir"))); drz.addURI(new URI("dummy/dummy"), peerfo); int length = drz.getCatalogEntries().size(); assertEquals(1, length); //System.out.println("%%%%"+drz.getModelSource(new URI("dummy/dummy")).getFileObject()); //System.out.println("$$$$"+LSResourceResolverFactory.getDefault().resolveResource(null, null, null, "dummy/dummy", url.toURI().toString()).getSystemId()); //assertTrue(LSResourceResolverFactory.getDefault().resolveResource(null, null, null, "dummy/dummy", url.toURI().toString()).getSystemId().endsWith("dummyFile.txt")); FileObject fob = (FileObject) drz.getModelSource(new URI("dummy/dummy")).getLookup().lookup(FileObject.class); assertNotNull(fob); drz.removeURI(new URI("dummy/dummy")); length = drz.getCatalogEntries().size(); assertEquals(0, length); } class MyCatalogWriteModel extends CatalogWriteModelImpl { MyCatalogWriteModel(File file) throws IOException{ super(file); } MyCatalogWriteModel(FileObject fo) throws IOException{ super(fo); } /** * This method could be overridden by the Unit testcase to return a special * ModelSource object for a FileObject with custom impl of classes added to the lookup. * This is optional if both getDocument(FO) and createCatalogModel(FO) are overridden. */ protected ModelSource createModelSource(final FileObject thisFileObj, boolean editable) throws CatalogModelException{ assert thisFileObj != null : "Null file object."; final CatalogModel catalogModel = createCatalogModel(thisFileObj); final DataObject dobj; try { dobj = DataObject.find(thisFileObj); } catch (DataObjectNotFoundException ex) { throw new CatalogModelException(ex); } Lookup proxyLookup = Lookups.proxy( new Lookup.Provider() { public Lookup getLookup() { Document document = null; Logger l = Logger.getLogger(getClass().getName()); document = getDocument(thisFileObj); return Lookups.fixed(new Object[] { thisFileObj, document, dobj, catalogModel }); } } ); return new ModelSource(proxyLookup, editable); } private Document getDocument(FileObject fo){ Document result = null; try { DataObject dObject = DataObject.find(fo); EditorCookie ec = (EditorCookie)dObject.getCookie(EditorCookie.class); Document doc = ec.openDocument(); if(doc instanceof BaseDocument) return doc; result = new org.netbeans.editor.BaseDocument(true, fo.getMIMEType()); String str = doc.getText(0, doc.getLength()); result.insertString(0,str,null); } catch (Exception dObjEx) { return null; } return result; } } class MyHandler extends StreamHandler{ public MyHandler(OutputStream out, Formatter fmt){ super(out, fmt); } public void publish(java.util.logging.LogRecord record) { super.publish(record); flush(); } } }
3,064
593
<filename>exploitable/tests/testBranchAv.c int main(int argc, char *argv[]) { #if defined(__arm__) void (*p)() = (void*)0xFFFFFFFF; p(); return 0; #else asm ("call *0xFFFFFFF"); #endif return 0; }
102
2,151
// Copyright 2017 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "components/feature_engagement/internal/test/event_util.h" #include "components/feature_engagement/internal/proto/event.pb.h" #include "testing/gtest/include/gtest/gtest.h" namespace feature_engagement { namespace test { void SetEventCountForDay(Event* event, uint32_t day, uint32_t count) { Event_Count* event_count = event->add_events(); event_count->set_day(day); event_count->set_count(count); } void VerifyEventCount(const Event* event, uint32_t day, uint32_t count) { bool found_day = false; for (const auto& event_count : event->events()) { if (event_count.day() == day) { EXPECT_FALSE(found_day); found_day = true; EXPECT_EQ(count, event_count.count()); } } EXPECT_TRUE(found_day); } void VerifyEventsEqual(const Event* a, const Event* b) { if (!a || !b) { // If one of the events are nullptr, both should be nullptr. ASSERT_EQ(a, b); return; } EXPECT_EQ(a->name(), b->name()); EXPECT_EQ(a->events_size(), b->events_size()); for (int i = 0; i < a->events_size(); ++i) { VerifyEventCount(b, a->events(i).day(), a->events(i).count()); } } } // namespace test } // namespace feature_engagement
501
763
<filename>projects/batfish/src/test/java/org/batfish/representation/juniper/FwFromFragmentOffsetTest.java package org.batfish.representation.juniper; import static org.junit.Assert.assertEquals; import com.google.common.collect.ImmutableSet; import org.batfish.datamodel.HeaderSpace; import org.batfish.datamodel.SubRange; import org.junit.Test; /** Class for {@link FwFromFragmentOffset} */ public class FwFromFragmentOffsetTest { @Test public void testToHeaderspace_notExcept() { FwFromFragmentOffset from = new FwFromFragmentOffset(new SubRange(1, 2), false); assertEquals( from.toHeaderspace(), HeaderSpace.builder().setFragmentOffsets(ImmutableSet.of(new SubRange(1, 2))).build()); } @Test public void testToHeaderspace_except() { FwFromFragmentOffset from = new FwFromFragmentOffset(new SubRange(1, 2), true); assertEquals( from.toHeaderspace(), HeaderSpace.builder().setNotFragmentOffsets(ImmutableSet.of(new SubRange(1, 2))).build()); } }
367
479
<gh_stars>100-1000 # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from __future__ import print_function import os import pwd import re import sys import time from twitter.common import app from apache.thermos.cli.common import get_path_detector from apache.thermos.common.ckpt import CheckpointDispatcher from apache.thermos.monitoring.detector import TaskDetector from gen.apache.thermos.ttypes import ProcessState, TaskState @app.command @app.command_option("--verbosity", default=0, dest='verbose', type='int', help="Display more verbosity") @app.command_option("--only", default=None, dest='only', type='choice', choices=('active', 'finished'), help="Display only tasks of this type.") def status(args, options): """Get the status of task(s). Usage: thermos status [options] [task_name(s) or task_regexp(s)] """ path_detector = get_path_detector() def format_task(detector, task_id): checkpoint_filename = detector.get_checkpoint(task_id) checkpoint_stat = os.stat(checkpoint_filename) try: checkpoint_owner = pwd.getpwuid(checkpoint_stat.st_uid).pw_name except KeyError: checkpoint_owner = 'uid:%s' % checkpoint_stat.st_uid print(' %-20s [owner: %8s]' % (task_id, checkpoint_owner), end='') if options.verbose == 0: print() if options.verbose > 0: state = CheckpointDispatcher.from_file(checkpoint_filename) if state is None or state.header is None: print(' - checkpoint stream CORRUPT or outdated format') return print(' state: %8s' % TaskState._VALUES_TO_NAMES.get(state.statuses[-1].state, 'Unknown'), end='') print(' start: %25s' % time.asctime(time.localtime(state.header.launch_time_ms / 1000.0))) if options.verbose > 1: print(' user: %s' % state.header.user, end='') if state.header.ports: print(' ports: %s' % ' '.join('%s -> %s' % (key, val) for key, val in state.header.ports.items())) else: print(' ports: None') print(' sandbox: %s' % state.header.sandbox) if options.verbose > 2: print(' process table:') for process, process_history in state.processes.items(): print(' - %s runs: %s' % (process, len(process_history)), end='') last_run = process_history[-1] print(' last: pid=%s, rc=%s, finish:%s, state:%s' % ( last_run.pid or 'None', last_run.return_code if last_run.return_code is not None else '', time.asctime(time.localtime(last_run.stop_time)) if last_run.stop_time else 'None', ProcessState._VALUES_TO_NAMES.get(last_run.state, 'Unknown'))) print() matchers = map(re.compile, args or ['.*']) active = [] finished = [] for root in path_detector.get_paths(): detector = TaskDetector(root) active.extend((detector, t_id) for _, t_id in detector.get_task_ids(state='active') if any(pattern.match(t_id) for pattern in matchers)) finished.extend((detector, t_id)for _, t_id in detector.get_task_ids(state='finished') if any(pattern.match(t_id) for pattern in matchers)) found = False if options.only is None or options.only == 'active': if active: print('Active tasks:') found = True for detector, task_id in active: format_task(detector, task_id) print() if options.only is None or options.only == 'finished': if finished: print('Finished tasks:') found = True for detector, task_id in finished: format_task(detector, task_id) print() if not found: print('No tasks found.') sys.exit(1)
1,624
4,452
<gh_stars>1000+ { "scripts": { "start:dev": "run-p build:*:watch start:docusaurus", "start:docusaurus": "docusaurus-start", "build:styles": "sass sass/main.scss static/css/custom.css", "build:styles:watch": "sass sass/main.scss static/css/custom.css --watch", "build": "run-s build:styles build:docusaurus", "build:docusaurus": "docusaurus-build", "version": "docusaurus-version", "rename-version": "docusaurus-rename-version", "publish": "firebase deploy" }, "devDependencies": { "docusaurus": "^1.14.6", "firebase-tools": "^8.19.0", "npm-run-all": "^4.1.5", "sass": "^1.30.0" }, "version": "4.0.0" }
300
667
/** * Tencent is pleased to support the open source community by making Tseer available. * * Copyright (C) 2018 THL A29 Limited, a Tencent company. All rights reserved. * * Licensed under the BSD 3-Clause License (the "License"); you may not use this file except * in compliance with the License. You may obtain a copy of the License at * * https://opensource.org/licenses/BSD-3-Clause * * Unless required by applicable law or agreed to in writing, software distributed * under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR * CONDITIONS OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. */ #ifndef __TSEER_API_CONN_H_ #define __TSEER_API_CONN_H_ /********************************************************* * 文件封装了使用TUP访问TARS服务的操作 *********************************************************/ #include <time.h> #include <string> namespace Tseerapi { class Conn { public: //模调数据上报到Agent static int NodeStatSendAgent(const char *sendBuff, unsigned int sendLen, int timeOut, std::string &errMsg); //与Agent通信获取节点信息 static int QueryAndRecvRouterFromAgent(const char *sendBuff, unsigned int sendLen, int timeOut, char *recvBuff, unsigned int& recvSize, std::string &errMsg); //与Registry通信获取节点信息 static int QueryAndRecvRouterFromRegistry(const char *sendBuff, unsigned int sendLen, int timeOut, char *recvBuff, unsigned int& recvSize, const std::string &registryIp, std::string &errMsg); }; } #endif
558
332
#ifndef SLACK_H #define SLACK_H #include <stdbool.h> #include "json-actor-boxed.h" #include "common.h" #include "logconf.h" /* see specs/slack/ for specs */ #include "specs-code/slack/one-specs.h" #define SLACK_BASE_API_URL "https://slack.com/api" struct slack; /* forward declaration */ /** @todo generate as specs */ enum slack_sm_types { SLACK_SOCKETMODE_TYPE_NONE = 0, /* EVENTS API ENUMS */ SLACK_SOCKETMODE_TYPE_MESSAGE, /* INTERACTION ENUMS */ SLACK_SOCKETMODE_TYPE_BLOCK_ACTIONS, SLACK_SOCKETMODE_TYPE_MESSAGE_ACTIONS, SLACK_SOCKETMODE_TYPE_VIEW_CLOSED, SLACK_SOCKETMODE_TYPE_VIEW_SUBMISSION }; typedef void (*slack_on_event)(struct slack *client, const char payload[], size_t len); struct slack *slack_config_init(const char config_file[]); void slack_cleanup(struct slack *client); void slack_set_on_idle(struct slack *client, slack_on_event callback); void slack_set_on_hello(struct slack *client, slack_on_event callback); void slack_set_on_message(struct slack *client, slack_on_event callback); void slack_set_on_block_actions(struct slack *client, slack_on_event callback); void slack_set_on_message_actions(struct slack *client, slack_on_event callback); void slack_set_on_view_closed(struct slack *client, slack_on_event callback); void slack_set_on_view_submission(struct slack *client, slack_on_event callback); void slack_run(struct slack *client); ORCAcode slack_apps_connections_open(struct slack *client, struct sized_buffer *ret); ORCAcode slack_auth_test(struct slack *client, struct sized_buffer *ret); ORCAcode slack_chat_post_message(struct slack *client, struct slack_chat_post_message_params *params, struct sized_buffer *ret); ORCAcode slack_users_info(struct slack *client, struct slack_users_info_params *params, struct sized_buffer *ret); typedef enum slack_event_scheduler { /** this event has been handled */ SLACK_EVENT_IGNORE, /** handle this event in main thread */ SLACK_EVENT_MAIN_THREAD, /** handle this event in a worker thread */ SLACK_EVENT_WORKER_THREAD } slack_event_scheduler_t; typedef slack_event_scheduler_t (*slack_on_scheduler)( struct slack *client, struct sized_buffer *event_data, enum slack_sm_types type); void slack_set_event_scheduler(struct slack *client, slack_on_scheduler callback); #endif /* SLACK_H */
1,122
861
<gh_stars>100-1000 /* * Copyright 2016-2020 <NAME> * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package hu.akarnokd.rxjava3.interop; import static org.junit.Assert.*; import static org.mockito.ArgumentMatchers.anyLong; import static org.mockito.Mockito.*; import java.util.List; import java.util.concurrent.atomic.*; import org.junit.Test; import org.reactivestreams.Subscription; import io.reactivex.rxjava3.exceptions.ProtocolViolationException; import io.reactivex.rxjava3.internal.subscriptions.BooleanSubscription; import io.reactivex.rxjava3.plugins.RxJavaPlugins; public class SubscriptionHelperTest { @Test public void checkEnum() { TestHelper.checkEnum(SubscriptionHelper.class); } @Test public void validateNullThrows() { List<Throwable> errors = TestHelper.trackPluginErrors(); try { SubscriptionHelper.validate(null, null); TestHelper.assertError(errors, 0, NullPointerException.class, "next is null"); } finally { RxJavaPlugins.reset(); } } @Test public void cancelNoOp() { SubscriptionHelper.CANCELLED.cancel(); } @Test public void cancelRace() { for (int i = 0; i < TestHelper.RACE_DEFAULT_LOOPS; i++) { final AtomicReference<Subscription> atomicSubscription = new AtomicReference<>(); Runnable r = new Runnable() { @Override public void run() { SubscriptionHelper.cancel(atomicSubscription); } }; TestHelper.race(r, r); } } @Test public void invalidDeferredRequest() { AtomicReference<Subscription> atomicSubscription = new AtomicReference<>(); AtomicLong r = new AtomicLong(); List<Throwable> errors = TestHelper.trackPluginErrors(); try { SubscriptionHelper.deferredRequest(atomicSubscription, r, -99); TestHelper.assertError(errors, 0, IllegalArgumentException.class, "n > 0 required but it was -99"); } finally { RxJavaPlugins.reset(); } } @Test public void deferredRace() { for (int i = 0; i < TestHelper.RACE_DEFAULT_LOOPS; i++) { final AtomicReference<Subscription> atomicSubscription = new AtomicReference<>(); final AtomicLong r = new AtomicLong(); final AtomicLong q = new AtomicLong(); final Subscription a = new Subscription() { @Override public void request(long n) { q.addAndGet(n); } @Override public void cancel() { } }; Runnable r1 = new Runnable() { @Override public void run() { SubscriptionHelper.deferredSetOnce(atomicSubscription, r, a); } }; Runnable r2 = new Runnable() { @Override public void run() { SubscriptionHelper.deferredRequest(atomicSubscription, r, 1); } }; TestHelper.race(r1, r2); assertSame(a, atomicSubscription.get()); assertEquals(1, q.get()); assertEquals(0, r.get()); } } @Test public void setOnceAndRequest() { AtomicReference<Subscription> ref = new AtomicReference<>(); Subscription sub = mock(Subscription.class); assertTrue(SubscriptionHelper.setOnce(ref, sub, 1)); verify(sub).request(1); verify(sub, never()).cancel(); List<Throwable> errors = TestHelper.trackPluginErrors(); try { sub = mock(Subscription.class); assertFalse(SubscriptionHelper.setOnce(ref, sub, 1)); verify(sub, never()).request(anyLong()); verify(sub).cancel(); TestHelper.assertError(errors, 0, ProtocolViolationException.class); } finally { RxJavaPlugins.reset(); } } @Test public void reportDisposableSet() throws Throwable { TestHelper.withErrorTracking(errors -> { SubscriptionHelper.validate(new BooleanSubscription(), new BooleanSubscription()); TestHelper.assertError(errors, 0, ProtocolViolationException.class); }); } @Test public void validate() { assertTrue(SubscriptionHelper.validate(null, new BooleanSubscription())); } }
2,155
354
<filename>libs/libpeconv/libpeconv/include/peconv/buffer_util.h #pragma once #include <Windows.h> namespace peconv { //validates pointers, checks if the particular field is inside the given buffer bool validate_ptr(const LPVOID buffer_bgn, SIZE_T buffer_size, const LPVOID field_bgn, SIZE_T field_size); //----------------------------------------------------------------------------------- // // supported buffers: // typedef PBYTE UNALIGNED_BUF; // not aligned to the beginning of section typedef PBYTE ALIGNED_BUF; //always starting from the beginning of the new section // // alloc/free unaligned buffers: // //allocates a buffer that does not have to start from the beginning of the section UNALIGNED_BUF alloc_unaligned(size_t buf_size); //frees buffer allocated by alloc_unaligned: void free_unaligned(UNALIGNED_BUF section_buffer); // // alloc/free aligned buffers: // //allocates buffer starting from the beginning of the section (this function is a wrapper for VirtualAlloc) ALIGNED_BUF alloc_aligned(size_t buffer_size, DWORD protect, ULONGLONG desired_base=NULL); //frees buffer allocated by alloc_alligned: bool free_aligned(ALIGNED_BUF buffer, size_t buffer_size=0); //PE buffers (wrappers) ALIGNED_BUF alloc_pe_buffer(size_t buffer_size, DWORD protect, ULONGLONG desired_base=NULL); // Free loaded module (wrapper) bool free_pe_buffer(ALIGNED_BUF buffer, size_t buffer_size=0); UNALIGNED_BUF alloc_pe_section(size_t buf_size); void free_pe_section(UNALIGNED_BUF section_buffer); }; //namespace peconv
526
903
package org.develnext.jphp.core.tokenizer.token.expr.value.macro; import org.develnext.jphp.core.tokenizer.TokenType; import org.develnext.jphp.core.tokenizer.TokenMeta; public class FileMacroToken extends MacroToken { public FileMacroToken(TokenMeta meta) { super(meta, TokenType.T_FILE); } }
117
1,444
<reponame>nnadams/mage package mage.cards.p; import java.util.UUID; import mage.abilities.Ability; import mage.abilities.effects.Effect; import mage.abilities.effects.OneShotEffect; import mage.abilities.effects.common.SacrificeEffect; import mage.cards.CardImpl; import mage.cards.CardSetInfo; import mage.cards.CardsImpl; import mage.choices.ChooseFriendsAndFoes; import mage.constants.CardType; import mage.constants.Outcome; import mage.constants.Zone; import mage.filter.StaticFilters; import mage.game.Game; import mage.players.Player; import mage.target.common.TargetCardInLibrary; import mage.target.targetpointer.FixedTarget; /** * * @author TheElk801 */ public final class PirsWhim extends CardImpl { public PirsWhim(UUID ownerId, CardSetInfo setInfo) { super(ownerId, setInfo, new CardType[]{CardType.SORCERY}, "{3}{G}"); // For each player, choose friend or foe. Each friend searches their library for a land card, puts it on the battlefield tapped, then shuffles their library. Each foe sacrifices an artifact or enchantment they control. this.getSpellAbility().addEffect(new PirsWhimEffect()); } private PirsWhim(final PirsWhim card) { super(card); } @Override public PirsWhim copy() { return new PirsWhim(this); } } class PirsWhimEffect extends OneShotEffect { PirsWhimEffect() { super(Outcome.Benefit); this.staticText = "For each player, choose friend or foe. " + "Each friend searches their library for a land card, " + "puts it onto the battlefield tapped, then shuffles. " + "Each foe sacrifices an artifact or enchantment they control."; } PirsWhimEffect(final PirsWhimEffect effect) { super(effect); } @Override public PirsWhimEffect copy() { return new PirsWhimEffect(this); } @Override public boolean apply(Game game, Ability source) { Player controller = game.getPlayer(source.getControllerId()); ChooseFriendsAndFoes choice = new ChooseFriendsAndFoes(); if (controller != null && !choice.chooseFriendOrFoe(controller, source, game)) { return false; } for (Player player : choice.getFriends()) { if (player != null) { TargetCardInLibrary target = new TargetCardInLibrary(0, 1, StaticFilters.FILTER_CARD_LAND); if (player.searchLibrary(target, source, game)) { player.moveCards(new CardsImpl(target.getTargets()).getCards(game), Zone.BATTLEFIELD, source, game, true, false, true, null); player.shuffleLibrary(source, game); } } } for (Player player : choice.getFoes()) { if (player != null) { Effect effect = new SacrificeEffect(StaticFilters.FILTER_PERMANENT_ARTIFACT_OR_ENCHANTMENT, 1, ""); effect.setTargetPointer(new FixedTarget(player.getId(), game)); effect.apply(game, source); } } return true; } }
1,221
940
<reponame>sicara/mentat import numpy as np from tf_explain.callbacks.vanilla_gradients import VanillaGradientsCallback def test_should_call_vanilla_gradients_callback( random_data, convolutional_model, output_dir, mocker ): images, labels = random_data score_model = convolutional_model vanilla_gradient_callback = VanillaGradientsCallback( validation_data=random_data, class_index=0, output_dir=output_dir ) mock_explainer = mocker.MagicMock( get_score_model=mocker.MagicMock(return_value=score_model), explain_score_model=mocker.MagicMock(return_value=np.zeros((28, 28))), ) vanilla_gradient_callback.explainer = mock_explainer callbacks = [vanilla_gradient_callback] convolutional_model.fit(images, labels, batch_size=2, epochs=1, callbacks=callbacks) mock_explainer.get_score_model.assert_called_once() mock_explainer.explain_score_model.assert_called_once_with( random_data, score_model, 0 ) assert len(list(output_dir.iterdir())) == 1 def test_should_only_compute_score_model_once( random_data, convolutional_model, output_dir, mocker ): """ Tests that the Vanilla Gradients explainer only computes the score model once during training. This improves performance as it ensures the gradients are always calculated with the same score model, which prevents tf.function retracing. """ images, labels = random_data score_model = convolutional_model vanilla_gradient_callback = VanillaGradientsCallback( validation_data=random_data, class_index=0, output_dir=output_dir ) mock_explainer = mocker.MagicMock( get_score_model=mocker.MagicMock(return_value=score_model), explain_score_model=mocker.MagicMock(return_value=np.zeros((28, 28))), ) vanilla_gradient_callback.explainer = mock_explainer callbacks = [vanilla_gradient_callback] # Two epochs convolutional_model.fit(images, labels, batch_size=2, epochs=2, callbacks=callbacks) # Score model only computed once mock_explainer.get_score_model.assert_called_once()
755
1,066
/* * Copyright 2017 GcsSloop * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * Last modified 2017-03-12 02:50:16 * * GitHub: https://github.com/GcsSloop * Website: http://www.gcssloop.com * Weibo: http://weibo.com/GcsSloop */ package com.gcssloop.diycode.utils; import android.content.Context; import android.support.annotation.NonNull; import android.util.LruCache; import com.gcssloop.diycode_sdk.api.news.bean.New; import com.gcssloop.diycode_sdk.api.sites.bean.Sites; import com.gcssloop.diycode_sdk.api.topic.bean.TopicContent; import com.gcssloop.diycode_sdk.api.topic.bean.TopicReply; import com.gcssloop.diycode_sdk.api.user.bean.UserDetail; import com.gcssloop.diycode_sdk.utils.ACache; import java.io.File; import java.io.Serializable; import java.util.ArrayList; import java.util.List; /** * 数据缓存工具 */ public class DataCache { private static int M = 1024 * 1024; ACache mDiskCache; LruCache<String, Object> mLruCache; public DataCache(Context context) { mDiskCache = ACache.get(new File(FileUtil.getExternalCacheDir(context.getApplicationContext(), "diy-data"))); mLruCache = new LruCache<>(5 * M); } public <T extends Serializable> void saveListData(String key, List<T> data) { ArrayList<T> datas = (ArrayList<T>) data; mLruCache.put(key, datas); mDiskCache.put(key, datas, ACache.TIME_WEEK); // 数据缓存时间为 1 周 } public <T extends Serializable> void saveData(@NonNull String key, @NonNull T data) { mLruCache.put(key, data); mDiskCache.put(key, data, ACache.TIME_WEEK); // 数据缓存时间为 1 周 } public <T extends Serializable> T getData(@NonNull String key) { T result = (T) mLruCache.get(key); if (result == null) { result = (T) mDiskCache.getAsObject(key); if (result != null) { mLruCache.put(key, result); } } return result; } public void removeDate(String key) { mDiskCache.remove(key); } public void saveTopicContent(TopicContent content) { saveData("topic_content_" + content.getId(), content); String preview = HtmlUtil.Html2Text(content.getBody_html()); if (preview.length() > 100) { preview = preview.substring(0, 100); } saveData("topic_content_preview" + content.getId(), preview); } public TopicContent getTopicContent(int id) { return getData("topic_content_" + id); } public String getTopicPreview(int id) { String key = "topic_content_preview" + id; return getData(key); } public void saveTopicRepliesList(int topic_id, List<TopicReply> replyList) { ArrayList<TopicReply> replies = new ArrayList<>(replyList); saveData("topic_reply_" + topic_id, replies); } public List<TopicReply> getTopicRepliesList(int topic_id) { return getData("topic_reply_" + topic_id); } public void saveTopicsListObj(List<Object> topicList) { ArrayList<Object> topics = new ArrayList<>(topicList); saveData("topic_list_obj_", topics); } public List<Object> getTopicsListObj() { return getData("topic_list_obj_"); } public void saveNewsList(List<New> newList) { ArrayList<New> news = new ArrayList<>(newList); saveData("news_list_", news); } public List<New> getNewsList() { return getData("news_list_"); } public void saveNewsListObj(List<Object> newList) { ArrayList<Object> news = new ArrayList<>(newList); saveData("news_list_obj_", news); } public List<Object> getNewsListObj() { return getData("news_list_obj_"); } public void saveMe(UserDetail user) { saveData("Gcs_Me_", user); } public UserDetail getMe() { return getData("Gcs_Me_"); } public void removeMe() { removeDate("Gcs_Me_"); } public void saveSites(List<Sites> sitesList) { saveListData("sites_", sitesList); } public List<Sites> getSites() { return getData("sites_"); } public <T extends Serializable> void saveSitesItems(List<T> sitesList) { saveListData("sites_item_", sitesList); } public <T extends Serializable> ArrayList<T> getSitesItems() { return getData("sites_item_"); } }
1,979
5,169
{ "name": "OGSwitch", "version": "1.0", "summary": "Switch Control for macOS", "homepage": "https://github.com/OskarGroth/OGSwitch", "license": { "type": "MIT", "file": "LICENSE" }, "authors": { "<NAME>": "<EMAIL>" }, "source": { "git": "https://github.com/OskarGroth/OGSwitch.git", "tag": "1.0" }, "platforms": { "osx": "10.9" }, "source_files": "OGSwitch/OGSwitch.{swift}", "requires_arc": true, "screenshots": "https://raw.githubusercontent.com/OskarGroth/OGSwitch/master/screenshot.jpg", "social_media_url": "https://twitter.com/cindoriapps", "pushed_with_swift_version": "3.0" }
276
14,090
<reponame>Zander-Davidson/Neural-Network-Exercise """ relu ~~~~ Plots a graph of the squashing function used by a rectified linear unit.""" import numpy as np import matplotlib.pyplot as plt z = np.arange(-2, 2, .1) zero = np.zeros(len(z)) y = np.max([zero, z], axis=0) fig = plt.figure() ax = fig.add_subplot(111) ax.plot(z, y) ax.set_ylim([-2.0, 2.0]) ax.set_xlim([-2.0, 2.0]) ax.grid(True) ax.set_xlabel('z') ax.set_title('Rectified linear unit') plt.show()
232
994
<reponame>IT-Enthusiast-Nepal/Windows-Driver-Frameworks // // Copyright (C) Microsoft. All rights reserved. // #ifndef _PNPPRIVKM_H_ #define _PNPPRIVKM_H_ // public headers #include "WdfDmaEnabler.h" // private headers #include "FxIrpQueue.hpp" #include "FxCallback.hpp" // <FxSystemWorkItem.hpp> __drv_functionClass(EVT_SYSTEMWORKITEM) __drv_maxIRQL(PASSIVE_LEVEL) __drv_maxFunctionIRQL(DISPATCH_LEVEL) __drv_sameIRQL typedef VOID EVT_SYSTEMWORKITEM( __in PVOID Parameter ); typedef EVT_SYSTEMWORKITEM *PFN_WDF_SYSTEMWORKITEM; // </FxSystemWorkItem.hpp> #include "FxCallbackSpinlock.hpp" #include "FxCallbackMutexLock.hpp" #include "FxPackage.hpp" #include "IfxMemory.hpp" #include "FxCallback.hpp" #include "FxRequestContext.hpp" #include "FxRequestContextTypes.h" #include "FxRequestBase.hpp" #include "FxRequest.hpp" #include "FxPkgPnp.hpp" #include "FxPkgIo.hpp" #include "FxIoQueue.hpp" #include "FxDmaEnabler.hpp" #include "FxSystemWorkItem.hpp" #include "FxDsf.h" // DSF support. #include <device_common.h> #include "FxTelemetry.hpp" _Must_inspect_result_ NTSTATUS SendDeviceUsageNotificationWorker( __in MxDeviceObject* RelatedDevice, __inout FxIrp* RelatedIrp, __in FxIrp* OriginalIrp, __in BOOLEAN Revert ); IO_WORKITEM_ROUTINE _DeviceUsageNotificationWorkItem; struct FxUsageWorkitemParameters { FxUsageWorkitemParameters( VOID ) { RelatedDevice = NULL; RelatedIrp = NULL; OriginalIrp = NULL; Status = STATUS_UNSUCCESSFUL; } _In_ MxDeviceObject* RelatedDevice; _In_ FxIrp* RelatedIrp; _In_ FxIrp* OriginalIrp; _In_ BOOLEAN Revert; _In_ FxCREvent Event; _Out_ NTSTATUS Status; }; #endif // _PNPPRIVKM_H_
799
1,091
import tkinter as tk border_effects = { "flat": tk.FLAT, "sunken": tk.SUNKEN, "raised": tk.RAISED, "groove": tk.GROOVE, "ridge": tk.RIDGE, } window = tk.Tk() for relief_name, relief in border_effects.items(): frame = tk.Frame(master=window, relief=relief, borderwidth=5) frame.pack(side=tk.LEFT) label = tk.Label(master=frame, text=relief_name) label.pack() window.mainloop()
191
2,706
//============================================================================ // // SSSS tt lll lll // SS SS tt ll ll // SS tttttt eeee ll ll aaaa // SSSS tt ee ee ll ll aa // SS tt eeeeee ll ll aaaaa -- "An Atari 2600 VCS Emulator" // SS SS tt ee ll ll aa aa // SSSS ttt eeeee llll llll aaaaa // // Copyright (c) 1995-2014 by <NAME>, <NAME> // and the Stella Team // // See the file "License.txt" for information on usage and redistribution of // this file, and for a DISCLAIMER OF ALL WARRANTIES. // // $Id: DebuggerSystem.hxx 2838 2014-01-17 23:34:03Z stephena $ //============================================================================ #ifndef DEBUGGER_SYSTEM_HXX #define DEBUGGER_SYSTEM_HXX class Debugger; #include "Console.hxx" /** The DebuggerState class is used as a base class for state in all DebuggerSystem objects. We make it a class so we can take advantage of the copy constructor. */ class DebuggerState { public: DebuggerState() { } ~DebuggerState() { } }; /** The base class for all debugger objects. Its real purpose is to clean up the Debugger API, partitioning it into separate subsystems. */ class DebuggerSystem { public: DebuggerSystem(Debugger& dbg, Console& console) : myDebugger(dbg), myConsole(console), mySystem(console.system()) { } virtual ~DebuggerSystem() { } virtual const DebuggerState& getState() = 0; virtual const DebuggerState& getOldState() = 0; virtual void saveOldState() = 0; virtual string toString() = 0; protected: Debugger& myDebugger; Console& myConsole; System& mySystem; }; #endif
641
1,768
<filename>tlatools/org.lamport.tlatools/test/tlc2/tool/distributed/fp/FPSetManagerTest.java /******************************************************************************* * Copyright (c) 2016 Microsoft Research. All rights reserved. * * The MIT License (MIT) * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies * of the Software, and to permit persons to whom the Software is furnished to do * so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in all * copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. * * Contributors: * <NAME> - initial API and implementation ******************************************************************************/ package tlc2.tool.distributed.fp; import java.io.File; import java.io.IOException; import java.rmi.RemoteException; import java.util.ArrayList; import java.util.HashSet; import java.util.List; import java.util.Set; import org.junit.Assert; import org.junit.Before; import org.junit.Test; import tlc2.tool.distributed.fp.FPSetManager.FPSets; import tlc2.tool.fp.FPSet; import tlc2.tool.fp.FPSetConfiguration; import tlc2.tool.fp.FPSetFactory; public class FPSetManagerTest { protected static final String tmpdir = System.getProperty("java.io.tmpdir") + File.separator + "FPSetTest" + System.currentTimeMillis(); @Before public void before() { new File(tmpdir).mkdirs(); } @Test public void test2() throws IOException { doTest(2); } @Test public void test3() throws IOException { doTest(3); } @Test public void test4() throws IOException { doTest(4); } @Test public void test5() throws IOException { doTest(5); } @Test public void test8() throws IOException { doTest(8); } private void doTest(int expectedNumOfServers) throws RemoteException, IOException, FPSetManagerException { final FPSetConfiguration fpSetConfiguration = new FPSetConfiguration(); fpSetConfiguration.setFpBits(1); // two nested FPSets final List<FPSets> sets = new ArrayList<FPSets>(); for (int i = 0; i < fpSetConfiguration.getMultiFPSetCnt(); i++) { final FPSet fpSet = FPSetFactory.getFPSet(fpSetConfiguration); fpSet.init(1, tmpdir, "test" + expectedNumOfServers); sets.add(new FPSets(fpSet, "localhost" + i)); } final IFPSetManager manager = new DynamicFPSetManager(expectedNumOfServers); for (FPSets fpSets : sets) { manager.register(fpSets.getFpset(), fpSets.getFpset().toString()); } // Index uses LSBs Assert.assertEquals(0, manager.getFPSetIndex(0)); Assert.assertEquals(1, manager.getFPSetIndex(1)); Assert.assertEquals(0, manager.getFPSetIndex(2)); Assert.assertEquals(1, manager.getFPSetIndex(3)); Assert.assertEquals(0, manager.getFPSetIndex((1L << 63) + 2L)); Assert.assertEquals(1, manager.getFPSetIndex((1L << 63) + 1L)); final Set<Long> fps = new HashSet<Long>(); // fps.add(0L); // Not accepted by nested FPSets fps.add(1L); // 00...0001 fps.add((1L << 62) + 1L); // 01...0001 fps.add((1L << 63) + 1L); // 10...0001 fps.add((3L << 62) + 1L); // 11...0001 fps.add(2L); // 00...0010 fps.add((1L << 62) + 2L); // 01...0010 fps.add((1L << 63) + 2L); // 10...0010 fps.add((3L << 62) + 2L); // 11...0010 fps.add(3L); // 00...0011 fps.add((1L << 62) + 3L); // 01...0011 fps.add((1L << 63) + 3L); // 10...0011 fps.add((3L << 62) + 3L); // 11...0011 fps.add(4L); // 00...0100 fps.add((1L << 62) + 4L); // 01...0100 fps.add((1L << 63) + 4L); // 10...0100 fps.add((3L << 62) + 4L); // 11...0100 fps.add(5L); // 00...0101 fps.add((1L << 62) + 5L); // 01...0101 fps.add((1L << 63) + 5L); // 10...0101 fps.add((3L << 62) + 5L); // 11...0101 fps.add(6L); // 00...0110 fps.add((1L << 62) + 6L); // 01...0110 fps.add((1L << 63) + 6L); // 10...0110 fps.add((3L << 62) + 6L); // 11...0110 fps.add(7L); // 00...0110 fps.add((1L << 62) + 7L); // 01...0111 fps.add((1L << 63) + 7L); // 10...0111 fps.add((3L << 62) + 7L); // 11...0111 fps.add(8L); // 00...1000 fps.add((1L << 62) + 8L); // 01...1000 fps.add((1L << 63) + 8L); // 10...1000 fps.add((3L << 62) + 8L); // 11...1000 final Set<Long> unseen = new HashSet<Long>(fps); for (Long fp : fps) { // Unseen fingerprints must not be in set. for (Long unseenFP : unseen) { Assert.assertFalse(manager.contains(unseenFP)); } Assert.assertTrue(unseen.remove(fp)); Assert.assertFalse(printBinaryString("", fp), manager.put(fp)); Assert.assertTrue(printBinaryString("", fp), manager.contains(fp)); } Assert.assertEquals(fps.size(), manager.size()); Assert.assertTrue(manager.checkInvariant()); } private String printBinaryString(final String id, final long a) { return String.format(id + ":%64s", Long.toBinaryString(a)).replace(' ', '0'); } }
2,155
371
/*************************************************************** //工程名称 : MVCDemo //文件名称 : MainViewController.h //创建时间 : 2017/3/1 //创建作者 : JXT //版权归属 : 霖溦 //文件描述 : ***************************************************************/ #import "BaseViewController.h" @interface MainViewController : BaseViewController @end
128
897
# Python program to check collinearity of three given points # We can calculate the area formed by the three points, and if the area is zero then they lie on a same line. def check_collinear(ax, ay, bx, by, cx, cy): """ The Area of a Triangle formed by three points (x1, y1), (x2, y2), (x3, y3) is determined by the following formula Area = (1/2) * {x1 * (y2 - y3) + x2 * (y3 - y1) + x3 * (y1 - y2)} """ area = ax * (by - cy) + bx * (cy - ay) + cx * (ay - by) if(area == 0): return True else: return False if __name__ == '__main__': print("Enter the first co-ordinates: ", end="") ax, ay = [int(x) for x in input().split(' ')] print("Enter the second co-ordinates: ", end="") bx, by = [int(x) for x in input().split(' ')] print("Enter the third co-ordinates: ", end="") cx, cy = [int(x) for x in input().split(' ')] res = check_collinear(ax, ay, bx, by, cx, cy) if res: print("The given points are collinear") else: print("The given points are not collinear") """ Time Complexity: O(1) Space Complexity: O(1) SAMPLE INPUT AND OUTPUT Enter the first co-ordinates: 1 1 Enter the second co-ordinates: 2 2 Enter the third co-ordinates: 3 3 The given points are collinear """
506
2,151
/************************************************************************** * * Copyright 2007 Tungsten Graphics, Inc., Cedar Park, Texas. * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the * "Software"), to deal in the Software without restriction, including * without limitation the rights to use, copy, modify, merge, publish, * distribute, sub license, and/or sell copies of the Software, and to * permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice (including the * next paragraph) shall be included in all copies or substantial portions * of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. * **************************************************************************/ /* * Authors: * <NAME> <<EMAIL>> */ #include "draw/draw_context.h" #include "draw/draw_private.h" #include "draw/draw_pt.h" #include "util/u_debug.h" void draw_pt_split_prim(unsigned prim, unsigned *first, unsigned *incr) { switch (prim) { case PIPE_PRIM_POINTS: *first = 1; *incr = 1; break; case PIPE_PRIM_LINES: *first = 2; *incr = 2; break; case PIPE_PRIM_LINE_STRIP: case PIPE_PRIM_LINE_LOOP: *first = 2; *incr = 1; break; case PIPE_PRIM_LINES_ADJACENCY: *first = 4; *incr = 4; break; case PIPE_PRIM_LINE_STRIP_ADJACENCY: *first = 4; *incr = 1; break; case PIPE_PRIM_TRIANGLES: *first = 3; *incr = 3; break; case PIPE_PRIM_TRIANGLES_ADJACENCY: *first = 6; *incr = 6; break; case PIPE_PRIM_TRIANGLE_STRIP: case PIPE_PRIM_TRIANGLE_FAN: case PIPE_PRIM_POLYGON: *first = 3; *incr = 1; break; case PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY: *first = 6; *incr = 2; break; case PIPE_PRIM_QUADS: *first = 4; *incr = 4; break; case PIPE_PRIM_QUAD_STRIP: *first = 4; *incr = 2; break; default: assert(0); *first = 0; *incr = 1; /* set to one so that count % incr works */ break; } } unsigned draw_pt_trim_count(unsigned count, unsigned first, unsigned incr) { if (count < first) return 0; return count - (count - first) % incr; }
1,149
12,366
<reponame>Matir/tink /** * Copyright 2021 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except * in compliance with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software distributed under the License * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express * or implied. See the License for the specific language governing permissions and limitations under * the License. */ // [START jwt-signature-example] package jwt; import static java.nio.charset.StandardCharsets.UTF_8; import com.google.crypto.tink.CleartextKeysetHandle; import com.google.crypto.tink.JsonKeysetReader; import com.google.crypto.tink.KeysetHandle; import com.google.crypto.tink.jwt.JwtPublicKeySign; import com.google.crypto.tink.jwt.JwtPublicKeyVerify; import com.google.crypto.tink.jwt.JwtSignatureConfig; import com.google.crypto.tink.jwt.JwtValidator; import com.google.crypto.tink.jwt.RawJwt; import com.google.crypto.tink.jwt.VerifiedJwt; import java.io.File; import java.io.FileOutputStream; import java.io.IOException; import java.nio.file.Files; import java.security.GeneralSecurityException; import java.time.Instant; import java.time.temporal.ChronoUnit; import java.util.List; /** * A command-line utility for signing and verifying JSON Web Tokens (JWTs). * * <p>It loads cleartext keys from disk - this is not recommended! * * <p>It requires the following arguments: * * <ul> * <li>mode: either 'sign' or 'verify'. * <li>key-file: Read the key material from this file. * <li>subject: The subject claim to be used in the token * <li>token-file: name of the file containing the signed JWT. */ public final class JwtSignatureExample { public static void main(String[] args) throws Exception { if (args.length != 4) { System.err.printf("Expected 4 parameters, got %d\n", args.length); System.err.println( "Usage: java JwtSignatureExample sign/verify key-file subject token-file"); System.exit(1); } String mode = args[0]; if (!mode.equals("sign") && !mode.equals("verify")) { System.err.println("Incorrect mode. Please select sign or verify."); System.exit(1); } File keyFile = new File(args[1]); String subject = args[2]; File tokenFile = new File(args[3]); // Register all JWT signature key types with the Tink runtime. JwtSignatureConfig.register(); // Read the keyset into a KeysetHandle. KeysetHandle handle = null; try { handle = CleartextKeysetHandle.read(JsonKeysetReader.withFile(keyFile)); } catch (GeneralSecurityException | IOException ex) { System.err.println("Cannot read keyset, got error: " + ex); System.exit(1); } if (mode.equals("sign")) { // Get the primitive. JwtPublicKeySign signer = null; try { signer = handle.getPrimitive(JwtPublicKeySign.class); } catch (GeneralSecurityException ex) { System.err.println("Cannot create primitive, got error: " + ex); System.exit(1); } // Use the primitive to sign a token that expires in 100 seconds. RawJwt rawJwt = RawJwt.newBuilder() .setSubject(subject) .setExpiration(Instant.now().plusSeconds(100)) .build(); String signedToken = signer.signAndEncode(rawJwt); try (FileOutputStream stream = new FileOutputStream(tokenFile)) { stream.write(signedToken.getBytes(UTF_8)); } System.exit(0); } List<String> lines = Files.readAllLines(tokenFile.toPath()); if (lines.size() != 1) { System.err.printf("The signature file should contain only one line, got %d", lines.size()); System.exit(1); } String signedToken = lines.get(0).trim(); // Get the primitive. JwtPublicKeyVerify verifier = null; try { verifier = handle.getPrimitive(JwtPublicKeyVerify.class); } catch (GeneralSecurityException ex) { System.err.println("Cannot create primitive, got error: " + ex); System.exit(1); } // Use the primitive to verify a token. try { JwtValidator validator = JwtValidator.newBuilder().expectSubject(subject).build(); VerifiedJwt verifiedJwt = verifier.verifyAndDecode(signedToken, validator); long seconds = ChronoUnit.SECONDS.between(Instant.now(), verifiedJwt.getExpiration()); System.out.println("Token is valid and expires in " + seconds + " seconds."); } catch (GeneralSecurityException ex) { System.err.println("JWT verification failed."); System.exit(1); } System.exit(0); } private JwtSignatureExample() {} } // [END jwt-signature-example]
1,728
525
package com.zzzmode.appopsx.common; import android.os.ParcelFileDescriptor; import java.io.FileDescriptor; import java.io.IOException; /** * Created by zl on 2018/1/9. */ public class ParamsFixer { public static CallerMethod wrap(CallerMethod caller) { Object[] params = caller.getParams(); if (caller.getParamsType() != null && params != null) { Class[] paramsType=caller.getParamsType(); for (int i = 0; i < params.length; i++) { params[i] = marshallParamater(paramsType[i],params[i]); } } return caller; } public static CallerMethod unwrap(CallerMethod caller) { Object[] params = caller.getParams(); if (caller.getParamsType() != null && params != null) { Class[] paramsType=caller.getParamsType(); for (int i = 0; i < params.length; i++) { params[i] = unmarshallParamater(paramsType[i],params[i]); } } return caller; } private static Object marshallParamater(Class type,Object obj) { if (FileDescriptor.class.equals(type) && obj instanceof FileDescriptor) { try { return ParcelFileDescriptor.dup(((FileDescriptor) obj)); } catch (IOException e) { e.printStackTrace(); } } return obj; } private static Object unmarshallParamater(Class type,Object obj) { if (FileDescriptor.class.equals(type) && obj instanceof ParcelFileDescriptor) { return ((ParcelFileDescriptor) obj).getFileDescriptor(); } return obj; } }
744
4,879
/* Copyright (c) 2015, Project OSRM contributors All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include "mercator.hpp" #include <cmath> double mercator::y2lat(const double value) noexcept { return 180. * M_1_PI * (2. * std::atan(std::exp(value * M_PI / 180.)) - M_PI_2); } double mercator::lat2y(const double latitude) noexcept { return 180. * M_1_PI * std::log(std::tan(M_PI_4 + latitude * (M_PI / 180.) / 2.)); }
500
323
# encoding: UTF-8 ''' 本文件中包含的是CTA模块的组合回测引擎,回测引擎的API和CTA引擎一致, 可以使用和实盘相同的代码进行回测。 华富资产 李来佳 ''' from __future__ import division import sys import os import importlib import csv import copy import pandas as pd import traceback import numpy as np import logging import socket import zlib import pickle from bson import binary from collections import OrderedDict, defaultdict from datetime import datetime, timedelta from functools import lru_cache from pathlib import Path from .base import ( EngineType, STOPORDER_PREFIX, StopOrder, StopOrderStatus ) from .template import CtaTemplate from vnpy.component.cta_fund_kline import FundKline from vnpy.trader.object import ( BarData, TickData, OrderData, TradeData, ContractData ) from vnpy.trader.constant import ( Exchange, Direction, Offset, Status, OrderType, Product ) from vnpy.trader.converter import PositionHolding from vnpy.trader.utility import ( get_underlying_symbol, round_to, extract_vt_symbol, format_number, import_module_by_str ) from vnpy.trader.util_logger import setup_logger from vnpy.data.mongo.mongo_data import MongoData from uuid import uuid1 class BackTestingEngine(object): """ CTA回测引擎 函数接口和策略引擎保持一样, 从而实现同一套代码从回测到实盘。 针对1分钟bar的回测 或者tick级别得回测 提供对组合回测/批量回测得服务 """ def __init__(self, event_engine=None): """Constructor""" # 绑定事件引擎 self.event_engine = event_engine self.mode = 'bar' # 'bar': 根据1分钟k线进行回测, 'tick',根据分笔tick进行回测 # 引擎类型为回测 self.engine_type = EngineType.BACKTESTING self.contract_type = 'future' # future, stock, digital self.using_99_contract = True # 回测策略相关 self.classes = {} # 策略类,class_name: stategy_class self.class_module_map = {} # 策略类名与模块名映射 class_name: mudule_name self.strategies = {} # 回测策略实例, key = strategy_name, value= strategy self.symbol_strategy_map = defaultdict(list) # vt_symbol: strategy list self.test_name = 'portfolio_test_{}'.format(datetime.now().strftime('%M%S')) # 回测策略组合的实例名字 self.daily_report_name = '' # 策略的日净值报告文件名称 self.test_start_date = '' # 组合回测启动得日期 self.init_days = 0 # 初始化天数 self.test_end_date = '' # 组合回测结束日期 self.slippage = {} # 回测时假设的滑点 self.commission_rate = {} # 回测时假设的佣金比例(适用于百分比佣金) self.fix_commission = {} # 每手固定手续费 self.size = {} # 合约大小,默认为1 self.price_tick = {} # 价格最小变动 self.volume_tick = {} # 合约委托单最小单位 self.margin_rate = {} # 回测合约的保证金比率 self.price_dict = {} # 登记vt_symbol对应的最新价 self.contract_dict = {} # 登记vt_symbol得对应合约信息 self.symbol_exchange_dict = {} # 登记symbol: exchange的对应关系 self.data_start_date = None # 回测数据开始日期,datetime对象 (用于截取数据) self.data_end_date = None # 回测数据结束日期,datetime对象 (用于截取数据) self.strategy_start_date = None # 策略启动日期(即前面的数据用于初始化),datetime对象 self.stop_order_count = 0 # 本地停止单编号 self.stop_orders = {} # 本地停止单 self.active_stop_orders = {} # 活动本地停止单 self.limit_order_count = 0 # 限价单编号 self.limit_orders = OrderedDict() # 限价单字典 self.active_limit_orders = OrderedDict() # 活动限价单字典,用于进行撮合用 self.order_strategy_dict = {} # orderid 与 strategy的映射 # 持仓缓存字典 # key为vt_symbol,value为PositionBuffer对象 self.pos_holding_dict = {} self.trade_count = 0 # 成交编号 self.trade_dict = OrderedDict() # 用于统计成交收益时,还没处理得交易 self.trades = OrderedDict() # 记录所有得成交记录 self.trade_pnl_list = [] # 交易记录列表 self.long_position_list = [] # 多单持仓 self.short_position_list = [] # 空单持仓 self.holdings = {} # 多空持仓 # 当前最新数据,用于模拟成交用 self.gateway_name = u'BackTest' self.last_bar = {} # 最新的bar self.last_tick = {} # 最新tick self.last_dt = None # 最新时间 # csvFile相关 self.bar_interval_seconds = 60 # csv文件,属于K线类型,K线的周期(秒数),缺省是1分钟 # 费用风控情况 self.percent = 0.0 self.percent_limit = 30 # 投资仓位比例上限 # 回测计算相关 self.use_margin = True # 使用保证金模式(期货使用,计算保证金时,按照开仓价计算。股票是按照当前价计算) self.init_capital = 1000000 # 期初资金 self.cur_capital = self.init_capital # 当前资金净值 self.net_capital = self.init_capital # 实时资金净值(每日根据capital和持仓浮盈计算) self.max_capital = self.init_capital # 资金最高净值 self.max_net_capital = self.init_capital self.available = self.init_capital self.max_pnl = 0 # 最高盈利 self.min_pnl = 0 # 最大亏损 self.max_occupy_rate = 0 # 最大保证金占比 self.winning_result = 0 # 盈利次数 self.losing_result = 0 # 亏损次数 self.total_trade_count = 0 # 总成交数量 self.total_winning = 0 # 总盈利 self.total_losing = 0 # 总亏损 self.total_turnover = 0 # 总成交金额(合约面值) self.total_commission = 0 # 总手续费 self.total_slippage = 0 # 总滑点 self.time_list = [] # 时间序列 self.pnl_list = [] # 每笔盈亏序列 self.capital_list = [] # 盈亏汇总的时间序列 self.drawdown_list = [] # 回撤的时间序列 self.drawdown_rate_list = [] # 最大回撤比例的时间序列(成交结算) self.max_net_capital_time = '' self.max_drawdown_rate_time = '' self.daily_max_drawdown_rate = 0 # 按照日结算价计算 self.pnl_strategy_dict = {} # 策略实例的平仓盈亏 self.is_plot_daily = False self.daily_list = [] # 按日统计得序列 self.daily_first_benchmark = None self.logger = None self.strategy_loggers = {} self.debug = False self.is_7x24 = False self.logs_path = None self.data_path = None self.fund_kline_dict = {} self.active_fund_kline = False # 回测任务/回测结果,保存在数据库中 self.mongo_api = None self.task_id = None self.test_setting = None # 回测设置 self.strategy_setting = None # 所有回测策略得设置 def create_fund_kline(self, name, use_renko=False, extra_setting = {}): """ 创建资金曲线 :param name: 账号名,或者策略名 :param use_renko: :param extra_setting: 扩展得k线设置,例如macd等 :return: """ setting = {} setting.update({'name': name}) setting['para_ma1_len'] = 5 setting['para_ma2_len'] = 10 setting['para_ma3_len'] = 20 setting['para_active_yb'] = True setting['price_tick'] = 0.01 setting['underlying_symbol'] = 'fund' if use_renko: # 使用砖图,高度是资金的千分之一 setting['height'] = self.init_capital * 0.001 setting['use_renko'] = True for k, v in extra_setting.items(): if k not in setting: setting.update({k: v}) fund_kline = FundKline(cta_engine=self, setting=setting) self.fund_kline_dict.update({name: fund_kline}) return fund_kline def get_fund_kline(self, name: str = None): # 指定资金账号/策略名 if name: kline = self.fund_kline_dict.get(name, None) return kline # 没有指定账号,并且存在一个或多个资金K线 if len(self.fund_kline_dict) > 0: # 优先找vt_setting中,配置了strategy_groud的资金K线 kline = self.fund_kline_dict.get(self.test_name, None) # 找不到,返回第一个 if kline is None: kline = self.fund_kline_dict.values()[0] return kline else: return None # todo wj def save_fund_kline(self, name: str = None): # 没有指定账号,并且存在一个或多个资金K线 if len(self.fund_kline_dict) > 0: # 优先找vt_setting中,配置了strategy_groud的资金K线 kline = self.fund_kline_dict.get(name, None) # 找不到,返回第一个 if kline is None: kline = self.fund_kline_dict.values()[0] kline_file = str(os.path.join(self.get_data_path(), 'fund_{}.csv'.format(name))) # 如果数据文件存在,则删除数据 if os.path.exists(kline_file): os.remove(kline_file) # 设置 kline的输出文件 kline.kline.export_filename = kline_file kline.kline.export_fields = [ {'name': 'datetime', 'source': 'bar', 'attr': 'datetime', 'type_': 'datetime'}, {'name': 'open', 'source': 'bar', 'attr': 'open_price', 'type_': 'float'}, {'name': 'high', 'source': 'bar', 'attr': 'high_price', 'type_': 'float'}, {'name': 'low', 'source': 'bar', 'attr': 'low_price', 'type_': 'float'}, {'name': 'close', 'source': 'bar', 'attr': 'close_price', 'type_': 'float'}, {'name': 'turnover', 'source': 'bar', 'attr': 'turnover', 'type_': 'float'}, {'name': 'volume', 'source': 'bar', 'attr': 'volume', 'type_': 'float'}, {'name': 'open_interest', 'source': 'bar', 'attr': 'open_interest', 'type_': 'float'} ] kline.save() def get_account(self, vt_accountid: str = ""): """返回账号的实时权益,可用资金,仓位比例,投资仓位比例上限""" if self.net_capital == 0.0: self.percent = 0.0 return self.net_capital, self.available, self.percent, self.percent_limit def set_test_start_date(self, start_date: str = '20100416', init_days: int = 10): """设置回测的启动日期""" self.test_start_date = start_date self.init_days = init_days self.data_start_date = datetime.strptime(start_date, '%Y%m%d') # 初始化天数 init_time_delta = timedelta(init_days) self.strategy_start_date = self.data_start_date + init_time_delta self.write_log(u'设置:回测数据开始日期:{},初始化数据为{}天,策略自动启动日期:{}' .format(self.data_start_date, self.init_days, self.strategy_start_date)) def set_test_end_date(self, end_date: str = ''): """设置回测的结束日期""" self.test_end_date = end_date if end_date: self.data_end_date = datetime.strptime(end_date, '%Y%m%d') # 若不修改时间则会导致不包含dataEndDate当天数据 self.data_end_date.replace(hour=23, minute=59) else: self.data_end_date = datetime.now() self.write_log(u'设置:回测数据结束日期:{}'.format(self.data_end_date)) def set_init_capital(self, capital: float): """设置期初净值""" self.cur_capital = capital # 资金 self.net_capital = capital # 实时资金净值(每日根据capital和持仓浮盈计算) self.max_capital = capital # 资金最高净值 self.max_net_capital = capital self.available = capital self.init_capital = capital def set_margin_rate(self, vt_symbol: str, margin_rate: float): """设置某个合约得保证金比率""" self.margin_rate.update({vt_symbol: margin_rate}) @lru_cache() def get_margin_rate(self, vt_symbol: str): return self.margin_rate.get(vt_symbol, 0.1) def get_margin(self, vt_symbol: str): """ 按照当前价格,计算1手合约需要得保证金 :param vt_symbol: :return: 普通合约/期权 => 当前价格 * size * margin_rate SP j2101&j2105.DCE => max( 当前价格 * size * margin_rate) j2101-1-i2101-3-BJ.SPD => (主动腿价格*主动腿size * 主动腿margin_rate + 被动腿价格*被动腿size * 被动腿margin_rate rb2101-1-rb2105-1-CJ.SPD => max(主动腿价格*主动腿size * 主动腿margin_rate , 被动腿价格*被动腿size * 被动腿margin_rate """ if '.SPD99' in vt_symbol: vt_symbol = vt_symbol.replace('.SPD99', '.SPD') if not vt_symbol.endswith('.SPD') and '&' not in vt_symbol: cur_price = self.get_price(vt_symbol) cur_size = self.get_size(vt_symbol) cur_margin_rate = self.get_margin_rate(vt_symbol) if cur_price and cur_size and cur_margin_rate: return abs(cur_price * cur_size * cur_margin_rate) else: # 取不到价格,取不到size,或者取不到保证金比例 self.write_error(f'无法计算{vt_symbol}的保证金,价格:{cur_price}或size:{cur_size}或margin_rate:{cur_margin_rate}') return None # j2101-1-i2101-3-BJ.SPD rb2101-1-rb2105-1-CJ.SPD if vt_symbol.endswith('.SPD'): act_symbol, act_ratio, pas_symbol, pas_ratio, spd_type = vt_symbol.replace('.SPD', '').split('-') act_vt_symbol = '{}.{}'.format(act_symbol, self.get_exchange(act_symbol).value) pas_vt_symbol = '{}.{}'.format(pas_symbol, self.get_exchange(pas_symbol).value) act_ratio = int(act_ratio) pas_ratio = int(pas_ratio) # SP j2101&j2105.DCE elif '&' in vt_symbol: symbol, exchange = extract_vt_symbol(vt_symbol) symbol = symbol.split(' ')[-1] act_symbol, pas_symbol = symbol.split('&') act_vt_symbol = f'{act_symbol}.{exchange.value}' pas_vt_symbol = f'{pas_symbol}.{exchange.value}' act_ratio = 1 pas_ratio = 1 else: self.write_error(f'无法计算{vt_symbol}的保证金:无法分解') return None act_cur_price = self.get_price(act_vt_symbol) act_size = self.get_size(act_vt_symbol) act_margin_rate = self.get_margin_rate(act_vt_symbol) pas_cur_price = self.get_price(pas_vt_symbol) pas_size = self.get_size(pas_vt_symbol) pas_margin_rate = self.get_margin_rate(pas_vt_symbol) if not all([act_cur_price, act_size, act_margin_rate]): self.write_error( f'无法计算{vt_symbol}的保证金,{act_vt_symbol}价格:{act_cur_price}或size:{act_size}或margin_rate:{act_margin_rate}') return None if not all([pas_cur_price, pas_size, pas_margin_rate]): self.write_error( f'无法计算{vt_symbol}的保证金,{pas_vt_symbol}价格:{pas_cur_price}或size:{pas_size}或margin_rate:{pas_margin_rate}') return None # 跨期合约 if get_underlying_symbol(act_symbol) == get_underlying_symbol(pas_symbol): spd_margin = max(act_cur_price * act_size * act_margin_rate * act_ratio, pas_cur_price * pas_size * pas_margin_rate * pas_ratio) # 跨品种合约,取最大值 else: spd_margin = act_cur_price * act_size * act_margin_rate * act_ratio + pas_cur_price * pas_size * pas_margin_rate * pas_ratio return spd_margin def set_slippage(self, vt_symbol: str, slippage: float): """设置滑点点数""" self.slippage.update({vt_symbol: slippage}) @lru_cache() def get_slippage(self, vt_symbol: str): """获取滑点点数""" return self.slippage.get(vt_symbol, 0) def set_size(self, vt_symbol: str, size: int): """设置合约大小""" self.size.update({vt_symbol: size}) @lru_cache() def get_size(self, vt_symbol: str): """查询合约的size""" return self.size.get(vt_symbol, 10) def set_price(self, vt_symbol: str, price: float): self.price_dict.update({vt_symbol: price}) def get_price(self, vt_symbol: str): return self.price_dict.get(vt_symbol, None) def set_commission_rate(self, vt_symbol: str, rate: float): """设置佣金比例""" self.commission_rate.update({vt_symbol: rate}) if rate >= 0.1: self.fix_commission.update({vt_symbol: rate}) def get_commission_rate(self, vt_symbol: str): """ 获取保证金比例,缺省万分之一""" if vt_symbol not in self.commission_rate: symbol, exchange = extract_vt_symbol(vt_symbol) return self.commission_rate.get(symbol, float(0.0001)) return self.commission_rate.get(vt_symbol, float(0.0001)) def get_fix_commission(self, vt_symbol: str): return self.fix_commission.get(vt_symbol, 0) def set_price_tick(self, vt_symbol: str, price_tick: float): """设置价格最小变动""" self.price_tick.update({vt_symbol: price_tick}) def get_price_tick(self, vt_symbol: str): return self.price_tick.get(vt_symbol, 1) def set_volume_tick(self, vt_symbol: str, volume_tick: float): """设置委托单最小单位""" self.volume_tick.update({vt_symbol: volume_tick}) def get_volume_tick(self, vt_symbol: str): return self.volume_tick.get(vt_symbol, 1) def set_contract(self, symbol: str, exchange: Exchange, product: Product, name: str, size: int, price_tick: float, volume_tick: float = 1, margin_rate: float = 0.1): """设置合约信息""" vt_symbol = '.'.join([symbol, exchange.value]) if vt_symbol not in self.contract_dict: c = ContractData( gateway_name=self.gateway_name, symbol=symbol, exchange=exchange, name=name, product=product, size=size, pricetick=price_tick, margin_rate=margin_rate ) self.contract_dict.update({vt_symbol: c}) self.set_size(vt_symbol, size) self.set_margin_rate(vt_symbol, margin_rate) self.set_price_tick(vt_symbol, price_tick) self.set_volume_tick(vt_symbol, volume_tick) self.symbol_exchange_dict.update({symbol: exchange}) @lru_cache() def get_contract(self, vt_symbol): """获取合约配置信息""" return self.contract_dict.get(vt_symbol) @lru_cache() def get_exchange(self, symbol: str): return self.symbol_exchange_dict.get(symbol, Exchange.LOCAL) def get_position_holding(self, vt_symbol: str, gateway_name: str = ''): """ 查询合约在账号的持仓(包含多空)""" if len(gateway_name) == 0: gateway_name = self.gateway_name k = f'{gateway_name}.{vt_symbol}' holding = self.holdings.get(k, None) if not holding: symbol, exchange = extract_vt_symbol(vt_symbol) if self.contract_type == 'future': product = Product.FUTURES elif self.contract_type == 'stock': product = Product.EQUITY else: product = Product.SPOT contract = ContractData(gateway_name=gateway_name, name=vt_symbol, product=product, symbol=symbol, exchange=exchange, size=self.get_size(vt_symbol), pricetick=self.get_price_tick(vt_symbol), margin_rate=self.get_margin_rate(vt_symbol)) holding = PositionHolding(contract) self.holdings[k] = holding return holding def set_name(self, test_name): """ 设置组合的运行实例名称 :param test_name: :return: """ self.test_name = test_name def set_daily_report_name(self, report_file): """ 设置策略的日净值记录csv保存文件名(含路径) :param report_file: 保存文件名(含路径) :return: """ self.daily_report_name = report_file def prepare_env(self, test_setting): """ 根据配置参数,准备环境 包括: 回测名称 ,是否debug,数据目录/日志目录, 资金/保证金类型/仓位控制 回测开始/结束日期 :param test_setting: :return: """ self.test_setting = copy.copy(test_setting) self.output('back_testing prepare_env') if 'name' in test_setting: self.set_name(test_setting.get('name')) self.mode = test_setting.get('mode', 'bar') self.output(f'采用{self.mode}方式回测') self.contract_type = test_setting.get('contract_type', 'future') self.output(f'测试合约主要为{self.contract_type}') self.debug = test_setting.get('debug', False) if 'using_99_contract' in test_setting: self.using_99_contract = test_setting.get('using_99_contract') self.write_log(f'是否使用指数合约:{self.using_99_contract}') # 更新数据目录 if 'data_path' in test_setting: self.data_path = test_setting.get('data_path') else: self.data_path = os.path.abspath(os.path.join(os.getcwd(), 'data')) print(f'数据输出目录:{self.data_path}') # 更新日志目录 if 'logs_path' in test_setting: self.logs_path = os.path.abspath(os.path.join(test_setting.get('logs_path'), self.test_name)) else: self.logs_path = os.path.abspath(os.path.join(os.getcwd(), 'log', self.test_name)) print(f'日志输出目录:{self.logs_path}') # 创建日志 self.create_logger(debug=self.debug) # 设置资金 if 'init_capital' in test_setting: self.write_log(u'设置期初资金:{}'.format(test_setting.get('init_capital'))) self.set_init_capital(test_setting.get('init_capital')) # 缺省使用保证金方式。(期货使用保证金/股票不使用保证金) self.use_margin = test_setting.get('use_margin', True) # 设置最大资金使用比例 if 'percent_limit' in test_setting: self.write_log(u'设置最大资金使用比例:{}%'.format(test_setting.get('percent_limit'))) self.percent_limit = test_setting.get('percent_limit') if 'start_date' in test_setting: if 'strategy_start_date' not in test_setting: init_days = test_setting.get('init_days', 10) self.write_log(u'设置回测开始日期:{},数据加载日数:{}'.format(test_setting.get('start_date'), init_days)) self.set_test_start_date(test_setting.get('start_date'), init_days) else: start_date = test_setting.get('start_date') strategy_start_date = test_setting.get('strategy_start_date') self.write_log(u'使用指定的数据开始日期:{}和策略启动日期:{}'.format(start_date, strategy_start_date)) self.test_start_date = start_date self.data_start_date = datetime.strptime(start_date.replace('-', ''), '%Y%m%d') self.strategy_start_date = datetime.strptime(strategy_start_date.replace('-', ''), '%Y%m%d') if 'end_date' in test_setting: self.write_log(u'设置回测结束日期:{}'.format(test_setting.get('end_date'))) self.set_test_end_date(test_setting.get('end_date')) # 准备数据 if 'symbol_datas' in test_setting: self.write_log(u'准备数据') self.prepare_data(test_setting.get('symbol_datas')) if self.mode == 'tick': self.tick_path = test_setting.get('tick_path', None) # 设置bar文件的时间间隔秒数 if 'bar_interval_seconds' in test_setting: self.write_log(u'设置bar文件的时间间隔秒数:{}'.format(test_setting.get('bar_interval_seconds'))) self.bar_interval_seconds = test_setting.get('bar_interval_seconds') # 资金曲线 self.active_fund_kline = test_setting.get('active_fund_kline', False) if self.active_fund_kline: # 创建资金K线 self.create_fund_kline(self.test_name, use_renko=test_setting.get('use_renko', False)) self.is_plot_daily = test_setting.get('is_plot_daily', False) # 加载所有本地策略class self.load_strategy_class() def prepare_data(self, data_dict): """ 准备组合数据 :param data_dict: :return: """ self.output('prepare_data') if len(data_dict) == 0: self.write_log(u'请指定回测数据和文件') return for symbol, symbol_data in data_dict.items(): self.write_log(u'配置{}数据:{}'.format(symbol, symbol_data)) self.set_price_tick(symbol, symbol_data.get('price_tick', 1)) volume_tick = symbol_data.get('min_volume', symbol_data.get('volume_tick', 1)) self.set_volume_tick(symbol, volume_tick) self.set_slippage(symbol, symbol_data.get('slippage', 0)) self.set_size(symbol, symbol_data.get('symbol_size', 10)) margin_rate = symbol_data.get('margin_rate', 0.1) self.set_margin_rate(symbol, margin_rate) self.set_commission_rate(symbol, symbol_data.get('commission_rate', float(0.0001))) exchange = symbol_data.get('exchange', 'LOCAL') self.set_commission_rate(f'{symbol}.{exchange}', symbol_data.get('commission_rate', float(0.0001))) self.set_contract( symbol=symbol, name=symbol, exchange=Exchange(exchange), product=Product(symbol_data.get('product', "期货")), size=symbol_data.get('symbol_size', 10), price_tick=symbol_data.get('price_tick', 1), volume_tick=volume_tick, margin_rate=margin_rate ) def new_tick(self, tick): """新得tick""" self.last_tick.update({tick.vt_symbol: tick}) if self.last_dt is None or (tick.datetime and tick.datetime > self.last_dt): self.last_dt = tick.datetime self.set_price(tick.vt_symbol, tick.last_price) self.cross_stop_order(tick=tick) # 撮合停止单 self.cross_limit_order(tick=tick) # 先撮合限价单 # 更新账号级别资金曲线(只有持仓时,才更新) fund_kline = self.get_fund_kline(self.test_name) if fund_kline is not None and (len(self.long_position_list) > 0 or len(self.short_position_list) > 0): fund_kline.update_account(self.last_dt, self.net_capital) for strategy in self.symbol_strategy_map.get(tick.vt_symbol, []): # 更新策略的资金K线 fund_kline = self.fund_kline_dict.get(strategy.strategy_name, None) if fund_kline: hold_pnl, _ = fund_kline.get_hold_pnl() if hold_pnl != 0: fund_kline.update_strategy(dt=self.last_dt, hold_pnl=hold_pnl) # 推送tick到策略中 strategy.on_tick(tick) # 推送K线到策略中 # 到达策略启动日期,启动策略 if not strategy.trading and self.strategy_start_date < tick.datetime: strategy.trading = True strategy.on_start() self.output(u'{}策略启动交易'.format(strategy.strategy_name)) def new_bar(self, bar): """新的K线""" self.last_bar.update({bar.vt_symbol: bar}) if self.last_dt is None or ( bar.datetime and bar.datetime > self.last_dt - timedelta(seconds=self.bar_interval_seconds)): self.last_dt = bar.datetime + timedelta(seconds=self.bar_interval_seconds) self.set_price(bar.vt_symbol, bar.close_price) self.cross_stop_order(bar=bar) # 撮合停止单 self.cross_limit_order(bar=bar) # 先撮合限价单 # 更新账号的资金曲线(只有持仓时,才更新) fund_kline = self.get_fund_kline(self.test_name) if fund_kline is not None and (len(self.long_position_list) > 0 or len(self.short_position_list) > 0): fund_kline.update_account(self.last_dt, self.net_capital) for strategy in self.symbol_strategy_map.get(bar.vt_symbol, []): # 更新策略的资金K线 fund_kline = self.fund_kline_dict.get(strategy.strategy_name, None) if fund_kline: hold_pnl, _ = fund_kline.get_hold_pnl() if hold_pnl != 0: fund_kline.update_strategy(dt=self.last_dt, hold_pnl=hold_pnl) # 推送K线到策略中 strategy.on_bar(bar) # 推送K线到策略中 # 到达策略启动日期,启动策略 if not strategy.trading and self.strategy_start_date < bar.datetime: strategy.trading = True strategy.on_start() self.output(u'{}策略启动交易'.format(strategy.strategy_name)) def load_strategy_class(self): """ Load strategy class from source code. """ self.write_log('加载所有策略class') # 加载 vnpy/app/cta_strategy_pro/strategies的所有策略 path1 = Path(__file__).parent.joinpath("strategies") self.load_strategy_class_from_folder( path1, "vnpy.app.cta_strategy_pro.strategies") def load_strategy_class_from_folder(self, path: Path, module_name: str = ""): """ Load strategy class from certain folder. """ for dirpath, dirnames, filenames in os.walk(str(path)): for filename in filenames: if filename.endswith(".py"): strategy_module_name = ".".join( [module_name, filename.replace(".py", "")]) elif filename.endswith(".pyd"): strategy_module_name = ".".join( [module_name, filename.split(".")[0]]) elif filename.endswith(".so"): strategy_module_name = ".".join( [module_name, filename.split(".")[0]]) else: continue self.load_strategy_class_from_module(strategy_module_name) def load_strategy_class_from_module(self, module_name: str): """ Load/Reload strategy class from module file. """ try: module = importlib.import_module(module_name) for name in dir(module): value = getattr(module, name) if (isinstance(value, type) and issubclass(value, CtaTemplate) and value is not CtaTemplate): class_name = value.__name__ if class_name not in self.classes: self.write_log(f"加载策略类{module_name}.{class_name}") else: self.write_log(f"更新策略类{module_name}.{class_name}") self.classes[class_name] = value self.class_module_map[class_name] = module_name return True except: # noqa msg = f"策略文件{module_name}加载失败,触发异常:\n{traceback.format_exc()}" self.write_error(msg) self.output(msg) return False def load_strategy(self, strategy_name: str, strategy_setting: dict = None): """ 装载回测的策略 setting是参数设置,包括 class_name: str, 策略类名字 vt_symbol: str, 缺省合约 setting: {}, 策略的参数 auto_init: True/False, 策略是否自动初始化 auto_start: True/False, 策略是否自动启动 """ # 获取策略的类名 class_name = strategy_setting.get('class_name', None) if class_name is None or strategy_name is None: self.write_error(u'setting中没有class_name') return # strategy_class => module.strategy_class if '.' not in class_name: module_name = self.class_module_map.get(class_name, None) if module_name: class_name = module_name + '.' + class_name self.write_log(u'转换策略为全路径:{}'.format(class_name)) # 获取策略类的定义 strategy_class = import_module_by_str(class_name) if strategy_class is None: self.write_error(u'加载策略模块失败:{}'.format(class_name)) return # 处理 vt_symbol vt_symbol = strategy_setting.get('vt_symbol') if '.' in vt_symbol: symbol, exchange = extract_vt_symbol(vt_symbol) elif self.contract_type == 'future': symbol = vt_symbol if self.using_99_contract: underly_symbol = get_underlying_symbol(symbol).upper() # WJ: 当需要回测A1701.DCE时,不能替换成99合约。 exchange = self.get_exchange(f'{underly_symbol}99') else: exchange = self.get_exchange(symbol) vt_symbol = '.'.join([symbol, exchange.value]) strategy_setting.update({'vt_symbol': vt_symbol}) else: symbol = vt_symbol exchange = Exchange.LOCAL vt_symbol = '.'.join([symbol, exchange.value]) strategy_setting.update({'vt_symbol': vt_symbol}) # 在期货组合回测时,如果直接使用运行得配置,需要把一般配置的主力合约,更换为指数合约 if self.using_99_contract: if '99' not in symbol and exchange != Exchange.SPD and self.contract_type == 'future': underly_symbol = get_underlying_symbol(symbol).upper() self.write_log(u'更新vt_symbol为指数合约:{}=>{}'.format(vt_symbol, underly_symbol + '99.' + exchange.value)) vt_symbol = underly_symbol.upper() + '99.' + exchange.value strategy_setting.update({'vt_symbol': vt_symbol}) # 属于自定义套利合约 if exchange == Exchange.SPD: symbol_pairs = symbol.split('-') active_symbol = get_underlying_symbol(symbol_pairs[0]) passive_symbol = get_underlying_symbol(symbol_pairs[2]) new_vt_symbol = '-'.join([active_symbol.upper() + '99', symbol_pairs[1], passive_symbol.upper() + '99', symbol_pairs[3], symbol_pairs[4]]) + '.SPD' self.write_log(u'更新vt_symbol为指数合约:{}=>{}'.format(vt_symbol, new_vt_symbol)) vt_symbol = new_vt_symbol strategy_setting.update({'vt_symbol': vt_symbol}) # 取消自动启动 if 'auto_start' in strategy_setting: strategy_setting.update({'auto_start': False}) # 策略参数设置 setting = strategy_setting.get('setting', {}) # 强制更新回测为True setting.update({'backtesting': True}) # 创建实例 strategy = strategy_class(self, strategy_name, vt_symbol, setting) # 保存到策略实例映射表中 self.strategies.update({strategy_name: strategy}) # 更新vt_symbol合约与策略的订阅关系 self.subscribe_symbol(strategy_name=strategy_name, vt_symbol=vt_symbol) # 如果idx_symbol不再列表中,需要订阅 if 'idx_symbol' in setting.keys() and setting['idx_symbol'] not in self.symbol_strategy_map.keys(): self.write_log(f"新增订阅指数合约:{setting['idx_symbol']}") self.subscribe_symbol(strategy_name=strategy_name, vt_symbol=setting['idx_symbol']) if strategy_setting.get('auto_init', False): self.write_log(u'自动初始化策略') strategy.on_init() if strategy_setting.get('auto_start', False): self.write_log(u'自动启动策略') strategy.on_start() if self.active_fund_kline: # 创建策略实例的资金K线 self.create_fund_kline(name=strategy_name, use_renko=False) def subscribe_symbol(self, strategy_name: str, vt_symbol: str, gateway_name: str = '', is_bar: bool = False): """订阅合约""" strategy = self.strategies.get(strategy_name, None) if not strategy: self.write_log(f'策略{strategy_name}对应的实例不存在,订阅{vt_symbol}失败') return False # 添加 合约订阅 vt_symbol <=> 策略实例 strategy 映射. strategies = self.symbol_strategy_map[vt_symbol] strategies.append(strategy) return True # --------------------------------------------------------------------- def save_strategy_data(self): """保存策略数据""" for strategy in self.strategies.values(): self.write_log(u'save strategy data') if hasattr(strategy, 'save_data'): strategy.save_data() def send_order(self, strategy: CtaTemplate, vt_symbol: str, direction: Direction, offset: Offset, price: float, volume: float, stop: bool, lock: bool, order_type: OrderType = OrderType.LIMIT, gateway_name: str = None): """发单""" price_tick = self.get_price_tick(vt_symbol) price = round_to(price, price_tick) if stop: return self.send_local_stop_order( strategy=strategy, vt_symbol=vt_symbol, direction=direction, offset=offset, price=price, volume=volume, lock=lock, gateway_name=gateway_name ) else: return self.send_limit_order( strategy=strategy, vt_symbol=vt_symbol, direction=direction, offset=offset, price=price, volume=volume, lock=lock, gateway_name=gateway_name ) def send_limit_order(self, strategy: CtaTemplate, vt_symbol: str, direction: Direction, offset: Offset, price: float, volume: float, lock: bool, order_type: OrderType = OrderType.LIMIT, gateway_name: str = None ): self.limit_order_count += 1 order_id = str(self.limit_order_count) symbol, exchange = extract_vt_symbol(vt_symbol) if gateway_name is None: gateway_name = self.gateway_name order = OrderData( gateway_name=gateway_name, symbol=symbol, exchange=exchange, orderid=order_id, direction=direction, offset=offset, type=order_type, price=round_to(value=price, target=self.get_price_tick(symbol)), volume=volume, status=Status.NOTTRADED, time=str(self.last_dt) ) # 保存到限价单字典中 self.active_limit_orders[order.vt_orderid] = order self.limit_orders[order.vt_orderid] = order self.order_strategy_dict.update({order.vt_orderid: strategy}) self.write_log(f'创建限价单:{order.__dict__}') return [order.vt_orderid] def send_local_stop_order( self, strategy: CtaTemplate, vt_symbol: str, direction: Direction, offset: Offset, price: float, volume: float, lock: bool, gateway_name: str = None): """""" self.stop_order_count += 1 stop_order = StopOrder( vt_symbol=vt_symbol, direction=direction, offset=offset, price=price, volume=volume, stop_orderid=f"{STOPORDER_PREFIX}.{self.stop_order_count}", strategy_name=strategy.strategy_name, ) self.write_log(f'创建本地停止单:{stop_order.__dict__}') self.order_strategy_dict.update({stop_order.stop_orderid: strategy}) self.active_stop_orders[stop_order.stop_orderid] = stop_order self.stop_orders[stop_order.stop_orderid] = stop_order return [stop_order.stop_orderid] def cancel_order(self, strategy: CtaTemplate, vt_orderid: str): """撤单""" if vt_orderid.startswith(STOPORDER_PREFIX): return self.cancel_stop_order(strategy, vt_orderid) else: return self.cancel_limit_order(strategy, vt_orderid) def cancel_limit_order(self, strategy: CtaTemplate, vt_orderid: str): """限价单撤单""" if vt_orderid in self.active_limit_orders: order = self.active_limit_orders[vt_orderid] register_strategy = self.order_strategy_dict.get(vt_orderid, None) if register_strategy.strategy_name != strategy.strategy_name: return False order.status = Status.CANCELLED order.cancel_time = str(self.last_dt) self.active_limit_orders.pop(vt_orderid, None) strategy.on_order(order) return True return False def cancel_stop_order(self, strategy: CtaTemplate, vt_orderid: str): """本地停止单撤单""" if vt_orderid not in self.active_stop_orders: return False stop_order = self.active_stop_orders.pop(vt_orderid) stop_order.status = StopOrderStatus.CANCELLED strategy.on_stop_order(stop_order) return True def cancel_all(self, strategy): """撤销某个策略的所有委托单""" self.cancel_orders(strategy=strategy) def cancel_orders(self, vt_symbol: str = None, offset: Offset = None, strategy: CtaTemplate = None): """撤销所有单""" # Symbol参数:指定合约的撤单; # OFFSET参数:指定Offset的撤单,缺省不填写时,为所有 # strategy参数: 指定某个策略的单子 if len(self.active_limit_orders) > 0: self.write_log(u'从所有订单中,撤销:开平:{},合约:{},策略:{}' .format(offset, vt_symbol if vt_symbol is not None else u'所有', strategy.strategy_name if strategy else None)) for vt_orderid in list(self.active_limit_orders.keys()): order = self.active_limit_orders.get(vt_orderid, None) order_strategy = self.order_strategy_dict.get(vt_orderid, None) if order is None or order_strategy is None: continue if offset is None: offset_cond = True else: offset_cond = order.offset == offset if vt_symbol is None: symbol_cond = True else: symbol_cond = order.vt_symbol == vt_symbol if strategy is None: strategy_cond = True else: strategy_cond = strategy.strategy_name == order_strategy.strategy_name if offset_cond and symbol_cond and strategy_cond: self.write_log(u'撤销限价订单:{},{} {}@{}' .format(vt_orderid, order.direction, order.price, order.volume)) order.status = Status.CANCELLED order.cancel_time = str(self.last_dt) del self.active_limit_orders[vt_orderid] if order_strategy: order_strategy.on_order(order) # 撤销本地停止单 for stop_orderid in list(self.active_stop_orders.keys()): order = self.active_stop_orders.get(stop_orderid, None) order_strategy = self.order_strategy_dict.get(stop_orderid, None) if order is None or order_strategy is None: continue if offset is None: offset_cond = True else: offset_cond = order.offset == offset if vt_symbol is None: symbol_cond = True else: symbol_cond = order.vt_symbol == vt_symbol if strategy is None: strategy_cond = True else: strategy_cond = strategy.strategy_name == order_strategy.strategy_name if offset_cond and symbol_cond and strategy_cond: self.write_log(u'撤销本地停止单:{},{} {}@{}' .format(stop_orderid, order.direction, order.price, order.volume)) order.status = Status.CANCELLED order.cancel_time = str(self.last_dt) self.active_stop_orders.pop(stop_orderid, None) if strategy: strategy.on_stop_order(order) def cross_stop_order(self, bar: BarData = None, tick: TickData = None): """ 本地停止单撮合 Cross stop order with last bar/tick data. """ vt_symbol = bar.vt_symbol if bar else tick.vt_symbol for stop_orderid in list(self.active_stop_orders.keys()): stop_order = self.active_stop_orders[stop_orderid] strategy = self.order_strategy_dict.get(stop_orderid, None) if stop_order.vt_symbol != vt_symbol or stop_order is None or strategy is None: continue # 若买入方向停止单价格高于等于该价格,则会触发 if bar: long_cross_price = round_to(value=bar.low_price, target=self.get_price_tick(vt_symbol)) long_cross_price -= self.get_price_tick(vt_symbol) # 若卖出方向停止单价格低于等于该价格,则会触发 short_cross_price = round_to(value=bar.high_price, target=self.get_price_tick(vt_symbol)) short_cross_price += self.get_price_tick(vt_symbol) # 在当前时间点前发出的买入委托可能的最优成交价 long_best_price = round_to(value=bar.open_price, target=self.get_price_tick(vt_symbol)) + self.get_price_tick(vt_symbol) # 在当前时间点前发出的卖出委托可能的最优成交价 short_best_price = round_to(value=bar.open_price, target=self.get_price_tick(vt_symbol)) - self.get_price_tick(vt_symbol) else: long_cross_price = tick.last_price short_cross_price = tick.last_price long_best_price = tick.last_price short_best_price = tick.last_price # Check whether stop order can be triggered. long_cross = stop_order.direction == Direction.LONG and stop_order.price <= long_cross_price short_cross = stop_order.direction == Direction.SHORT and stop_order.price >= short_cross_price if not long_cross and not short_cross: continue # Create order data. self.limit_order_count += 1 symbol, exchange = extract_vt_symbol(vt_symbol) order = OrderData( symbol=symbol, exchange=exchange, orderid=str(self.limit_order_count), direction=stop_order.direction, offset=stop_order.offset, price=stop_order.price, volume=stop_order.volume, status=Status.ALLTRADED, gateway_name=self.gateway_name, ) order.datetime = self.last_dt self.write_log(f'停止单被触发:\n{stop_order.__dict__}\n=>委托单{order.__dict__}') self.limit_orders[order.vt_orderid] = order # Create trade data. if long_cross: trade_price = max(stop_order.price, long_best_price) else: trade_price = min(stop_order.price, short_best_price) self.trade_count += 1 trade = TradeData( symbol=order.symbol, exchange=order.exchange, orderid=order.orderid, tradeid=str(self.trade_count), direction=order.direction, offset=order.offset, price=trade_price, volume=order.volume, time=self.last_dt.strftime("%Y-%m-%d %H:%M:%S"), datetime=self.last_dt, gateway_name=self.gateway_name, ) trade.strategy_name = strategy.strategy_name trade.datetime = self.last_dt self.write_log(f'停止单触发成交:{trade.__dict__}') self.trade_dict[trade.vt_tradeid] = trade self.trades[trade.vt_tradeid] = copy.copy(trade) # Update stop order. stop_order.vt_orderids.append(order.vt_orderid) stop_order.status = StopOrderStatus.TRIGGERED self.active_stop_orders.pop(stop_order.stop_orderid) # Push update to strategy. strategy.on_stop_order(stop_order) strategy.on_order(order) self.append_trade(trade) holding = self.get_position_holding(vt_symbol=trade.vt_symbol, gateway_name=self.gateway_name) holding.update_trade(trade) strategy.on_trade(trade) def cross_limit_order(self, bar: BarData = None, tick: TickData = None): """基于最新数据撮合限价单""" vt_symbol = bar.vt_symbol if bar else tick.vt_symbol # 遍历限价单字典中的所有限价单 for vt_orderid in list(self.active_limit_orders.keys()): order = self.active_limit_orders.get(vt_orderid, None) if order.vt_symbol != vt_symbol: continue strategy = self.order_strategy_dict.get(order.vt_orderid, None) if strategy is None: self.write_error(u'找不到vt_orderid:{}对应的策略'.format(order.vt_orderid)) continue if bar: buy_cross_price = round_to(value=bar.low_price, target=self.get_price_tick(vt_symbol)) + self.get_price_tick( vt_symbol) # 若买入方向限价单价格高于该价格,则会成交 sell_cross_price = round_to(value=bar.high_price, target=self.get_price_tick(vt_symbol)) - self.get_price_tick( vt_symbol) # 若卖出方向限价单价格低于该价格,则会成交 buy_best_cross_price = round_to(value=bar.open_price, target=self.get_price_tick(vt_symbol)) + self.get_price_tick( vt_symbol) # 在当前时间点前发出的买入委托可能的最优成交价 sell_best_cross_price = round_to(value=bar.open_price, target=self.get_price_tick(vt_symbol)) - self.get_price_tick( vt_symbol) # 在当前时间点前发出的卖出委托可能的最优成交价 else: buy_cross_price = tick.last_price sell_cross_price = tick.last_price buy_best_cross_price = tick.last_price sell_best_cross_price = tick.last_price # 判断是否会成交 buy_cross = order.direction == Direction.LONG and order.price >= buy_cross_price sell_cross = order.direction == Direction.SHORT and order.price <= sell_cross_price # 如果发生了成交 if buy_cross or sell_cross: # 推送成交数据 self.trade_count += 1 # 成交编号自增1 trade_id = str(self.trade_count) symbol, exchange = extract_vt_symbol(vt_symbol) trade = TradeData( gateway_name=self.gateway_name, symbol=symbol, exchange=exchange, tradeid=trade_id, orderid=order.orderid, direction=order.direction, offset=order.offset, volume=order.volume, time=self.last_dt.strftime("%Y-%m-%d %H:%M:%S"), datetime=self.last_dt ) # 以买入为例: # 1. 假设当根K线的OHLC分别为:100, 125, 90, 110 # 2. 假设在上一根K线结束(也是当前K线开始)的时刻,策略发出的委托为限价105 # 3. 则在实际中的成交价会是100而不是105,因为委托发出时市场的最优价格是100 if buy_cross: trade_price = min(order.price, buy_best_cross_price) else: trade_price = max(order.price, sell_best_cross_price) # renko bar较为特殊,使用委托价进行成交 if trade.vt_symbol.startswith('future_renko'): trade_price = order.price trade.price = trade_price # 记录该合约来自哪个策略实例 trade.strategy_name = strategy.strategy_name strategy.on_trade(trade) for cov_trade in self.convert_spd_trade(trade): self.trade_dict[cov_trade.vt_tradeid] = cov_trade self.trades[cov_trade.vt_tradeid] = copy.copy(cov_trade) self.write_log(u'vt_trade_id:{0}'.format(cov_trade.vt_tradeid)) # 更新持仓缓存数据 holding = self.get_position_holding(cov_trade.vt_symbol, self.gateway_name) holding.update_trade(cov_trade) self.write_log(u'{} : crossLimitOrder: TradeId:{}, posBuffer = {}'.format(cov_trade.strategy_name, cov_trade.tradeid, holding.to_str())) # 写入交易记录 self.append_trade(cov_trade) # 更新资金曲线 if 'SPD' not in cov_trade.vt_symbol: fund_kline = self.get_fund_kline(cov_trade.strategy_name) if fund_kline: fund_kline.update_trade(cov_trade) # 推送委托数据 order.traded = order.volume order.status = Status.ALLTRADED strategy.on_order(order) # 从字典中删除该限价单 self.active_limit_orders.pop(vt_orderid, None) # 实时计算模式 self.realtime_calculate() def convert_spd_trade(self, trade): """转换为品种对的交易记录""" if trade.exchange != Exchange.SPD: return [trade] try: active_symbol, active_rate, passive_symbol, passive_rate, spd_type = trade.symbol.split('-') active_rate = int(active_rate) passive_rate = int(passive_rate) active_exchange = self.get_exchange(active_symbol) active_vt_symbol = active_symbol + '.' + active_exchange.value passive_exchange = self.get_exchange(passive_symbol) passive_vt_symbol = passive_symbol + '.' + passive_exchange.value # 主动腿成交记录 act_trade = TradeData(gateway_name=self.gateway_name, symbol=active_symbol, exchange=active_exchange, orderid='spd_' + str(trade.orderid), tradeid='spd_act_' + str(trade.tradeid), direction=trade.direction, offset=trade.offset, strategy_name=trade.strategy_name, price=self.get_price(active_vt_symbol), volume=int(trade.volume * active_rate), time=trade.time, datetime=trade.datetime ) # 被动腿成交记录 # 交易方向与spd合约方向相反 pas_trade = TradeData(gateway_name=self.gateway_name, symbol=passive_symbol, exchange=passive_exchange, orderid='spd_' + str(trade.orderid), tradeid='spd_pas_' + str(trade.tradeid), direction=Direction.LONG if trade.direction == Direction.SHORT else Direction.SHORT, offset=trade.offset, strategy_name=trade.strategy_name, time=trade.time, datetime=trade.datetime ) # 根据套利合约的类型+主合约的价格,反向推导出被动合约的价格 if spd_type == 'BJ': pas_trade.price = (act_trade.price * active_rate * 100 / trade.price) / passive_rate else: pas_trade.price = (act_trade.price * active_rate - trade.price) / passive_rate pas_trade.price = round_to(value=pas_trade.price, target=self.get_price_tick(pas_trade.vt_symbol)) pas_trade.volume = int(trade.volume * passive_rate) pas_trade.time = trade.time # 返回原交易记录,主动腿交易记录,被动腿交易记录 return [trade, act_trade, pas_trade] except Exception as ex: self.write_error(u'转换主动/被动腿异常:{}'.format(str(ex))) return [trade] def update_pos_buffer(self): """更新持仓信息,把今仓=>昨仓""" for k, v in self.pos_holding_dict.items(): if v.long_td > 0: self.write_log(u'调整多单持仓:今仓{}=> 0 昨仓{} => 昨仓:{}'.format(v.long_td, v.long_yd, v.long_pos)) v.long_td = 0 v.long_yd = v.long_pos if v.short_td > 0: self.write_log(u'调整空单持仓:今仓{}=> 0 昨仓{} => 昨仓:{}'.format(v.short_td, v.short_yd, v.short_pos)) v.short_td = 0 v.short_yd = v.short_pos def get_data_path(self): """ 获取数据保存目录 :return: """ if self.data_path is not None: data_folder = self.data_path else: data_folder = os.path.abspath(os.path.join(os.getcwd(), 'data')) self.data_path = data_folder if not os.path.exists(data_folder): os.makedirs(data_folder) return data_folder def get_logs_path(self): """ 获取日志保存目录 :return: """ if self.logs_path is not None: logs_folder = self.logs_path else: logs_folder = os.path.abspath(os.path.join(os.getcwd(), 'log')) self.logs_path = logs_folder if not os.path.exists(logs_folder): os.makedirs(logs_folder) return logs_folder def create_logger(self, strategy_name=None, debug=False): """ 创建日志 :param strategy_name 策略实例名称 :param debug:是否详细记录日志 :return: """ if strategy_name is None: filename = os.path.abspath(os.path.join(self.get_logs_path(), '{}'.format( self.test_name if len(self.test_name) > 0 else 'portfolio_test'))) print(u'create logger:{}'.format(filename)) self.logger = setup_logger(file_name=filename, name=self.test_name, log_level=logging.DEBUG if debug else logging.ERROR, backtesing=True) else: filename = os.path.abspath( os.path.join(self.get_logs_path(), '{}_{}'.format(self.test_name, str(strategy_name)))) print(u'create logger:{}'.format(filename)) self.strategy_loggers[strategy_name] = setup_logger(file_name=filename, name=str(strategy_name), log_level=logging.DEBUG if debug else logging.ERROR, backtesing=True) def write_log(self, msg: str, strategy_name: str = None, level: int = logging.DEBUG): """记录日志""" # log = str(self.datetime) + ' ' + content # self.logList.append(log) if strategy_name is None: # 写入本地log日志 if self.logger: self.logger.log(msg=msg, level=level) else: self.create_logger(debug=self.debug) else: if strategy_name in self.strategy_loggers: self.strategy_loggers[strategy_name].log(msg=msg, level=level) else: self.create_logger(strategy_name=strategy_name, debug=self.debug) def write_error(self, msg, strategy_name=None): """记录异常""" if strategy_name is None: if self.logger: self.logger.error(msg) else: self.create_logger(debug=self.debug) else: if strategy_name in self.strategy_loggers: self.strategy_loggers[strategy_name].error(msg) else: self.create_logger(strategy_name=strategy_name, debug=self.debug) try: self.strategy_loggers[strategy_name].error(msg) except Exception as ex: print('{}'.format(datetime.now()), file=sys.stderr) print('could not create cta logger for {},excption:{},trace:{}'.format(strategy_name, str(ex), traceback.format_exc())) print(msg, file=sys.stderr) def output(self, content): """输出内容""" print(self.test_name + "\t" + content) def realtime_calculate(self): """实时计算交易结果 支持多空仓位并存""" if len(self.trade_dict) < 1: return # 获取所有未处理得成交单 vt_tradeids = list(self.trade_dict.keys()) result_list = [] # 保存交易记录 longid = '' shortid = '' # 对交易记录逐一处理 for vt_tradeid in vt_tradeids: trade = self.trade_dict.pop(vt_tradeid, None) if trade is None: continue if trade.volume == 0: continue # buy trade if trade.direction == Direction.LONG and trade.offset == Offset.OPEN: self.write_log(f'{trade.vt_symbol} buy, price:{trade.price},volume:{trade.volume}') # 放入多单仓位队列 self.long_position_list.append(trade) # cover trade, elif trade.direction == Direction.LONG and trade.offset == Offset.CLOSE: g_id = trade.vt_tradeid # 交易组(多个平仓数为一组) g_result = None # 组合的交易结果 cover_volume = trade.volume self.write_log(f'{trade.vt_symbol} cover:{cover_volume}') while cover_volume > 0: # 如果当前没有空单,属于异常行为 if len(self.short_position_list) == 0: self.write_error(u'异常!没有空单持仓,不能cover') # raise Exception(u'异常!没有空单持仓,不能cover') return cur_short_pos_list = [s_pos.volume for s_pos in self.short_position_list if s_pos.vt_symbol == trade.vt_symbol] self.write_log(u'{}当前空单:{}'.format(trade.vt_symbol, cur_short_pos_list)) # 来自同一策略,同一合约才能撮合 pop_indexs = [i for i, val in enumerate(self.short_position_list) if val.vt_symbol == trade.vt_symbol and val.strategy_name == trade.strategy_name] if len(pop_indexs) < 1: if 'spd' in vt_tradeid: self.write_error(f'没有{trade.strategy_name}对应的symbol:{trade.vt_symbol}的空单持仓, 继续') break else: self.write_error( u'异常,{}没有对应symbol:{}的空单持仓, 终止'.format(trade.strategy_name, trade.vt_symbol)) # raise Exception(u'realtimeCalculate2() Exception,没有对应symbol:{0}的空单持仓'.format(trade.vt_symbol)) return pop_index = pop_indexs[0] # 从未平仓的空头交易 open_trade = self.short_position_list.pop(pop_index) # 开空volume,不大于平仓volume if cover_volume >= open_trade.volume: self.write_log(f'cover volume:{cover_volume}, 满足:{open_trade.volume}') cover_volume = cover_volume - open_trade.volume if cover_volume > 0: self.write_log(u'剩余待平数量:{}'.format(cover_volume)) self.write_log( f'{open_trade.vt_symbol} coverd, price: {trade.price},volume:{open_trade.volume}') result = TradingResult(open_price=open_trade.price, open_datetime=open_trade.datetime, exit_price=trade.price, close_datetime=trade.datetime, volume=-open_trade.volume, rate=self.get_commission_rate(trade.vt_symbol), slippage=self.get_slippage(trade.vt_symbol), size=self.get_size(trade.vt_symbol), group_id=g_id, fix_commission=self.get_fix_commission(trade.vt_symbol)) t = OrderedDict() t['gid'] = g_id t['strategy'] = open_trade.strategy_name t['vt_symbol'] = open_trade.vt_symbol t['open_time'] = open_trade.time t['open_price'] = open_trade.price t['direction'] = u'Short' t['close_time'] = trade.time t['close_price'] = trade.price t['volume'] = open_trade.volume t['profit'] = result.pnl t['commission'] = result.commission self.trade_pnl_list.append(t) # 非自定义套利对,才更新到策略盈亏 if not (open_trade.vt_symbol.endswith('SPD') or open_trade.vt_symbol.endswith('SPD99')): # 更新策略实例的累加盈亏 self.pnl_strategy_dict.update( {open_trade.strategy_name: self.pnl_strategy_dict.get(open_trade.strategy_name, 0) + result.pnl}) msg = u'gid:{} {}[{}:开空tid={}:{}]-[{}.平空tid={},{},vol:{}],净盈亏pnl={},手续费:{}' \ .format(g_id, open_trade.vt_symbol, open_trade.time, shortid, open_trade.price, trade.time, vt_tradeid, trade.price, open_trade.volume, result.pnl, result.commission) self.write_log(msg) # 添加到交易结果汇总 result_list.append(result) if g_result is None: if cover_volume > 0: # 属于组合 g_result = copy.deepcopy(result) else: # 更新组合的数据 g_result.turnover = g_result.turnover + result.turnover g_result.commission = g_result.commission + result.commission g_result.slippage = g_result.slippage + result.slippage g_result.pnl = g_result.pnl + result.pnl # 所有仓位平完 if cover_volume == 0: self.write_log(u'所有平空仓位撮合完毕') g_result.volume = abs(trade.volume) # 开空volume,大于平仓volume,需要更新减少tradeDict的数量。 else: remain_volume = open_trade.volume - cover_volume self.write_log(f'{open_trade.vt_symbol} short pos: {open_trade.volume} => {remain_volume}') result = TradingResult(open_price=open_trade.price, open_datetime=open_trade.datetime, exit_price=trade.price, close_datetime=trade.datetime, volume=-cover_volume, rate=self.get_commission_rate(trade.vt_symbol), slippage=self.get_slippage(trade.vt_symbol), size=self.get_size(trade.vt_symbol), group_id=g_id, fix_commission=self.get_fix_commission(trade.vt_symbol)) t = OrderedDict() t['gid'] = g_id t['strategy'] = open_trade.strategy_name t['vt_symbol'] = open_trade.vt_symbol t['open_time'] = open_trade.time t['open_price'] = open_trade.price t['direction'] = u'Short' t['close_time'] = trade.time t['close_price'] = trade.price t['volume'] = cover_volume t['profit'] = result.pnl t['commission'] = result.commission self.trade_pnl_list.append(t) # 非自定义套利对,才更新盈亏 if not (open_trade.vt_symbol.endswith('SPD') or open_trade.vt_symbol.endswith('SPD99')): # 更新策略实例的累加盈亏 self.pnl_strategy_dict.update( {open_trade.strategy_name: self.pnl_strategy_dict.get(open_trade.strategy_name, 0) + result.pnl}) msg = u'gid:{} {}[{}:开空tid={}:{}]-[{}.平空tid={},{},vol:{}],净盈亏pnl={},手续费:{}' \ .format(g_id, open_trade.vt_symbol, open_trade.time, shortid, open_trade.price, trade.time, vt_tradeid, trade.price, cover_volume, result.pnl, result.commission) self.write_log(msg) # 添加到交易结果汇总 result_list.append(result) # 更新(减少)开仓单的volume,重新推进开仓单列表中 open_trade.volume = remain_volume self.write_log(u'更新(减少)开仓单的volume,重新推进开仓单列表中:{}'.format(open_trade.volume)) self.short_position_list.append(open_trade) cur_short_pos_list = [s_pos.volume for s_pos in self.short_position_list] self.write_log(u'当前空单:{}'.format(cur_short_pos_list)) cover_volume = 0 if g_result is not None: # 更新组合的数据 g_result.turnover = g_result.turnover + result.turnover g_result.commission = g_result.commission + result.commission g_result.slippage = g_result.slippage + result.slippage g_result.pnl = g_result.pnl + result.pnl g_result.volume = abs(trade.volume) if g_result is not None: self.write_log(u'组合净盈亏:{0}'.format(g_result.pnl)) # Short Trade elif trade.direction == Direction.SHORT and trade.offset == Offset.OPEN: self.write_log(f'{trade.vt_symbol}, short: price:{trade.price},volume{trade.volume}') self.short_position_list.append(trade) continue # sell trade elif trade.direction == Direction.SHORT and trade.offset == Offset.CLOSE: g_id = trade.vt_tradeid # 交易组(多个平仓数为一组) g_result = None # 组合的交易结果 sell_volume = trade.volume while sell_volume > 0: if len(self.long_position_list) == 0: self.write_error(f'异常,没有{trade.vt_symbol}的多仓') # raise RuntimeError(u'realtimeCalculate2() Exception,没有开多单') return pop_indexs = [i for i, val in enumerate(self.long_position_list) if val.vt_symbol == trade.vt_symbol and val.strategy_name == trade.strategy_name] if len(pop_indexs) < 1: if 'spd' in vt_tradeid: self.write_error(f'没有{trade.strategy_name}对应的symbol:{trade.vt_symbol}多单数据, 继续') break else: self.write_error(f'没有{trade.strategy_name}对应的symbol:{trade.vt_symbol}多单数据, 终止') # raise RuntimeError(f'realtimeCalculate2() Exception,没有对应的symbol:{trade.vt_symbol}多单数据,') return cur_long_pos_list = [s_pos.volume for s_pos in self.long_position_list if s_pos.vt_symbol == trade.vt_symbol] self.write_log(u'{}当前多单:{}'.format(trade.vt_symbol, cur_long_pos_list)) pop_index = pop_indexs[0] open_trade = self.long_position_list.pop(pop_index) # 开多volume,不大于平仓volume if sell_volume >= open_trade.volume: self.write_log(f'{open_trade.vt_symbol},Sell Volume:{sell_volume} 满足:{open_trade.volume}') sell_volume = sell_volume - open_trade.volume self.write_log(f'{open_trade.vt_symbol},sell, price:{trade.price},volume:{open_trade.volume}') result = TradingResult(open_price=open_trade.price, open_datetime=open_trade.datetime, exit_price=trade.price, close_datetime=trade.datetime, volume=open_trade.volume, rate=self.get_commission_rate(trade.vt_symbol), slippage=self.get_slippage(trade.vt_symbol), size=self.get_size(trade.vt_symbol), group_id=g_id, fix_commission=self.get_fix_commission(trade.vt_symbol)) t = OrderedDict() t['gid'] = g_id t['strategy'] = open_trade.strategy_name t['vt_symbol'] = open_trade.vt_symbol t['open_time'] = open_trade.time t['open_price'] = open_trade.price t['direction'] = u'Long' t['close_time'] = trade.time t['close_price'] = trade.price t['volume'] = open_trade.volume t['profit'] = result.pnl t['commission'] = result.commission self.trade_pnl_list.append(t) # 非自定义套利对,才更新盈亏 if not (open_trade.vt_symbol.endswith('SPD') or open_trade.vt_symbol.endswith('SPD99')): # 更新策略实例的累加盈亏 self.pnl_strategy_dict.update( {open_trade.strategy_name: self.pnl_strategy_dict.get(open_trade.strategy_name, 0) + result.pnl}) msg = u'gid:{} {}[{}:开多tid={}:{}]-[{}.平多tid={},{},vol:{}],净盈亏pnl={},手续费:{}' \ .format(g_id, open_trade.vt_symbol, open_trade.time, longid, open_trade.price, trade.time, vt_tradeid, trade.price, open_trade.volume, result.pnl, result.commission) self.write_log(msg) # 添加到交易结果汇总 result_list.append(result) if g_result is None: if sell_volume > 0: # 属于组合 g_result = copy.deepcopy(result) else: # 更新组合的数据 g_result.turnover = g_result.turnover + result.turnover g_result.commission = g_result.commission + result.commission g_result.slippage = g_result.slippage + result.slippage g_result.pnl = g_result.pnl + result.pnl if sell_volume == 0: g_result.volume = abs(trade.volume) # 开多volume,大于平仓volume,需要更新减少tradeDict的数量。 else: remain_volume = open_trade.volume - sell_volume self.write_log(f'{open_trade.vt_symbol} short pos: {open_trade.volume} => {remain_volume}') result = TradingResult(open_price=open_trade.price, open_datetime=open_trade.datetime, exit_price=trade.price, close_datetime=trade.datetime, volume=sell_volume, rate=self.get_commission_rate(trade.vt_symbol), slippage=self.get_slippage(trade.vt_symbol), size=self.get_size(trade.vt_symbol), group_id=g_id, fix_commission=self.get_fix_commission(trade.vt_symbol)) t = OrderedDict() t['gid'] = g_id t['strategy'] = open_trade.strategy_name t['vt_symbol'] = open_trade.vt_symbol t['open_time'] = open_trade.time t['open_price'] = open_trade.price t['direction'] = u'Long' t['close_time'] = trade.time t['close_price'] = trade.price t['volume'] = sell_volume t['profit'] = result.pnl t['commission'] = result.commission self.trade_pnl_list.append(t) # 非自定义套利对,才更新盈亏 if not (open_trade.vt_symbol.endswith('SPD') or open_trade.vt_symbol.endswith('SPD99')): # 更新策略实例的累加盈亏 self.pnl_strategy_dict.update( {open_trade.strategy_name: self.pnl_strategy_dict.get(open_trade.strategy_name, 0) + result.pnl}) msg = u'Gid:{} {}[{}:开多tid={}:{}]-[{}.平多tid={},{},vol:{}],净盈亏pnl={},手续费:{}' \ .format(g_id, open_trade.vt_symbol, open_trade.time, longid, open_trade.price, trade.time, vt_tradeid, trade.price, sell_volume, result.pnl, result.commission) self.write_log(msg) # 添加到交易结果汇总 result_list.append(result) # 减少开多volume,重新推进多单持仓列表中 open_trade.volume = remain_volume self.long_position_list.append(open_trade) sell_volume = 0 if g_result is not None: # 更新组合的数据 g_result.turnover = g_result.turnover + result.turnover g_result.commission = g_result.commission + result.commission g_result.slippage = g_result.slippage + result.slippage g_result.pnl = g_result.pnl + result.pnl g_result.volume = abs(trade.volume) if g_result is not None: self.write_log(u'组合净盈亏:{0}'.format(g_result.pnl)) # 计算仓位比例 occupy_money = 0.0 # 保证金 occupy_long_money_dict = {} # 多单保证金,key为合约短号,value为保证金 occupy_short_money_dict = {} # 空单保证金,key为合约短号,value为保证金 occupy_underly_symbol_set = set() # 所有合约短号 long_pos_dict = {} short_pos_dict = {} if len(self.long_position_list) > 0: for t in self.long_position_list: # 不计算套利合约的持仓占用保证金 if t.vt_symbol.endswith('SPD') or t.vt_symbol.endswith('SPD99'): continue # 当前持仓的保证金 if self.use_margin: cur_occupy_money = t.price * abs(t.volume) * self.get_size(t.vt_symbol) * self.get_margin_rate( t.vt_symbol) else: cur_occupy_money = self.get_price(t.vt_symbol) * abs(t.volume) * self.get_size( t.vt_symbol) * self.get_margin_rate(t.vt_symbol) # 更新该合约短号的累计保证金 underly_symbol = get_underlying_symbol(t.symbol) occupy_underly_symbol_set.add(underly_symbol) occupy_long_money_dict.update( {underly_symbol: occupy_long_money_dict.get(underly_symbol, 0) + cur_occupy_money}) if t.vt_symbol in long_pos_dict: long_pos_dict[t.vt_symbol] += abs(t.volume) else: long_pos_dict[t.vt_symbol] = abs(t.volume) if len(self.short_position_list) > 0: for t in self.short_position_list: # 不计算套利合约的持仓占用保证金 if t.vt_symbol.endswith('SPD') or t.vt_symbol.endswith('SPD99'): continue cur_occupy_money = 0 # 当前空单保证金 if self.use_margin: try: cur_occupy_money = max(self.get_price(t.vt_symbol), t.price) * abs(t.volume) * self.get_size( t.vt_symbol) * self.get_margin_rate(t.vt_symbol) except Exception as ex: self.write_error(ex) else: cur_occupy_money = self.get_price(t.vt_symbol) * abs(t.volume) * self.get_size( t.vt_symbol) * self.get_margin_rate(t.vt_symbol) # 该合约短号的累计空单保证金 underly_symbol = get_underlying_symbol(t.symbol) occupy_underly_symbol_set.add(underly_symbol) occupy_short_money_dict.update( {underly_symbol: occupy_short_money_dict.get(underly_symbol, 0) + cur_occupy_money}) if t.vt_symbol in short_pos_dict: short_pos_dict[t.vt_symbol] += abs(t.volume) else: short_pos_dict[t.vt_symbol] = abs(t.volume) # 计算多空的保证金累加(对锁的取最大值) for underly_symbol in occupy_underly_symbol_set: occupy_money += max(occupy_long_money_dict.get(underly_symbol, 0), occupy_short_money_dict.get(underly_symbol, 0)) # 可用资金 = 当前净值 - 占用保证金 self.available = self.net_capital - occupy_money # 当前保证金占比 self.percent = round(float(occupy_money * 100 / self.net_capital), 2) # 更新最大保证金占比 self.max_occupy_rate = max(self.max_occupy_rate, self.percent) # 检查是否有平交易 if len(result_list) == 0: msg = u'' if len(self.long_position_list) > 0: msg += u'持多仓{0},'.format(str(long_pos_dict)) if len(self.short_position_list) > 0: msg += u'持空仓{0},'.format(str(short_pos_dict)) msg += u'资金占用:{0},仓位:{1}%%'.format(occupy_money, self.percent) self.write_log(msg) return # 对交易结果汇总统计 for result in result_list: if result.pnl > 0: self.winning_result += 1 self.total_winning += result.pnl else: self.losing_result += 1 self.total_losing += result.pnl self.cur_capital += result.pnl self.max_capital = max(self.cur_capital, self.max_capital) self.net_capital = max(self.net_capital, self.cur_capital) self.max_net_capital = max(self.net_capital, self.max_net_capital) # self.maxVolume = max(self.maxVolume, result.volume) drawdown = self.net_capital - self.max_net_capital drawdown_rate = round(float(drawdown * 100 / self.max_net_capital), 4) self.pnl_list.append(result.pnl) self.time_list.append(result.close_datetime) self.capital_list.append(self.cur_capital) self.drawdown_list.append(drawdown) self.drawdown_rate_list.append(drawdown_rate) self.total_trade_count += 1 self.total_turnover += result.turnover self.total_commission += result.commission self.total_slippage += result.slippage msg = u'[gid:{}] {} 交易盈亏:{},交易手续费:{}回撤:{}/{},账号平仓权益:{},持仓权益:{},累计手续费:{}' \ .format(result.group_id, result.close_datetime, result.pnl, result.commission, drawdown, drawdown_rate, self.cur_capital, self.net_capital, self.total_commission) self.write_log(msg) # 重新计算一次avaliable self.available = self.net_capital - occupy_money self.percent = round(float(occupy_money * 100 / self.net_capital), 2) def saving_daily_data(self, d, c, m, commission, benchmark=0): """保存每日数据""" data = {} data['date'] = d.strftime('%Y/%m/%d') # 日期 data['capital'] = c # 当前平仓净值 data['max_capital'] = m # 之前得最高净值 today_holding_profit = 0 # 持仓浮盈 long_pos_occupy_money = 0 short_pos_occupy_money = 0 strategy_pnl = {} for strategy in self.strategies.keys(): strategy_pnl.update({strategy: self.pnl_strategy_dict.get(strategy, 0)}) positionMsg = "" for longpos in self.long_position_list: # 不计算套利合约的持仓盈亏 if longpos.vt_symbol.endswith('SPD') or longpos.vt_symbol.endswith('SPD99'): continue symbol = longpos.vt_symbol # 计算持仓浮盈浮亏/占用保证金 holding_profit = 0 last_price = self.get_price(symbol) if last_price is not None: holding_profit = (last_price - longpos.price) * longpos.volume * self.get_size(symbol) long_pos_occupy_money += last_price * abs(longpos.volume) * self.get_size( symbol) * self.get_margin_rate(symbol) # 账号的持仓盈亏 today_holding_profit += holding_profit # 计算每个策略实例的持仓盈亏 strategy_pnl.update({longpos.strategy_name: strategy_pnl.get(longpos.strategy_name, 0) + holding_profit}) positionMsg += "{:<10},long ,p:{:<10},vol:{:<3},pnl:{};\n".format(symbol, round(longpos.price,3), longpos.volume, round(holding_profit,3)) for shortpos in self.short_position_list: # 不计算套利合约的持仓盈亏 if shortpos.vt_symbol.endswith('SPD') or shortpos.vt_symbol.endswith('SPD99'): continue symbol = shortpos.vt_symbol # 计算持仓浮盈浮亏/占用保证金 holding_profit = 0 last_price = self.get_price(symbol) if last_price is not None: holding_profit = (shortpos.price - last_price) * shortpos.volume * self.get_size(symbol) short_pos_occupy_money += last_price * abs(shortpos.volume) * self.get_size( symbol) * self.get_margin_rate(symbol) # 账号的持仓盈亏 today_holding_profit += holding_profit # 计算每个策略实例的持仓盈亏 strategy_pnl.update({shortpos.strategy_name: strategy_pnl.get(shortpos.strategy_name, 0) + holding_profit}) positionMsg += "{:<10},short,p:{:<10},vol:{:<3},pnl:{};\n".format(symbol, round(shortpos.price,3), shortpos.volume, round(holding_profit,3)) data['net'] = c + today_holding_profit # 当日净值(含持仓盈亏) data['rate'] = (c + today_holding_profit) / self.init_capital data['occupy_money'] = max(long_pos_occupy_money, short_pos_occupy_money) data['occupy_rate'] = data['occupy_money'] / data['capital'] data['commission'] = commission data.update(self.price_dict) data.update(strategy_pnl) self.daily_list.append(data) # 更新每日浮动净值 self.net_capital = data['net'] # 更新最大初次持仓浮盈净值 if data['net'] > self.max_net_capital: self.max_net_capital = data['net'] self.max_net_capital_time = data['date'] drawdown_rate = round((float(self.max_net_capital - data['net']) * 100) / self.max_net_capital, 4) if drawdown_rate > self.daily_max_drawdown_rate: self.daily_max_drawdown_rate = drawdown_rate self.max_drawdown_rate_time = data['date'] msg = u'{}: net={}, capital={} max={} holding_profit={} commission={}, pos: \n{}' \ .format(data['date'], data['net'], c, m, today_holding_profit, commission, positionMsg) if not self.debug: self.output(msg) else: self.write_log(msg) # 今仓 =》 昨仓 for holding in self.holdings.values(): if holding.long_td > 0: self.write_log( f'{holding.vt_symbol} 多单今仓{holding.long_td},昨仓:{holding.long_yd}=> 昨仓:{holding.long_pos}') holding.long_td = 0 holding.long_yd = holding.long_pos if holding.short_td > 0: self.write_log( f'{holding.vt_symbol} 空单今仓{holding.short_td},昨仓:{holding.short_yd}=> 昨仓:{holding.short_pos}') holding.short_td = 0 holding.short_yd = holding.short_pos # --------------------------------------------------------------------- def export_trade_result(self): """ 导出交易结果(开仓-》平仓, 平仓收益) 导出每日净值结果表 :return: """ if len(self.trade_pnl_list) == 0: self.write_log('no traded records') return s = self.test_name.replace('&', '') s = s.replace(' ', '') trade_list_csv_file = os.path.abspath(os.path.join(self.get_logs_path(), '{}_trade_list.csv'.format(s))) self.write_log(u'save trade records to:{}'.format(trade_list_csv_file)) import csv csv_write_file = open(trade_list_csv_file, 'w', encoding='utf8', newline='') fieldnames = ['gid', 'strategy', 'vt_symbol', 'open_time', 'open_price', 'direction', 'close_time', 'close_price', 'volume', 'profit', 'commission'] writer = csv.DictWriter(f=csv_write_file, fieldnames=fieldnames, dialect='excel') writer.writeheader() for row in self.trade_pnl_list: writer.writerow(row) # 导出每日净值记录表 if not self.daily_list: return if self.daily_report_name == '': daily_csv_file = os.path.abspath(os.path.join(self.get_logs_path(), '{}_daily_list.csv'.format(s))) else: daily_csv_file = self.daily_report_name self.write_log(u'save daily records to:{}'.format(daily_csv_file)) csv_write_file2 = open(daily_csv_file, 'w', encoding='utf8', newline='') fieldnames = ['date', 'capital', 'net', 'max_capital', 'rate', 'commission', 'long_money', 'short_money', 'occupy_money', 'occupy_rate', 'today_margin_long', 'today_margin_short'] # 添加合约的每日close价 fieldnames.extend(sorted(self.price_dict.keys())) # 添加策略列表 fieldnames.extend(sorted(self.strategies.keys())) writer2 = csv.DictWriter(f=csv_write_file2, fieldnames=fieldnames, dialect='excel') writer2.writeheader() for row in self.daily_list: writer2.writerow(row) if self.is_plot_daily: # 生成净值曲线图片 df = pd.DataFrame(self.daily_list) df = df.set_index('date') from vnpy.trader.utility import display_dual_axis plot_file = os.path.abspath(os.path.join(self.get_logs_path(), '{}_plot.png'.format(s))) # 双坐标输出,左侧坐标是净值(比率),右侧是各策略的实际资金收益曲线 display_dual_axis(df=df, columns1=['rate'], columns2=list(self.strategies.keys()), image_name=plot_file) return def get_result(self): # 返回回测结果 d = {} d['init_capital'] = self.init_capital d['profit'] = self.cur_capital - self.init_capital d['net_capital'] = self.net_capital d['max_capital'] = self.max_net_capital # 取消原 maxCapital if len(self.pnl_list) == 0: return {}, [], [] d['max_pnl'] = max(self.pnl_list) d['min_pnl'] = min(self.pnl_list) d['max_occupy_rate'] = self.max_occupy_rate d['total_trade_count'] = self.total_trade_count d['total_turnover'] = self.total_turnover d['total_commission'] = self.total_commission d['total_slippage'] = self.total_slippage d['time_list'] = self.time_list d['pnl_list'] = self.pnl_list d['capital_list'] = self.capital_list d['drawdown_list'] = self.drawdown_list d['drawdown_rate_list'] = self.drawdown_rate_list # 净值最大回撤率列表 d['winning_rate'] = round(100 * self.winning_result / len(self.pnl_list), 4) average_winning = 0 # 这里把数据都初始化为0 average_losing = 0 profit_loss_ratio = 0 if self.winning_result: average_winning = self.total_winning / self.winning_result # 平均每笔盈利 if self.losing_result: average_losing = self.total_losing / self.losing_result # 平均每笔亏损 if average_losing: profit_loss_ratio = -average_winning / average_losing # 盈亏比 d['average_winning'] = average_winning d['average_losing'] = average_losing d['profit_loss_ratio'] = profit_loss_ratio # 计算Sharp if not self.daily_list: return {}, [], [] capital_net_list = [] capital_list = [] for row in self.daily_list: capital_net_list.append(row['net']) capital_list.append(row['capital']) capital = pd.Series(capital_net_list) log_returns = np.log(capital).diff().fillna(0) sharpe = (log_returns.mean() * 252) / (log_returns.std() * np.sqrt(252)) d['sharpe'] = sharpe return d, capital_net_list, capital_list def show_backtesting_result(self): """显示回测结果""" # 导出资金曲线 if self.active_fund_kline: for key in self.fund_kline_dict.keys(): self.save_fund_kline(key) d, daily_net_capital, daily_capital = self.get_result() if len(d) == 0: self.output(u'无交易结果') return {}, '' # 导出交易清单 self.export_trade_result() result_info = OrderedDict() # 输出 self.output('-' * 30) result_info.update({u'第一笔交易': str(d['time_list'][0])}) self.output(u'第一笔交易:\t%s' % d['time_list'][0]) result_info.update({u'最后一笔交易': str(d['time_list'][-1])}) self.output(u'最后一笔交易:\t%s' % d['time_list'][-1]) result_info.update({u'总交易次数': d['total_trade_count']}) self.output(u'总交易次数:\t%s' % format_number(d['total_trade_count'])) result_info.update({u'期初资金': d['init_capital']}) self.output(u'期初资金:\t%s' % format_number(d['init_capital'])) result_info.update({u'期末资金': d['net_capital']}) self.output(u'期末资金:\t%s' % format_number(d['net_capital'])) result_info.update({u'平仓盈亏': d['profit']}) self.output(u'平仓盈亏:\t%s' % format_number(d['profit'])) result_info.update({u'资金最高净值': d['max_capital']}) self.output(u'资金最高净值:\t%s' % format_number(d['max_capital'])) result_info.update({u'资金最高净值时间': str(self.max_net_capital_time)}) self.output(u'资金最高净值时间:\t%s' % self.max_net_capital_time) result_info.update({u'每笔最大盈利': d['max_pnl']}) self.output(u'每笔最大盈利:\t%s' % format_number(d['max_pnl'])) result_info.update({u'每笔最大亏损': d['min_pnl']}) self.output(u'每笔最大亏损:\t%s' % format_number(d['min_pnl'])) result_info.update({u'净值最大回撤': min(d['drawdown_list'])}) self.output(u'净值最大回撤: \t%s' % format_number(min(d['drawdown_list']))) result_info.update({u'净值最大回撤率': self.daily_max_drawdown_rate}) self.output(u'净值最大回撤率: \t%s' % format_number(self.daily_max_drawdown_rate)) result_info.update({u'净值最大回撤时间': str(self.max_drawdown_rate_time)}) self.output(u'净值最大回撤时间:\t%s' % self.max_drawdown_rate_time) result_info.update({u'胜率': d['winning_rate']}) self.output(u'胜率:\t%s' % format_number(d['winning_rate'])) result_info.update({u'盈利交易平均值': d['average_winning']}) self.output(u'盈利交易平均值\t%s' % format_number(d['average_winning'])) result_info.update({u'亏损交易平均值': d['average_losing']}) self.output(u'亏损交易平均值\t%s' % format_number(d['average_losing'])) result_info.update({u'盈亏比': d['profit_loss_ratio']}) self.output(u'盈亏比:\t%s' % format_number(d['profit_loss_ratio'])) result_info.update({u'最大资金占比': d['max_occupy_rate']}) self.output(u'最大资金占比:\t%s' % format_number(d['max_occupy_rate'])) result_info.update({u'平均每笔盈利': d['profit'] / d['total_trade_count']}) self.output(u'平均每笔盈利:\t%s' % format_number(d['profit'] / d['total_trade_count'])) result_info.update({u'平均每笔滑点成本': d['total_slippage'] / d['total_trade_count']}) self.output(u'平均每笔滑点成本:\t%s' % format_number(d['total_slippage'] / d['total_trade_count'])) result_info.update({u'平均每笔佣金': d['total_commission'] / d['total_trade_count']}) self.output(u'平均每笔佣金:\t%s' % format_number(d['total_commission'] / d['total_trade_count'])) result_info.update({u'Sharpe Ratio': d['sharpe']}) self.output(u'Sharpe Ratio:\t%s' % format_number(d['sharpe'])) # 保存回测结果/交易记录/日线统计 至数据库 self.save_result_to_mongo(result_info) return result_info def save_setting_to_mongo(self): """ 保存测试设置到mongo中""" self.task_id = self.test_setting.get('task_id', str(uuid1())) # 保存到mongo得配置 save_mongo = self.test_setting.get('save_mongo', {}) if len(save_mongo) == 0: return if not self.mongo_api: self.mongo_api = MongoData(host=save_mongo.get('host', 'localhost'), port=save_mongo.get('port', 27017)) d = { 'task_id': self.task_id, # 单实例回测任务id 'name': self.test_name, # 回测实例名称, 策略名+参数+时间 'group_id': self.test_setting.get('group_id', datetime.now().strftime('%y-%m-%d')), # 回测组合id 'status': 'start', 'task_start_time': datetime.now(), # 任务开始执行时间 'run_host': socket.gethostname(), # 任务运行得host主机 'test_setting': self.test_setting, # 回测参数 'strategy_setting': binary.Binary(zlib.compress(pickle.dumps(self.strategy_setting))) # 策略参数(二进制保存) } # 去除包含"."的域 if 'symbol_datas' in d['test_setting'].keys(): d['test_setting'].pop('symbol_datas') # 保存入数据库 self.mongo_api.db_insert( db_name=self.gateway_name, col_name='tasks', d=d) def save_fail_to_mongo(self, fail_msg): # 保存到mongo得配置 save_mongo = self.test_setting.get('save_mongo', {}) if len(save_mongo) == 0: return if not self.mongo_api: self.mongo_api = MongoData(host=save_mongo.get('host', 'localhost'), port=save_mongo.get('port', 27017)) # 更新数据到数据库回测记录中 flt = {'task_id': self.task_id} d = self.mongo_api.db_query_one( db_name=self.gateway_name, col_name='tasks', flt=flt) if d: d.update({'status': 'fail'}) # 更新状态未完成 d.update({'fail_msg': fail_msg}) self.write_log(u'更新回测结果至数据库') self.mongo_api.db_update( db_name=self.gateway_name, col_name='tasks', filter_dict=flt, data_dict=d, replace=False) def save_result_to_mongo(self, result_info): # 保存到mongo得配置 save_mongo = self.test_setting.get('save_mongo', {}) if len(save_mongo) == 0: return if not self.mongo_api: self.mongo_api = MongoData(host=save_mongo.get('host', 'localhost'), port=save_mongo.get('port', 27017)) # 更新数据到数据库回测记录中 flt = {'task_id': self.task_id} d = self.mongo_api.db_query_one( db_name=self.gateway_name, col_name='tasks', flt=flt) if d: d.update({'status': 'finish'}) # 更新状态未完成 d.update(result_info) # 补充回测结果 d.update({'task_finish_time': datetime.now()}) # 更新回测完成时间 d.update({'trade_list': binary.Binary(zlib.compress(pickle.dumps(self.trade_pnl_list)))}) # 更新交易记录 d.update({'daily_list': binary.Binary(zlib.compress(pickle.dumps(self.daily_list)))}) # 更新每日净值记录 strategy_json_datas = self.get_all_strategy_json_data() d.update({'strategy_json_datas': binary.Binary(zlib.compress(pickle.dumps(strategy_json_datas)))}) self.write_log(u'更新回测结果至数据库') self.mongo_api.db_update( db_name=self.gateway_name, col_name='tasks', filter_dict=flt, data_dict=d, replace=False) def get_all_strategy_json_data(self): """获取所有策略得json数据""" data = {} for strategy_name, strategy in self.strategies.items(): try: strategy_data = {} if hasattr(strategy, 'get_policy_json'): policy_json = strategy.get_policy_json() if policy_json: strategy_data['Policy'] = policy_json if hasattr(strategy, 'get_grid_trade_json'): grid_trade_json = strategy.get_grid_trade_json() if grid_trade_json: strategy_data['Grids'] = grid_trade_json if strategy_data: data[strategy_name] = strategy_data except Exception as ex: self.write_error(msg=f'获取策略{strategy_name}得json数据异常.{str(ex)}') return data def put_strategy_event(self, strategy: CtaTemplate): """发送策略更新事件,回测中忽略""" pass def clear_backtesting_result(self): """清空之前回测的结果""" # 清空限价单相关 self.limit_order_count = 0 self.limit_orders.clear() self.active_limit_orders.clear() # 清空成交相关 self.trade_count = 0 self.trade_dict.clear() self.trades.clear() self.trade_pnl_list = [] self.last_bar.clear() self.last_dt = None def append_trade(self, trade: TradeData): """ 根据策略名称,写入 logs\test_name_straetgy_name_trade.csv文件 :param trade: :return: """ strategy_name = getattr(trade, 'strategy_name', self.test_name) trade_fields = ['symbol', 'exchange', 'vt_symbol', 'tradeid', 'vt_tradeid', 'orderid', 'vt_orderid', 'direction', 'offset', 'price', 'volume', 'time'] d = OrderedDict() try: for k in trade_fields: if k in ['exchange', 'direction', 'offset']: d[k] = getattr(trade, k).value else: d[k] = getattr(trade, k, '') trade_file = os.path.abspath(os.path.join(self.get_logs_path(), '{}_trade.csv'.format(strategy_name))) self.append_data(file_name=trade_file, dict_data=d) except Exception as ex: self.write_error(u'写入交易记录csv出错:{},{}'.format(str(ex), traceback.format_exc())) # 保存记录相关 def append_data(self, file_name: str, dict_data: OrderedDict, field_names: list = None): """ 添加数据到csv文件中 :param file_name: csv的文件全路径 :param dict_data: OrderedDict :return: """ if field_names is None or field_names == []: dict_fieldnames = list(dict_data.keys()) else: dict_fieldnames = field_names try: if not os.path.exists(file_name): self.write_log(u'create csv file:{}'.format(file_name)) with open(file_name, 'a', encoding='utf8', newline='') as csvWriteFile: writer = csv.DictWriter(f=csvWriteFile, fieldnames=dict_fieldnames, dialect='excel') self.write_log(u'write csv header:{}'.format(dict_fieldnames)) writer.writeheader() writer.writerow(dict_data) else: with open(file_name, 'a', encoding='utf8', newline='') as csvWriteFile: writer = csv.DictWriter(f=csvWriteFile, fieldnames=dict_fieldnames, dialect='excel', extrasaction='ignore') writer.writerow(dict_data) except Exception as ex: self.write_error(u'append_data exception:{}'.format(str(ex))) ######################################################################## class TradingResult(object): """每笔交易的结果""" def __init__(self, open_price, open_datetime, exit_price, close_datetime, volume, rate, slippage, size, group_id, fix_commission=0.0): """Constructor""" self.open_price = open_price # 开仓价格 self.exit_price = exit_price # 平仓价格 self.open_datetime = open_datetime # 开仓时间datetime self.close_datetime = close_datetime # 平仓时间 self.volume = volume # 交易数量(+/-代表方向) self.group_id = group_id # 主交易ID(针对多手平仓) self.turnover = (self.open_price + self.exit_price) * size * abs(volume) # 成交金额 if fix_commission > 0: self.commission = fix_commission * abs(self.volume) else: self.commission = abs(self.turnover * rate) # 手续费成本 self.slippage = slippage * 2 * size * abs(volume) # 滑点成本 self.pnl = ((self.exit_price - self.open_price) * volume * size - self.commission - self.slippage) # 净盈亏
65,329
598
// Copyright (c) Microsoft Corporation. // Licensed under the MIT license. #ifndef _MARO_BACKENDS_RAW_SNAPSHOTLIST_ #define _MARO_BACKENDS_RAW_SNAPSHOTLIST_ #include <map> #include <vector> #include <string> #include <iostream> #include "common.h" #include "attribute.h" #include "node.h" #include "frame.h" using namespace std; namespace maro { namespace backends { namespace raw { #define MAX(a, b) a > b ? a : b /// <summary> /// Shape of current querying. /// </summary> struct SnapshotQueryResultShape { // Number of attribute in result. USHORT attr_number = 0; // Number of ticks in result. int tick_number = 0; // Number of slot in result, include padding slot. SLOT_INDEX max_slot_number = 0; // Number of node in result, include padding nodes. NODE_INDEX max_node_number = 0; }; /// <summary> /// Snapshot list used to hold snapshot of current frame at specified tick. /// </summary> class SnapshotList { /// <summary> /// Querying parameter from prepare step. /// </summary> struct SnapshotQueryParameters { // Is this query for list? bool is_list = false; // For furthur querying, these fields would be changed by prepare function. NODE_TYPE node_type = 0; // List of ticks to query. int* ticks = nullptr; // Number of ticks in tick list. UINT tick_length = 0; // List of node instance index to query. NODE_INDEX* node_indices = nullptr; // Node number UINT node_length = 0; // Attributes to query. ATTR_TYPE* attributes = nullptr; // Number of attribute to query. UINT attr_length = 0; // Max slot number in result, for padding. SLOT_INDEX max_slot_number = 0; /// <summary> /// Reset current parameter after querying. /// </summary> void reset(); }; private: // Tick and its snapshot frame, we will keep a copy of frame. map<int, Frame> _snapshots; // Max size of snapshot is memory. USHORT _max_size = 0; // Current frame that used to copy. Frame* _cur_frame; // Used to hold parameters from prepare function. SnapshotQueryParameters _query_parameters; // Is prepare function called? bool _is_prepared = false; // Default attribute for invalid attribute, for padding. Attribute _nan_attr = NAN; // Query state for list attribute. // NOTE: for list attribute, we only support 1 tick, 1 attribute, 1 node. // and node cannot be null. If ticks not provided, then use latest tick. void query_for_list(QUERY_FLOAT* result); // Query for normal attributes. void query_for_normal(QUERY_FLOAT* result); // Get attribute from specified tick, this function will not throw exception, it will return a NAN attribute // if invalid. Attribute& get_attr(int tick, NODE_INDEX node_index, ATTR_TYPE attr_type, SLOT_INDEX slot_index) noexcept; // Make sure currect frame not null. inline void ensure_cur_frame(); // Make sure max size greater than 0. inline void ensure_max_size(); inline void write_attribute(ofstream &file, int tick, NODE_INDEX node_index, ATTR_TYPE attr_type, SLOT_INDEX slot_index); public: /// <summary> /// Set max size of snapshot in memory. /// </summary> /// <param name="max_size">Max size to set.</param> void set_max_size(USHORT max_size); /// <summary> /// Setup snapshot list with current frame. /// </summary> /// <param name="frame">Current frame that used for snapshots.</param> void setup(Frame* frame); /// <summary> /// Take snapshot for specified tick. /// </summary> /// <param name="ticks">Tick to take snapshot.</param> void take_snapshot(int ticks); /// <summary> /// Current size of snapshots. /// </summary> /// <returns>Number of current snapshots.</returns> UINT size() const noexcept; /// <summary> /// Get max size of current snapshot list. /// </summary> /// <returns>Max number of snapshot list.</returns> UINT max_size() const noexcept; /// <summary> /// Reset snapshot list states. /// </summary> void reset(); /// <summary> /// Dump current snapshots into folder, node will be split into different files. /// </summary> void dump(string path); /// <summary> /// Get avaiable ticks from snapshot list. /// </summary> /// <param name="result">List pointer to hold ticks.</param> void get_ticks(int* result) const; /// <summary> /// Get current max node number for specified node type. /// </summary> /// <param name="node_type">Target node type.</param> /// <returns>Max node number.</returns> NODE_INDEX get_max_node_number(NODE_TYPE node_type) const; /// <summary> /// Prepare for querying. /// </summary> /// <param name="node_type">Target node type.</param> /// <param name="ticks">Ticks to query, leave it as null to retrieve all avaible ticks from snapshots. /// NOTE: if it is null, then use latest tick for list attribute querying.</param> /// <param name="tick_length">Number of ticks to query.</param> /// <param name="node_indices">Indices of node instance to query, leave it as null to retrieve all node instance from snapshots. /// NOTE: it cannot be null if qury for list attribute</param> /// <param name="node_length">Number of node instance to query.</param> /// <param name="attributes">Attribute type list to query, cannot be null. /// NOTE: if first attribute if a list attribute, then there will be a list querying, means only support 1 tick, 1 node, 1 attribute querying. /// </param> /// <param name="attr_length">Target node type.</param> /// <returns>Result shape for input query parameters.</returns> SnapshotQueryResultShape prepare(NODE_TYPE node_type, int ticks[], UINT tick_length, NODE_INDEX node_indices[], UINT node_length, ATTR_TYPE attributes[], UINT attr_length); /// <summary> /// Qeury with parameters from prepare function. /// </summary> /// <param name="result">Pointer to list to hold result value. NOTE: query function will leave the default value for padding.</param> void query(QUERY_FLOAT* result); /// <summary> /// Cancel current querying, this will clear the parameters from last prepare calling. /// </summary> void cancel_query() noexcept; }; /// <summary> /// Tick not supported, like negative tick /// </summary> struct SnapshotTickError : public exception { const char* what() const noexcept override; }; /// <summary> /// Snapshot list max size is 0 /// </summary> struct SnapshotSizeError : public exception { const char* what() const noexcept override; }; /// <summary> /// Query without call prepare function /// </summary> struct SnapshotQueryNotPreparedError : public exception { const char* what() const noexcept override; }; /// <summary> /// Attribute not exist when querying /// </summary> struct SnapshotQueryNoAttributesError : public exception { const char* what() const noexcept override; }; /// <summary> /// Frame not set before operations /// </summary> struct SnapshotInvalidFrameStateError : public exception { const char* what() const noexcept override; }; /// <summary> /// Array pointer is nullptr /// </summary> struct SnapshotQueryResultPtrNullError : public exception { const char* what() const noexcept override; }; struct SnapshotQueryInvalidTickError : public exception { const char* what() const noexcept override; }; struct SnapshotQueryNoSnapshotsError : public exception { const char* what() const noexcept override; }; struct SnapshotListQueryNoNodeIndexError : public exception { const char* what() const noexcept override; }; } } } #endif // !_MARO_BACKENDS_RAW_SNAPSHOTLIST_
3,446
1,577
<filename>src/main/java_jdbc/com/uber/profiling/reporters/util/JdbcUtils.java /* * Copyright (c) 2020 Uber Technologies, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.uber.profiling.reporters.util; import java.beans.BeanInfo; import java.beans.IntrospectionException; import java.beans.Introspector; import java.beans.PropertyDescriptor; import java.util.ArrayList; import java.util.Collection; import java.util.Date; import java.util.List; import java.util.Optional; public class JdbcUtils { public static String getCreateTableSql( Class<?> clazz, String tableName, Collection<String> primaryKeys, Collection<String> indexFields, Collection<String> timestampFields, Collection<String> textFields) { return getCreateTableSql( clazz, tableName, null, primaryKeys, indexFields, timestampFields, textFields); } public static String getCreateTableSql( Class<?> clazz, String tableName, String partitionKey, Collection<String> primaryKeys, Collection<String> indexFields, Collection<String> timestampFields, Collection<String> textFields) { return getCreateTableSql( clazz, tableName, partitionKey, primaryKeys, indexFields, timestampFields, textFields, null); } public static String getCreateTableSql( Class<?> clazz, String tableName, String partitionKey, Collection<String> primaryKeys, Collection<String> indexFields, Collection<String> timestampFields, Collection<String> textFields, String indexNamePrefix) { if (textFields == null) { textFields = new ArrayList<>(); } if (indexNamePrefix == null) { indexNamePrefix = ""; } StringBuilder sb = new StringBuilder(); sb.append(String.format("CREATE TABLE IF NOT EXISTS %s (", tableName)); try { BeanInfo beanInfo = Introspector.getBeanInfo(clazz); PropertyDescriptor[] propertyDescriptors = beanInfo.getPropertyDescriptors(); List<PropertyDescriptor> primaryKeyPropertyDescriptors = new ArrayList<>(); List<PropertyDescriptor> indexFieldsPropertyDescriptors = new ArrayList<>(); List<PropertyDescriptor> otherPropertyDescriptors = new ArrayList<>(); for (PropertyDescriptor entry : propertyDescriptors) { if (entry.getReadMethod() == null || entry.getWriteMethod() == null) { continue; } if (primaryKeys.contains(entry.getName())) { primaryKeyPropertyDescriptors.add(entry); } if (indexFields.contains(entry.getName())) { indexFieldsPropertyDescriptors.add(entry); } if (!primaryKeys.contains(entry.getName()) && !indexFields.contains(entry.getName())) { otherPropertyDescriptors.add(entry); } } if (primaryKeyPropertyDescriptors.size() != primaryKeys.size()) { throw new RuntimeException( String.format( "Invalid primary keys. There are %s in bean object, but %s in input argument", primaryKeyPropertyDescriptors.size(), primaryKeys.size())); } List<PropertyDescriptor> primaryKeyPropertyDescriptorsWithCorrectOrder = new ArrayList<>(); for (String key : primaryKeys) { Optional<PropertyDescriptor> propertyDescriptor = primaryKeyPropertyDescriptors.stream().filter(t -> t.getName().equals(key)).findFirst(); if (!propertyDescriptor.isPresent()) { throw new RuntimeException("Did not find matching property for primary key: " + key); } primaryKeyPropertyDescriptorsWithCorrectOrder.add(propertyDescriptor.get()); } primaryKeyPropertyDescriptors = primaryKeyPropertyDescriptorsWithCorrectOrder; if (indexFieldsPropertyDescriptors.size() != indexFields.size()) { throw new RuntimeException( String.format( "Invalid index fields. There are %s in bean object, but %s in input argument", indexFieldsPropertyDescriptors.size(), indexFields.size())); } List<PropertyDescriptor> allPropertyDescriptors = new ArrayList<>(); allPropertyDescriptors.addAll(primaryKeyPropertyDescriptors); for (int i = 0; i < indexFieldsPropertyDescriptors.size(); i++) { String columnName = indexFieldsPropertyDescriptors.get(i).getName(); if (primaryKeys.contains(columnName)) { continue; } allPropertyDescriptors.add(indexFieldsPropertyDescriptors.get(i)); } allPropertyDescriptors.addAll(otherPropertyDescriptors); sb.append(allPropertyDescriptors.get(0).getName()); sb.append(" "); sb.append( getJdbcTypeString( allPropertyDescriptors.get(0), primaryKeys.contains(allPropertyDescriptors.get(0).getName()) || indexFields.contains(allPropertyDescriptors.get(0).getName()), timestampFields.contains(allPropertyDescriptors.get(0).getName()), textFields.contains(allPropertyDescriptors.get(0).getName()))); for (int i = 1; i < allPropertyDescriptors.size(); i++) { sb.append(", "); sb.append(allPropertyDescriptors.get(i).getName()); sb.append(" "); sb.append( getJdbcTypeString( allPropertyDescriptors.get(i), primaryKeys.contains(allPropertyDescriptors.get(i).getName()) || indexFields.contains(allPropertyDescriptors.get(i).getName()), timestampFields.contains(allPropertyDescriptors.get(i).getName()), textFields.contains(allPropertyDescriptors.get(i).getName()))); } if (!primaryKeyPropertyDescriptors.isEmpty()) { sb.append(", PRIMARY KEY("); sb.append(primaryKeyPropertyDescriptors.get(0).getName()); for (int i = 1; i < primaryKeyPropertyDescriptors.size(); i++) { sb.append(", "); sb.append(primaryKeyPropertyDescriptors.get(i).getName()); } sb.append(")"); } for (int i = 0; i < indexFieldsPropertyDescriptors.size(); i++) { sb.append( String.format( ", INDEX %sindex_%s (%s)", indexNamePrefix, indexFieldsPropertyDescriptors.get(i).getName(), indexFieldsPropertyDescriptors.get(i).getName())); } sb.append(")"); if (partitionKey != null && !partitionKey.isEmpty()) { sb.append(String.format(" PARTITION BY HASH(%s) PARTITIONS 32", partitionKey)); } return sb.toString(); } catch (IntrospectionException e) { throw new RuntimeException(e); } } public static String getCreateSingleColumnTableSql( String idColumnName, String stringColumnName, String tableName) { StringBuilder sb = new StringBuilder(); sb.append(String.format("CREATE TABLE IF NOT EXISTS %s (", tableName)); sb.append("id BIGINT NOT NULL AUTO_INCREMENT, "); sb.append(stringColumnName); sb.append(" "); sb.append("TEXT"); sb.append(String.format(", PRIMARY KEY (%s)", idColumnName)); sb.append(")"); return sb.toString(); } public static String getJdbcTypeString( PropertyDescriptor beanProperty, boolean isPrimaryKeyOrUniqueKey, boolean isDatetime, boolean isText) { int maxVarcharLength = isPrimaryKeyOrUniqueKey ? 150 : 250; String sqlTypeForString = isText ? "TEXT" : String.format("VARCHAR(%s)", maxVarcharLength); if (isDatetime || beanProperty.getPropertyType().equals(Date.class)) { return "DATETIME"; } else if (beanProperty.getPropertyType().equals(String.class)) { return sqlTypeForString; } else if (beanProperty.getPropertyType().equals(Integer.class)) { return "INT"; } else if (beanProperty.getPropertyType().equals(Long.class)) { return "BIGINT"; } else if (beanProperty.getPropertyType().equals(Float.class)) { return "FLOAT"; } else if (beanProperty.getPropertyType().equals(Double.class)) { return "DOUBLE"; } else if (beanProperty.getPropertyType().equals(Boolean.class)) { return "TINYINT"; } else { throw new RuntimeException( String.format( "Unsupported property type for JDBC: %s, %s", beanProperty.getName(), beanProperty.getPropertyType())); } } public static java.sql.Timestamp getSqlTimestamp(Object obj) { if (obj == null) { return null; } final long millis; if (obj instanceof Date) { millis = ((Date) obj).getTime(); } else if (obj instanceof java.sql.Timestamp) { millis = ((java.sql.Timestamp) obj).getTime(); } else if (obj instanceof Double) { millis = DateTimeUtils.getMillisSmart(((Double) obj).doubleValue()); } else if (obj instanceof Float) { millis = DateTimeUtils.getMillisSmart(((Float) obj).doubleValue()); } else if (obj instanceof Long) { millis = DateTimeUtils.getMillisSmart(((Long) obj).longValue()); } else if (obj instanceof Integer) { millis = DateTimeUtils.getMillisSmart(((Integer) obj).longValue()); } else { throw new RuntimeException( String.format("Cannot get sql timestamp from %s (%s)", obj, obj.getClass())); } return new java.sql.Timestamp(millis); } }
3,930
679
/************************************************************** * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. * *************************************************************/ #ifndef _PORDROP_HXX #define _PORDROP_HXX #include "portxt.hxx" class SwFont; // DropCap-Cache, globale Variable, in txtinit.cxx initialisiert/zerstoert // und in txtdrop.cxx benutzt bei der Initialenberechnung class SwDropCapCache; extern SwDropCapCache *pDropCapCache; /************************************************************************* * class SwDropPortionPart * * A drop portion can consist of one or more parts in order to allow * attribute changes inside them. *************************************************************************/ class SwDropPortionPart { SwDropPortionPart* pFollow; SwFont* pFnt; xub_StrLen nLen; sal_uInt16 nWidth; public: SwDropPortionPart( SwFont& rFont, const xub_StrLen nL ) : pFollow( 0 ), pFnt( &rFont ), nLen( nL ), nWidth( 0 ) {}; ~SwDropPortionPart(); inline SwDropPortionPart* GetFollow() const { return pFollow; }; inline void SetFollow( SwDropPortionPart* pNew ) { pFollow = pNew; }; inline SwFont& GetFont() const { return *pFnt; } inline xub_StrLen GetLen() const { return nLen; } inline sal_uInt16 GetWidth() const { return nWidth; } inline void SetWidth( sal_uInt16 nNew ) { nWidth = nNew; } }; /************************************************************************* * class SwDropPortion *************************************************************************/ class SwDropPortion : public SwTxtPortion { friend class SwDropCapCache; SwDropPortionPart* pPart; // due to script / attribute changes MSHORT nLines; // Anzahl der Zeilen KSHORT nDropHeight; // Hoehe KSHORT nDropDescent; // Abstand zur naechsten Zeile KSHORT nDistance; // Abstand zum Text KSHORT nFix; // Fixposition short nX; // X-PaintOffset short nY; // Y-Offset sal_Bool FormatTxt( SwTxtFormatInfo &rInf ); void PaintTxt( const SwTxtPaintInfo &rInf ) const; inline void Fix( const KSHORT nNew ) { nFix = nNew; } public: SwDropPortion( const MSHORT nLineCnt, const KSHORT nDropHeight, const KSHORT nDropDescent, const KSHORT nDistance ); virtual ~SwDropPortion(); virtual void Paint( const SwTxtPaintInfo &rInf ) const; void PaintDrop( const SwTxtPaintInfo &rInf ) const; virtual sal_Bool Format( SwTxtFormatInfo &rInf ); virtual SwPosSize GetTxtSize( const SwTxtSizeInfo &rInfo ) const; virtual xub_StrLen GetCrsrOfst( const MSHORT nOfst ) const; inline MSHORT GetLines() const { return nLines; } inline KSHORT GetDistance() const { return nDistance; } inline KSHORT GetDropHeight() const { return nDropHeight; } inline KSHORT GetDropDescent() const { return nDropDescent; } inline KSHORT GetDropLeft() const { return Width() + nFix; } inline SwDropPortionPart* GetPart() const { return pPart; } inline void SetPart( SwDropPortionPart* pNew ) { pPart = pNew; } inline void SetY( short nNew ) { nY = nNew; } inline SwFont* GetFnt() const { return pPart ? &pPart->GetFont() : NULL; } static void DeleteDropCapCache(); OUTPUT_OPERATOR }; #endif
1,358
474
package org.javacord.api.listener.channel.server.voice; import org.javacord.api.event.channel.server.voice.ServerStageVoiceChannelChangeTopicEvent; import org.javacord.api.listener.GloballyAttachableListener; import org.javacord.api.listener.ObjectAttachableListener; import org.javacord.api.listener.server.ServerAttachableListener; public interface ServerStageVoiceChannelChangeTopicListener extends ServerAttachableListener, ServerStageVoiceChannelAttachableListener, GloballyAttachableListener, ObjectAttachableListener { /** * This method is called every time a server stage voice channel's topic changes. * * @param event The event. */ void onServerStageVoiceChannelChangeTopic(ServerStageVoiceChannelChangeTopicEvent event); }
236
10,125
/** @file Copyright (C) 2012, tiamo. All rights reserved.<BR> Copyright (C) 2014, dmazar. All rights reserved.<BR> Copyright (C) 2018, savvas. All rights reserved.<BR> Copyright (C) 2018, Download-Fritz. All rights reserved.<BR> Copyright (C) 2019-2020, vit9696. All rights reserved.<BR> This program and the accompanying materials are licensed and made available under the terms and conditions of the BSD License which accompanies this distribution. The full text of the license may be found at http://opensource.org/licenses/bsd-license.php. THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED. **/ #ifndef APPLE_IMAGE_CONVERSION_H #define APPLE_IMAGE_CONVERSION_H #include <Protocol/UgaDraw.h> #define APPLE_IMAGE_CONVERSION_PROTOCOL_GUID \ { 0x0DFCE9F6, 0xC4E3, 0x45EE, \ {0xA0, 0x6A, 0xA8, 0x61, 0x3B, 0x98, 0xA5, 0x07 } } // // Protocol revision // Starting with this version scaled interfaces wered added. // Older versions had none. // #define APPLE_IMAGE_CONVERSION_PROTOCOL_REVISION 0x20000 // // Generic protocol extension capable of opening any file, // possibly by chainloading other files // #define APPLE_IMAGE_CONVERSION_PROTOCOL_ANY_EXTENSION 0 /** Recognise image passed through the buffer. @param[in] ImageBuffer Buffer containing image data. @param[in] ImageSize Size of the buffer. @retval EFI_INVALID_PARAMETER when ImageBuffer is NULL. @retval EFI_INVALID_PARAMETER when ImageSize is 0. @retval EFI_UNSUPPORTED when image is not supported (e.g. too large or corrupted). @retval EFI_SUCCESS when image can be decoded. **/ typedef EFI_STATUS (EFIAPI* RECOGNIZE_IMAGE_DATA) ( IN VOID *ImageBuffer, IN UINTN ImageSize ); /** Get image dimensions. @param[in] ImageBuffer Buffer containing image data. @param[in] ImageSize Size of the buffer. @param[out] ImageWidth Image width in pixels. @param[out] ImageHeight Image height in pixels. @retval EFI_INVALID_PARAMETER when ImageBuffer is NULL. @retval EFI_INVALID_PARAMETER when ImageSize is 0. @retval EFI_INVALID_PARAMETER when ImageWidth is NULL. @retval EFI_INVALID_PARAMETER when ImageHeight is NULL. @retval EFI_UNSUPPORTED when image is not supported (e.g. too large or corrupted). @retval EFI_SUCCESS when image dimensions were read. **/ typedef EFI_STATUS (EFIAPI* GET_IMAGE_DIMS) ( IN VOID *ImageBuffer, IN UINTN ImageSize, OUT UINT32 *ImageWidth, OUT UINT32 *ImageHeight ); /** Decode image data in 32-bit format. @param[in] ImageBuffer Buffer containing image data. @param[in] ImageSize Size of the buffer. @param[in,out] RawImageData Pointer to decoded buffer pointer. - When NULL it is allocated from pool. - When not NULL provided buffer is used. @param[in,out] RawImageDataSize Decoded buffer size. - Set to allocated area size if allocated. - Set to truncated area size when provided buffer is used. - Set to required area size when provided buffer is too small. @retval EFI_INVALID_PARAMETER when ImageBuffer is NULL. @retval EFI_INVALID_PARAMETER when ImageSize is 0. @retval EFI_INVALID_PARAMETER when RawImageData is NULL. @retval EFI_INVALID_PARAMETER when RawImageDataSize is NULL. @retval EFI_UNSUPPORTED when image is not supported (e.g. too large or corrupted). @retval EFI_OUT_OF_RESOURCES when allocation error happened. @retval EFI_BUFFER_TOO_SMALL when provided buffer is too small, RawImageDataSize is updated. @retval EFI_SUCCESS when image was decoded successfully. **/ typedef EFI_STATUS (EFIAPI* DECODE_IMAGE_DATA) ( IN VOID *ImageBuffer, IN UINTN ImageSize, IN OUT EFI_UGA_PIXEL **RawImageData, IN OUT UINTN *RawImageDataSize ); /** Get image dimensions for scale. Protocol revision APPLE_IMAGE_CONVERSION_PROTOCOL_REVISION or higher is required. @param[in] ImageBuffer Buffer containing image data. @param[in] ImageSize Size of the buffer. @param[in] Scale Scaling factor (e.g. 1 or 2). @param[out] ImageWidth Image width in pixels. @param[out] ImageHeight Image height in pixels. @retval EFI_INVALID_PARAMETER when ImageBuffer is NULL. @retval EFI_INVALID_PARAMETER when ImageSize is 0. @retval EFI_INVALID_PARAMETER when Scale is 0. @retval EFI_INVALID_PARAMETER when ImageWidth is NULL. @retval EFI_INVALID_PARAMETER when ImageHeight is NULL. @retval EFI_UNSUPPORTED when Scale is not supported. @retval EFI_UNSUPPORTED when image is not supported (e.g. too large or corrupted). @retval EFI_SUCCESS when image dimensions were read. **/ typedef EFI_STATUS (EFIAPI* GET_IMAGE_DIMS_EX) ( IN VOID *ImageBuffer, IN UINTN ImageSize, IN UINTN Scale, OUT UINT32 *ImageWidth, OUT UINT32 *ImageHeight ); /** Decode image data in 32-bit format. Protocol revision APPLE_IMAGE_CONVERSION_PROTOCOL_REVISION or higher is required. @param[in] ImageBuffer Buffer containing image data. @param[in] ImageSize Size of the buffer. @param[in] Scale Scaling factor (e.g. 1 or 2). @param[in,out] RawImageData Pointer to decoded buffer pointer. - When NULL it is allocated from pool. - When not NULL provided buffer is used. @param[in,out] RawImageDataSize Decoded buffer size. - Set to allocated area size if allocated. - Set to truncated area size when provided buffer is used. - Set to required area size when provided buffer is too small. @retval EFI_INVALID_PARAMETER when ImageBuffer is NULL. @retval EFI_INVALID_PARAMETER when ImageSize is 0. @retval EFI_INVALID_PARAMETER when RawImageData is NULL. @retval EFI_INVALID_PARAMETER when RawImageDataSize is NULL. @retval EFI_UNSUPPORTED when Scale is not supported. @retval EFI_UNSUPPORTED when image is not supported (e.g. too large or corrupted). @retval EFI_OUT_OF_RESOURCES when allocation error happened. @retval EFI_BUFFER_TOO_SMALL when provided buffer is too small, RawImageDataSize is updated. @retval EFI_SUCCESS when image was decoded successfully. **/ typedef EFI_STATUS (EFIAPI* DECODE_IMAGE_DATA_EX) ( IN VOID *ImageBuffer, IN UINTN ImageSize, IN UINTN Scale, IN OUT EFI_UGA_PIXEL **RawImageData, IN OUT UINTN *RawImageDataSize ); /** Apple image conversion protocol definition. **/ typedef struct APPLE_IMAGE_CONVERSION_PROTOCOL_ { UINT64 Revision; UINTN FileExt; RECOGNIZE_IMAGE_DATA RecognizeImageData; GET_IMAGE_DIMS GetImageDims; DECODE_IMAGE_DATA DecodeImageData; GET_IMAGE_DIMS_EX GetImageDimsEx; DECODE_IMAGE_DATA_EX DecodeImageDataEx; } APPLE_IMAGE_CONVERSION_PROTOCOL; extern EFI_GUID gAppleImageConversionProtocolGuid; #endif //APPLE_IMAGE_CONVERSION_H
3,200
388
<gh_stars>100-1000 # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. import aiounittest from botframework.connector.models import ( MessageActionsPayloadFrom, MessageActionsPayloadBody, MessageActionsPayloadAttachment, MessageActionsPayloadMention, MessageActionsPayloadReaction, ) from botbuilder.schema.teams import MessageActionsPayload class TestingMessageActionsPayload(aiounittest.AsyncTestCase): # Arrange test_id = "01" reply_to_id = "test_reply_to_id" message_type = "test_message_type" created_date_time = "01/01/2000" last_modified_date_time = "01/01/2000" deleted = False subject = "test_subject" summary = "test_summary" importance = "high" locale = "test_locale" link_to_message = "https://teams.microsoft/com/l/message/testing-id" from_property = MessageActionsPayloadFrom() body = MessageActionsPayloadBody attachment_layout = "test_attachment_layout" attachments = [MessageActionsPayloadAttachment()] mentions = [MessageActionsPayloadMention()] reactions = [MessageActionsPayloadReaction()] # Act message = MessageActionsPayload( id=test_id, reply_to_id=reply_to_id, message_type=message_type, created_date_time=created_date_time, last_modified_date_time=last_modified_date_time, deleted=deleted, subject=subject, summary=summary, importance=importance, locale=locale, link_to_message=link_to_message, from_property=from_property, body=body, attachment_layout=attachment_layout, attachments=attachments, mentions=mentions, reactions=reactions, ) def test_assign_id(self, message_action_payload=message, test_id=test_id): # Assert self.assertEqual(message_action_payload.id, test_id) def test_assign_reply_to_id( self, message_action_payload=message, reply_to_id=reply_to_id ): # Assert self.assertEqual(message_action_payload.reply_to_id, reply_to_id) def test_assign_message_type( self, message_action_payload=message, message_type=message_type ): # Assert self.assertEqual(message_action_payload.message_type, message_type) def test_assign_created_date_time( self, message_action_payload=message, created_date_time=created_date_time ): # Assert self.assertEqual(message_action_payload.created_date_time, created_date_time) def test_assign_last_modified_date_time( self, message_action_payload=message, last_modified_date_time=last_modified_date_time, ): # Assert self.assertEqual( message_action_payload.last_modified_date_time, last_modified_date_time ) def test_assign_deleted(self, message_action_payload=message, deleted=deleted): # Assert self.assertEqual(message_action_payload.deleted, deleted) def test_assign_subject(self, message_action_payload=message, subject=subject): # Assert self.assertEqual(message_action_payload.subject, subject) def test_assign_summary(self, message_action_payload=message, summary=summary): # Assert self.assertEqual(message_action_payload.summary, summary) def test_assign_importance( self, message_action_payload=message, importance=importance ): # Assert self.assertEqual(message_action_payload.importance, importance) def test_assign_locale(self, message_action_payload=message, locale=locale): # Assert self.assertEqual(message_action_payload.locale, locale) def test_assign_link_to_message( self, message_action_payload=message, link_to_message=link_to_message ): # Assert self.assertEqual(message_action_payload.link_to_message, link_to_message) def test_assign_from_property( self, message_action_payload=message, from_property=from_property ): # Assert self.assertEqual(message_action_payload.from_property, from_property) def test_assign_body(self, message_action_payload=message, body=body): # Assert self.assertEqual(message_action_payload.body, body) def test_assign_attachment_layout( self, message_action_payload=message, attachment_layout=attachment_layout ): # Assert self.assertEqual(message_action_payload.attachment_layout, attachment_layout) def test_assign_attachments( self, message_action_payload=message, attachments=attachments ): # Assert self.assertEqual(message_action_payload.attachments, attachments) def test_assign_mentions(self, message_action_payload=message, mentions=mentions): # Assert self.assertEqual(message_action_payload.mentions, mentions) def test_assign_reactions( self, message_action_payload=message, reactions=reactions ): # Assert self.assertEqual(message_action_payload.reactions, reactions)
2,060
1,467
<filename>shared_model/interfaces/query_responses/transactions_page_response.hpp /** * Copyright Soramitsu Co., Ltd. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0 */ #ifndef IROHA_SHARED_MODEL_TRANSACTIONS_PAGE_RESPONSE_HPP #define IROHA_SHARED_MODEL_TRANSACTIONS_PAGE_RESPONSE_HPP #include "interfaces/base/model_primitive.hpp" #include <optional> #include "interfaces/common_objects/range_types.hpp" #include "interfaces/common_objects/types.hpp" namespace shared_model { namespace interface { /** * Response for paginated queries */ class TransactionsPageResponse : public ModelPrimitive<TransactionsPageResponse> { public: /** * @return transactions from this page */ virtual types::TransactionsCollectionType transactions() const = 0; /** * @return hash of the first transaction from the next page */ virtual std::optional<interface::types::HashType> nextTxHash() const = 0; /** * @return total number of transactions for the query */ virtual interface::types::TransactionsNumberType allTransactionsSize() const = 0; std::string toString() const override; bool operator==(const ModelType &rhs) const override; }; } // namespace interface } // namespace shared_model #endif // IROHA_SHARED_MODEL_TRANSACTIONS_PAGE_RESPONSE_HPP
508
346
<reponame>nsbrajapaksha/android-graphics-demo package com.sqisland.android.graphics_demo.util; import android.graphics.Bitmap; import android.graphics.Canvas; import android.graphics.Color; import android.graphics.Paint; public abstract class ImageUtil { public static Bitmap createCircle(int width, int height) { Paint paint = new Paint(); paint.setStyle(Paint.Style.FILL); paint.setColor(Color.BLUE); Bitmap bitmap = Bitmap.createBitmap(width, height, Bitmap.Config.ARGB_8888); Canvas canvas = new Canvas(bitmap); float radius = Math.min(width, height) * 0.45f; canvas.drawCircle(width / 2, height / 2, radius, paint); return bitmap; } }
246
1,408
<gh_stars>1000+ /* * Copyright (c) 2020, MediaTek Inc. All rights reserved. * * SPDX-License-Identifier: BSD-3-Clause */ #include <assert.h> #include <common/debug.h> #include <drivers/delay_timer.h> #include <lib/mmio.h> #include <mcucfg.h> #include <mtspmc.h> #include <mtspmc_private.h> void mcucfg_disable_gic_wakeup(unsigned int cluster, unsigned int cpu) { mmio_setbits_32(MCUCFG_CPC_FLOW_CTRL_CFG, GIC_WAKEUP_IGNORE(cpu)); } void mcucfg_enable_gic_wakeup(unsigned int cluster, unsigned int cpu) { mmio_clrbits_32(MCUCFG_CPC_FLOW_CTRL_CFG, GIC_WAKEUP_IGNORE(cpu)); } void mcucfg_set_bootaddr(unsigned int cluster, unsigned int cpu, uintptr_t bootaddr) { assert(cluster == 0U); mmio_write_32(per_cpu(cluster, cpu, MCUCFG_BOOTADDR), bootaddr); } uintptr_t mcucfg_get_bootaddr(unsigned int cluster, unsigned int cpu) { assert(cluster == 0U); return (uintptr_t)mmio_read_32(per_cpu(cluster, cpu, MCUCFG_BOOTADDR)); } void mcucfg_init_archstate(unsigned int cluster, unsigned int cpu, bool arm64) { uint32_t reg; assert(cluster == 0U); reg = per_cluster(cluster, MCUCFG_INITARCH); if (arm64) { mmio_setbits_32(reg, MCUCFG_INITARCH_CPU_BIT(cpu)); } else { mmio_clrbits_32(reg, MCUCFG_INITARCH_CPU_BIT(cpu)); } } /** * Return subsystem's power state. * * @mask: mask to MCUCFG_CPC_SPMC_PWR_STATUS to query the power state * of one subsystem. * RETURNS: * 0 (the subsys was powered off) * 1 (the subsys was powered on) */ bool spm_get_powerstate(uint32_t mask) { return (mmio_read_32(MCUCFG_CPC_SPMC_PWR_STATUS) & mask) != 0U; } bool spm_get_cluster_powerstate(unsigned int cluster) { assert(cluster == 0U); return spm_get_powerstate(BIT(14)); } bool spm_get_cpu_powerstate(unsigned int cluster, unsigned int cpu) { uint32_t mask = BIT(cpu); assert(cluster == 0U); return spm_get_powerstate(mask); } int spmc_init(void) { INFO("SPM: enable CPC mode\n"); mmio_write_32(SPM_POWERON_CONFIG_EN, PROJECT_CODE | BCLK_CG_EN); mmio_setbits_32(per_cpu(0, 1, SPM_CPU_PWR), PWR_RST_B); mmio_setbits_32(per_cpu(0, 2, SPM_CPU_PWR), PWR_RST_B); mmio_setbits_32(per_cpu(0, 3, SPM_CPU_PWR), PWR_RST_B); mmio_setbits_32(per_cpu(0, 4, SPM_CPU_PWR), PWR_RST_B); mmio_setbits_32(per_cpu(0, 5, SPM_CPU_PWR), PWR_RST_B); mmio_setbits_32(per_cpu(0, 6, SPM_CPU_PWR), PWR_RST_B); mmio_setbits_32(per_cpu(0, 7, SPM_CPU_PWR), PWR_RST_B); mmio_clrbits_32(SPM_MCUSYS_PWR_CON, RESETPWRON_CONFIG); mmio_clrbits_32(SPM_MP0_CPUTOP_PWR_CON, RESETPWRON_CONFIG); mmio_clrbits_32(per_cpu(0, 0, SPM_CPU_PWR), RESETPWRON_CONFIG); mmio_setbits_32(MCUCFG_CPC_FLOW_CTRL_CFG, CPC_CTRL_ENABLE); mmio_setbits_32(MCUCFG_CPC_FLOW_CTRL_CFG, SSPM_CORE_PWR_ON_EN); return 0; } /** * Power on a core with specified cluster and core index * * @cluster: the cluster ID of the CPU which to be powered on * @cpu: the CPU ID of the CPU which to be powered on */ void spm_poweron_cpu(unsigned int cluster, unsigned int cpu) { uintptr_t cpu_pwr_con = per_cpu(cluster, cpu, SPM_CPU_PWR); /* set to 0 after BIG VPROC bulk on & before B-core power on seq. */ if (cpu >= 4U) { mmio_write_32(DREQ20_BIG_VPROC_ISO, 0U); } mmio_setbits_32(cpu_pwr_con, PWR_ON); while (!spm_get_cpu_powerstate(cluster, cpu)) { mmio_clrbits_32(cpu_pwr_con, PWR_ON); mmio_setbits_32(cpu_pwr_con, PWR_ON); } } /** * Power off a core with specified cluster and core index * * @cluster: the cluster ID of the CPU which to be powered off * @cpu: the CPU ID of the CPU which to be powered off */ void spm_poweroff_cpu(unsigned int cluster, unsigned int cpu) { /* Set mp0_spmc_pwr_on_cpuX = 0 */ mmio_clrbits_32(per_cpu(cluster, cpu, SPM_CPU_PWR), PWR_ON); } /** * Power off a cluster with specified index * * @cluster: the cluster index which to be powered off */ void spm_poweroff_cluster(unsigned int cluster) { /* No need to power on/off cluster on single cluster platform */ assert(false); } /** * Power on a cluster with specified index * * @cluster: the cluster index which to be powered on */ void spm_poweron_cluster(unsigned int cluster) { /* No need to power on/off cluster on single cluster platform */ assert(false); }
1,790
743
{ "$schema": "https://raw.githubusercontent.com/microsoft/AdaptiveCards/6f39aedce45864ae1067ed44a5551dc973790bb5/source/nodejs/typed-schema/schema/lib/Type.json", "isAbstract": true, "description": "Marker interface for inlines" }
91
647
trick.real_time_enable() trick.exec_set_terminate_time(5.0)
28
471
<filename>test-old/test_mujoco_pusher.py from surreal.env.mujocomanip.default_env_configs import * from surreal.env.mujocomanip.default_object_configs import * from surreal.env.mujocomanip.mujocomanip_envs import * import copy import numpy as np env_config = DEFAULT_PUSHER_CONFIG env_config.display = True object_config = XML_BALL_CONFIG env_config.mujoco_object_spec = object_config env = SurrealSawyerPushEnv(env_config) obs,info = env.reset() while True: obs,info = env.reset() ### TODO: we should implement ### TODO: this might need clipping ### action = np.random.randn(8) # action[7] *= 0.020833 for i in range(2000): action = np.random.randn(8) / 2 action[7] = -1 obs, reward, done, info = env.step(action) # # obs, reward, done, info = env._step([0,-1,0,0,0,0,2]) # print(obs, reward, done, info) env.render() if done: print('done: {}'.format(reward)) break
428
11,356
<filename>deps/src/boost_1_65_1/boost/spirit/home/support.hpp /*============================================================================= Copyright (c) 2001-2011 <NAME> Distributed under the Boost Software License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) ==============================================================================*/ #if !defined(BOOST_SPIRIT_SUPPORT_SEPTEMBER_26_2008_0340AM) #define BOOST_SPIRIT_SUPPORT_SEPTEMBER_26_2008_0340AM #if defined(_MSC_VER) #pragma once #endif #include<boost/spirit/home/support/assert_msg.hpp> #include<boost/spirit/home/support/action_dispatch.hpp> #include<boost/spirit/home/support/argument.hpp> #include<boost/spirit/home/support/attributes.hpp> #include<boost/spirit/home/support/char_class.hpp> #include<boost/spirit/home/support/common_terminals.hpp> #include<boost/spirit/home/support/container.hpp> #include<boost/spirit/home/support/context.hpp> #include<boost/spirit/home/support/info.hpp> #include<boost/spirit/home/support/lazy.hpp> #include<boost/spirit/home/support/make_component.hpp> #include<boost/spirit/home/support/meta_compiler.hpp> #include<boost/spirit/home/support/modify.hpp> #include<boost/spirit/home/support/sequence_base_id.hpp> #include<boost/spirit/home/support/string_traits.hpp> #include<boost/spirit/home/support/terminal.hpp> #include<boost/spirit/home/support/unused.hpp> #include<boost/spirit/home/support/utf8.hpp> #endif
518
8,865
<gh_stars>1000+ /* ===-- floatundixf.c - Implement __floatundixf ---------------------------=== * * The LLVM Compiler Infrastructure * * This file is dual licensed under the MIT and the University of Illinois Open * Source Licenses. See LICENSE.TXT for details. * * ===----------------------------------------------------------------------=== * * This file implements __floatundixf for the compiler_rt library. * * ===----------------------------------------------------------------------=== */ #if !_ARCH_PPC #include "int_lib.h" /* Returns: convert a to a long double, rounding toward even. */ /* Assumption: long double is a IEEE 80 bit floating point type padded to 128 bits * du_int is a 64 bit integral type */ /* gggg gggg gggg gggg gggg gggg gggg gggg | gggg gggg gggg gggg seee eeee eeee eeee | * 1mmm mmmm mmmm mmmm mmmm mmmm mmmm mmmm | mmmm mmmm mmmm mmmm mmmm mmmm mmmm mmmm */ COMPILER_RT_ABI long double __floatundixf(du_int a) { if (a == 0) return 0.0; const unsigned N = sizeof(du_int) * CHAR_BIT; int clz = __builtin_clzll(a); int e = (N - 1) - clz ; /* exponent */ long_double_bits fb; fb.u.high.s.low = (e + 16383); /* exponent */ fb.u.low.all = a << clz; /* mantissa */ return fb.f; } #endif /* _ARCH_PPC */
521
392
/******************************************************************************* * Copyright (c) 2015 - 2017 * * Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. *******************************************************************************/ package jsettlers.common.menu; import java.util.Date; import java.util.List; /** * This interface defines the methods supplying information about a map definition * * @author michael * @author <NAME> */ public interface IMapDefinition { /** * Gets the id of the map. This id must be unique! The id must also differ between maps in a different version. * * @return The unique identifier of the represented map. */ String getMapId(); /** * Gets the name of the map. * * @return A name describing the map. */ String getMapName(); /** * Gets the description of this map. * * @return A string that describes this map. It may contain linebreaks. */ String getDescription(); /** * Gets the image of this map. * * @return The image data */ short[] getImage(); /** * Gets the minimum number of players that can play this map. * * @return That number. */ int getMinPlayers(); /** * Gets the maximum number of players supported by this map. * * @return The number of players supported by this map. */ int getMaxPlayers(); /** * Gets a list of players that played on the map. * * @return The players from that loadable game. */ List<ILoadableMapPlayer> getPlayers(); Date getCreationDate(); }
665
8,772
package org.apereo.cas.authentication.mfa.trigger; import org.apereo.cas.authentication.principal.Service; import org.apereo.cas.configuration.CasConfigurationProperties; import lombok.val; import org.apache.commons.io.FileUtils; import org.junit.jupiter.api.MethodOrderer; import org.junit.jupiter.api.Order; import org.junit.jupiter.api.Tag; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.TestMethodOrder; import org.springframework.core.io.ClassPathResource; import org.springframework.core.io.FileSystemResource; import java.io.File; import java.nio.charset.StandardCharsets; import static org.junit.jupiter.api.Assertions.*; import static org.mockito.Mockito.*; /** * This is {@link PredicatedPrincipalAttributeMultifactorAuthenticationTriggerTests}. * * @author <NAME> * @since 6.2.0 */ @Tag("MFATrigger") @TestMethodOrder(MethodOrderer.OrderAnnotation.class) public class PredicatedPrincipalAttributeMultifactorAuthenticationTriggerTests extends BaseMultifactorAuthenticationTriggerTests { @Test @Order(0) @Tag("DisableProviderRegistration") public void verifyNoProviders() throws Exception { val props = new CasConfigurationProperties(); val file = File.createTempFile("example", ".txt"); FileUtils.writeStringToFile(file, "script", StandardCharsets.UTF_8); props.getAuthn().getMfa().getTriggers().getPrincipal().getGlobalPrincipalAttributePredicate().setLocation(new FileSystemResource(file)); val trigger = new PredicatedPrincipalAttributeMultifactorAuthenticationTrigger(props, this.applicationContext); val result = trigger.isActivated(authentication, registeredService, this.httpRequest, mock(Service.class)); assertTrue(result.isEmpty()); } @Test @Order(1) public void verifyOperationByHeader() { val props = new CasConfigurationProperties(); props.getAuthn().getMfa().getTriggers().getPrincipal().getGlobalPrincipalAttributePredicate().setLocation(new ClassPathResource("GroovyPredicate.groovy")); val trigger = new PredicatedPrincipalAttributeMultifactorAuthenticationTrigger(props, this.applicationContext); val result = trigger.isActivated(authentication, registeredService, this.httpRequest, mock(Service.class)); assertTrue(result.isPresent()); } @Test @Order(3) public void verifyNoPredicate() throws Exception { val props = new CasConfigurationProperties(); val file = File.createTempFile("predicate", ".txt"); FileUtils.writeStringToFile(file, "script", StandardCharsets.UTF_8); props.getAuthn().getMfa().getTriggers().getPrincipal().getGlobalPrincipalAttributePredicate().setLocation(new FileSystemResource(file)); val trigger = new PredicatedPrincipalAttributeMultifactorAuthenticationTrigger(props, this.applicationContext); val result = trigger.isActivated(authentication, registeredService, this.httpRequest, mock(Service.class)); assertTrue(result.isEmpty()); } }
998
571
<filename>chapter15/wildfly-camel-war/src/main/java/camelinaction/InventoryConverter.java package camelinaction; import camelinaction.inventory.UpdateInventoryInput; import org.apache.camel.Converter; /** * A Camel converter which can convert from CSV (String) to model objects. * <p/> * By annotation this class with @Converter we tell Camel this is a converter class * it should scan and register methods as type converters. */ @Converter public final class InventoryConverter { private InventoryConverter() { } /** * This method can convert from CSV (String) to model object. * <p/> * By annotation this method with @Converter we tell Camel to include this method * as a type converter in its type converter registry. * * @param csv the from type * @return the to type */ @Converter public static UpdateInventoryInput toInput(String csv) { String[] lines = csv.split(","); if (lines == null || lines.length != 4) { throw new IllegalArgumentException("CSV line is not valid: " + csv); } UpdateInventoryInput input = new UpdateInventoryInput(); input.setSupplierId(lines[0]); input.setPartId(lines[1]); input.setName(lines[2]); input.setAmount(lines[3]); return input; } }
478
1,056
<reponame>Antholoj/netbeans /* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.netbeans.lib.profiler.tests.jfluid.benchmarks; import junit.framework.Test; import junit.textui.TestRunner; import org.netbeans.junit.NbModuleSuite; import org.netbeans.lib.profiler.ProfilerEngineSettings; import org.netbeans.lib.profiler.global.CommonConstants; /** * * @author ehucka */ public class JbbTest extends JbbTestType { //~ Constructors ------------------------------------------------------------------------------------------------------------- /** * Creates a new instance of JbbTest */ public JbbTest(String name) { super(name); } public static void main(String[] args) { TestRunner.run(suite()); } public static Test suite() { return NbModuleSuite.create( NbModuleSuite.createConfiguration(JbbTest.class).addTest( "testBasic", "testDefaultEntire", "testDefaultPart", "testInstrumentEager", "testInstrumentSampledLazy", "testInstrumentSampledTotal").enableModules(".*").clusters(".*").gui(false)); } //~ Methods ------------------------------------------------------------------------------------------------------------------ public void testBasic() { ProfilerEngineSettings settings = initCpuTest("jbb", "spec.jbb.JBBmain"); startBenchmarkTest(settings, 20); } public void testDefaultEntire() { ProfilerEngineSettings settings = initCpuTest("jbb", "spec.jbb.JBBmain"); settings.setCPUProfilingType(CommonConstants.CPU_INSTR_FULL); settings.setInstrScheme(CommonConstants.INSTRSCHEME_TOTAL); settings.setInstrumentEmptyMethods(false); settings.setInstrumentGetterSetterMethods(false); settings.setInstrumentMethodInvoke(true); settings.setInstrumentSpawnedThreads(true); settings.setExcludeWaitTime(true); startBenchmarkTest(settings, 170); } public void testDefaultPart() { ProfilerEngineSettings settings = initCpuTest("jbb", "spec.jbb.JBBmain"); settings.setCPUProfilingType(CommonConstants.CPU_INSTR_FULL); settings.setInstrScheme(CommonConstants.INSTRSCHEME_LAZY); settings.setInstrumentEmptyMethods(false); settings.setInstrumentGetterSetterMethods(false); settings.setInstrumentMethodInvoke(true); settings.setInstrumentSpawnedThreads(false); settings.setExcludeWaitTime(true); startBenchmarkTest(settings, 10); } public void testInstrumentEager() { ProfilerEngineSettings settings = initCpuTest("jbb", "spec.jbb.JBBmain"); settings.setCPUProfilingType(CommonConstants.CPU_INSTR_FULL); settings.setInstrScheme(CommonConstants.INSTRSCHEME_EAGER); settings.setInstrumentEmptyMethods(false); settings.setInstrumentGetterSetterMethods(false); settings.setInstrumentMethodInvoke(true); settings.setInstrumentSpawnedThreads(true); settings.setExcludeWaitTime(true); startBenchmarkTest(settings, 165); } public void testInstrumentSampledLazy() { ProfilerEngineSettings settings = initCpuTest("jbb", "spec.jbb.JBBmain"); settings.setCPUProfilingType(CommonConstants.CPU_INSTR_SAMPLED); settings.setSamplingInterval(10); settings.setInstrScheme(CommonConstants.INSTRSCHEME_LAZY); settings.setInstrumentEmptyMethods(false); settings.setInstrumentGetterSetterMethods(false); settings.setInstrumentMethodInvoke(true); settings.setInstrumentSpawnedThreads(true); settings.setExcludeWaitTime(true); startBenchmarkTest(settings, 30); } public void testInstrumentSampledTotal() { ProfilerEngineSettings settings = initCpuTest("jbb", "spec.jbb.JBBmain"); settings.setCPUProfilingType(CommonConstants.CPU_INSTR_SAMPLED); settings.setSamplingInterval(10); settings.setInstrScheme(CommonConstants.INSTRSCHEME_TOTAL); settings.setInstrumentEmptyMethods(false); settings.setInstrumentGetterSetterMethods(false); settings.setInstrumentMethodInvoke(true); settings.setInstrumentSpawnedThreads(true); settings.setExcludeWaitTime(true); startBenchmarkTest(settings, 35); } }
1,837
4,047
#include <glib-object.h> #pragma once #define BAR_TYPE_BAR (bar_bar_get_type()) G_DECLARE_FINAL_TYPE (BarBar, bar_bar, BAR, BAR, GObject) int bar_bar_return_success(void);
77
1,056
<reponame>timfel/netbeans<gh_stars>1000+ /* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.netbeans.modules.php.editor.verification; import java.util.Collections; import java.util.ArrayList; import java.util.List; import org.netbeans.editor.BaseDocument; import org.netbeans.modules.csl.api.EditList; import org.netbeans.modules.csl.api.Hint; import org.netbeans.modules.csl.api.HintFix; import org.netbeans.modules.csl.api.HintSeverity; import org.netbeans.modules.csl.api.OffsetRange; import org.netbeans.modules.csl.spi.support.CancelSupport; import org.netbeans.modules.php.editor.parser.PHPParseResult; import org.netbeans.modules.php.editor.parser.astnodes.ASTNode; import org.netbeans.modules.php.editor.parser.astnodes.Assignment; import org.netbeans.modules.php.editor.parser.astnodes.Block; import org.netbeans.modules.php.editor.parser.astnodes.ConditionalExpression; import org.netbeans.modules.php.editor.parser.astnodes.DoStatement; import org.netbeans.modules.php.editor.parser.astnodes.ForEachStatement; import org.netbeans.modules.php.editor.parser.astnodes.ForStatement; import org.netbeans.modules.php.editor.parser.astnodes.FunctionInvocation; import org.netbeans.modules.php.editor.parser.astnodes.IfStatement; import org.netbeans.modules.php.editor.parser.astnodes.InfixExpression; import org.netbeans.modules.php.editor.parser.astnodes.InfixExpression.OperatorType; import org.netbeans.modules.php.editor.parser.astnodes.ReturnStatement; import org.netbeans.modules.php.editor.parser.astnodes.SwitchCase; import org.netbeans.modules.php.editor.parser.astnodes.WhileStatement; import org.netbeans.modules.php.editor.parser.astnodes.visitors.DefaultTreePathVisitor; import org.openide.filesystems.FileObject; import org.openide.util.NbBundle.Messages; /** * * @author <NAME> <<EMAIL>> */ public class AmbiguousComparisonHint extends HintRule { private static final String HINT_ID = "Ambiguous.Comparison.Hint"; //NOI18N @Override public void invoke(final PHPRuleContext context, final List<Hint> hints) { final PHPParseResult phpParseResult = (PHPParseResult) context.parserResult; if (phpParseResult.getProgram() == null) { return; } final FileObject fileObject = phpParseResult.getSnapshot().getSource().getFileObject(); if (fileObject == null) { return; } if (CancelSupport.getDefault().isCancelled()) { return; } final CheckVisitor checkVisitor = new CheckVisitor(fileObject, context.doc); phpParseResult.getProgram().accept(checkVisitor); if (CancelSupport.getDefault().isCancelled()) { return; } hints.addAll(checkVisitor.getHints()); } private class CheckVisitor extends DefaultTreePathVisitor { private final FileObject fileObject; private final BaseDocument doc; private final List<InfixExpression> expressions = new ArrayList<>(); private final List<Hint> hints = new ArrayList<>(); public CheckVisitor(final FileObject fileObject, final BaseDocument doc) { this.fileObject = fileObject; this.doc = doc; } public List<Hint> getHints() { for (InfixExpression infixExpression : expressions) { if (CancelSupport.getDefault().isCancelled()) { return Collections.emptyList(); } createHint(infixExpression); } return hints; } @Messages("AmbiguousComparisonHintCustom=Possible accidental comparison found. Check if you wanted to use '=' instead.") private void createHint(final InfixExpression node) { final OffsetRange offsetRange = new OffsetRange(node.getStartOffset(), node.getEndOffset()); if (showHint(offsetRange, doc)) { hints.add(new Hint( AmbiguousComparisonHint.this, Bundle.AmbiguousComparisonHintCustom(), fileObject, offsetRange, Collections.<HintFix>singletonList(new AssignmentHintFix(doc, node)), 500)); } } @Override public void visit(final InfixExpression node) { if (CancelSupport.getDefault().isCancelled()) { return; } final OperatorType operator = node.getOperator(); if (OperatorType.IS_EQUAL.equals(operator) || OperatorType.IS_IDENTICAL.equals(operator)) { checkPathForNode(node); } super.visit(node); } private void checkPathForNode(final InfixExpression node) { if (CancelSupport.getDefault().isCancelled()) { return; } final List<ASTNode> path = getPath(); if (path.isEmpty() || !isValidContext(path)) { expressions.add(node); } } private boolean isValidContext(final List<ASTNode> path) { boolean result = false; for (ASTNode node : path) { if (isConditionalNode(node) || node instanceof Assignment || node instanceof ReturnStatement || node instanceof FunctionInvocation) { result = true; break; } else if (node instanceof Block) { result = false; break; } } return result; } private boolean isConditionalNode(final ASTNode node) { return node instanceof IfStatement || node instanceof WhileStatement || node instanceof DoStatement || node instanceof ForStatement || node instanceof ForEachStatement || node instanceof ConditionalExpression || node instanceof SwitchCase; } } private static class AssignmentHintFix implements HintFix { private final BaseDocument doc; private final InfixExpression expression; private static final String ASSIGNMENT = " = "; //NOI18N public AssignmentHintFix(final BaseDocument doc, final InfixExpression expression) { this.doc = doc; this.expression = expression; } @Override @Messages("AssignmentHintFixDisp=Change Comparison to Assignment") public String getDescription() { return Bundle.AssignmentHintFixDisp(); } @Override public void implement() throws Exception { final EditList edits = new EditList(doc); final OffsetRange offsetRange = new OffsetRange(expression.getLeft().getEndOffset(), expression.getRight().getStartOffset()); edits.replace(offsetRange.getStart(), offsetRange.getLength(), ASSIGNMENT, true, 0); edits.apply(); } @Override public boolean isSafe() { return true; } @Override public boolean isInteractive() { return false; } } @Override public String getId() { return HINT_ID; } @Override @Messages("AmbiguousComparisonHintDescName=Tries to reveal typos in assignments (assignments with more than one assignment operator).") public String getDescription() { return Bundle.AmbiguousComparisonHintDescName(); } @Override @Messages("AmbiguousComparisonHintDispName=Ambiguous Comparison") public String getDisplayName() { return Bundle.AmbiguousComparisonHintDispName(); } @Override public HintSeverity getDefaultSeverity() { return HintSeverity.WARNING; } }
3,471
742
/*------------------------------------ ///\ Plywood C++ Framework \\\/ https://plywood.arc80.com/ ------------------------------------*/ #pragma once #include <ply-reflect/Core.h> namespace ply { // The lowest range is reserved for built-in FormatDescriptors static const u32 FormatID_StartUserRange = 1000; enum class FormatKey { // All FormatKeys less than StartUserKeyRange are also FormatIDs. None = 0, // Special value used during serialization Indirect, // Special value used during serialization Bool, S8, S16, S32, S64, U8, U16, U32, U64, Float, Double, String, TypedArray, // FIXME: Maybe this can be wrapped inside "Typed" Typed, // Note: If new built-in FormatKeys are added here, it changes the FormatKey of all user // FormatDescriptors, so pretty much all data needs to be recooked. Maybe we should allow adding // built-in FormatKeys *after* Struct. If so, should remove StartUserKeyRange as it would become // meaningless. User FormatDescriptors (not built-in ones) must use formatKey >= // StartUserKeyRange StartUserKeyRange, FixedArray = StartUserKeyRange, Array, Owned, WeakPtr, Struct, Enum, EnumIndexedArray, Switch }; struct FormatDescriptor { u8 formatKey; FormatDescriptor(u32 formatKey) : formatKey(formatKey) { } virtual ~FormatDescriptor() { } // just for FormatDescriptor_Struct }; extern FormatDescriptor FormatDescriptor_Bool; extern FormatDescriptor FormatDescriptor_S8; extern FormatDescriptor FormatDescriptor_S16; extern FormatDescriptor FormatDescriptor_S32; extern FormatDescriptor FormatDescriptor_S64; extern FormatDescriptor FormatDescriptor_U8; extern FormatDescriptor FormatDescriptor_U16; extern FormatDescriptor FormatDescriptor_U32; extern FormatDescriptor FormatDescriptor_U64; extern FormatDescriptor FormatDescriptor_Float; extern FormatDescriptor FormatDescriptor_Double; extern FormatDescriptor FormatDescriptor_String; extern FormatDescriptor FormatDescriptor_TypedArray; extern FormatDescriptor FormatDescriptor_Typed; struct FormatDescriptor_FixedArray : FormatDescriptor { u32 numItems; FormatDescriptor* itemFormat; FormatDescriptor_FixedArray(u32 numItems, FormatDescriptor* itemFormat) : FormatDescriptor((u32) FormatKey::FixedArray), numItems(numItems), itemFormat(itemFormat) { } virtual ~FormatDescriptor_FixedArray() override { } }; struct FormatDescriptor_Array : FormatDescriptor { FormatDescriptor* itemFormat; FormatDescriptor_Array(FormatDescriptor* itemFormat) : FormatDescriptor((u32) FormatKey::Array), itemFormat(itemFormat) { } virtual ~FormatDescriptor_Array() override { } }; struct FormatDescriptor_Owned : FormatDescriptor { FormatDescriptor* childFormat; FormatDescriptor_Owned(FormatDescriptor* childFormat) : FormatDescriptor((u32) FormatKey::Owned), childFormat(childFormat) { } virtual ~FormatDescriptor_Owned() override { } }; struct FormatDescriptor_WeakPtr : FormatDescriptor { FormatDescriptor* childFormat; FormatDescriptor_WeakPtr(FormatDescriptor* childFormat) : FormatDescriptor((u32) FormatKey::WeakPtr), childFormat(childFormat) { } virtual ~FormatDescriptor_WeakPtr() override { } }; struct FormatDescriptor_Struct : FormatDescriptor { struct Member { String name; FormatDescriptor* formatDesc; }; String name; Array<Member> templateParams; Array<Member> members; FormatDescriptor* getTemplateParam(StringView name) { for (Member& templateParam : templateParams) { if (templateParam.name == name) return templateParam.formatDesc; } return nullptr; } FormatDescriptor* getMember(StringView name) { for (Member& member : members) { if (member.name == name) return member.formatDesc; } return nullptr; } FormatDescriptor_Struct() : FormatDescriptor((u32) FormatKey::Struct) { } virtual ~FormatDescriptor_Struct() override { } }; struct FormatDescriptor_Enum : FormatDescriptor { u8 fixedSize; // FIXME: Delete this? String name; // FIXME: Delete this? Array<String> identifiers; FormatDescriptor_Enum() : FormatDescriptor((u32) FormatKey::Enum) { } virtual ~FormatDescriptor_Enum() override { } }; struct FormatDescriptor_EnumIndexedArray : FormatDescriptor { FormatDescriptor* itemFormat; FormatDescriptor_Enum* enumFormat; FormatDescriptor_EnumIndexedArray() : FormatDescriptor((u32) FormatKey::EnumIndexedArray) { } virtual ~FormatDescriptor_EnumIndexedArray() override { } }; struct FormatDescriptor_Switch : FormatDescriptor { struct State { String name; FormatDescriptor_Struct* structFormat; }; String name; Array<State> states; FormatDescriptor_Switch() : FormatDescriptor((u32) FormatKey::Switch) { } virtual ~FormatDescriptor_Switch() override { } }; } // namespace ply
1,870
2,151
<reponame>Scopetta197/chromium<filename>third_party/mesa/MesaLib/src/gallium/state_trackers/vega/asm_filters.h /************************************************************************** * * Copyright 2009 VMware, Inc. All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the * "Software"), to deal in the Software without restriction, including * without limitation the rights to use, copy, modify, merge, publish, * distribute, sub license, and/or sell copies of the Software, and to * permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice (including the * next paragraph) shall be included in all copies or substantial portions * of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. * **************************************************************************/ #ifndef ASM_FILTERS_H #define ASM_FILTERS_H static const char color_matrix_asm[] = "FRAG\n" "DCL IN[0], GENERIC[0], PERSPECTIVE\n" "DCL OUT[0], COLOR, CONSTANT\n" "DCL CONST[0..4], CONSTANT\n" "DCL TEMP[0..4], CONSTANT\n" "DCL SAMP[0], CONSTANT\n" "TEX TEMP[0], IN[0], SAMP[0], 2D\n" "MOV TEMP[1], TEMP[0].xxxx\n" "MOV TEMP[2], TEMP[0].yyyy\n" "MOV TEMP[3], TEMP[0].zzzz\n" "MOV TEMP[4], TEMP[0].wwww\n" "MUL TEMP[1], TEMP[1], CONST[0]\n" "MUL TEMP[2], TEMP[2], CONST[1]\n" "MUL TEMP[3], TEMP[3], CONST[2]\n" "MUL TEMP[4], TEMP[4], CONST[3]\n" "ADD TEMP[0], TEMP[1], CONST[4]\n" "ADD TEMP[0], TEMP[0], TEMP[2]\n" "ADD TEMP[0], TEMP[0], TEMP[3]\n" "ADD TEMP[0], TEMP[0], TEMP[4]\n" "MOV OUT[0], TEMP[0]\n" "END\n"; static const char convolution_asm[] = "FRAG\n" "DCL IN[0], GENERIC[0], PERSPECTIVE\n" "DCL OUT[0], COLOR, CONSTANT\n" "DCL TEMP[0..4], CONSTANT\n" "DCL ADDR[0], CONSTANT\n" "DCL CONST[0..%d], CONSTANT\n" "DCL SAMP[0], CONSTANT\n" "0: MOV TEMP[0], CONST[0].xxxx\n" "1: MOV TEMP[1], CONST[0].xxxx\n" "2: BGNLOOP :14\n" "3: SGE TEMP[0].z, TEMP[0].yyyy, CONST[1].xxxx\n" "4: IF TEMP[0].zzzz :7\n" "5: BRK\n" "6: ENDIF\n" "7: ARL ADDR[0].x, TEMP[0].yyyy\n" "8: MOV TEMP[3], CONST[ADDR[0]+2]\n" "9: ADD TEMP[4].xy, IN[0], TEMP[3]\n" "10: TEX TEMP[2], TEMP[4], SAMP[0], 2D\n" "11: MOV TEMP[3], CONST[ADDR[0]+%d]\n" "12: MAD TEMP[1], TEMP[2], TEMP[3], TEMP[1]\n" "13: ADD TEMP[0].y, TEMP[0].yyyy, CONST[0].yyyy\n" "14: ENDLOOP :2\n" "15: MAD OUT[0], TEMP[1], CONST[1].yyyy, CONST[1].zzzz\n" "16: END\n"; static const char lookup_asm[] = "FRAG\n" "DCL IN[0], GENERIC[0], PERSPECTIVE\n" "DCL OUT[0], COLOR, CONSTANT\n" "DCL TEMP[0..2], CONSTANT\n" "DCL CONST[0], CONSTANT\n" "DCL SAMP[0..1], CONSTANT\n" "TEX TEMP[0], IN[0], SAMP[0], 2D\n" "MOV TEMP[1], TEMP[0]\n" /* do red */ "TEX TEMP[2], TEMP[1].xxxx, SAMP[1], 1D\n" "MOV TEMP[0].x, TEMP[2].xxxx\n" /* do blue */ "TEX TEMP[2], TEMP[1].yyyy, SAMP[1], 1D\n" "MOV TEMP[0].y, TEMP[2].yyyy\n" /* do green */ "TEX TEMP[2], TEMP[1].zzzz, SAMP[1], 1D\n" "MOV TEMP[0].z, TEMP[2].zzzz\n" /* do alpha */ "TEX TEMP[2], TEMP[1].wwww, SAMP[1], 1D\n" "MOV TEMP[0].w, TEMP[2].wwww\n" "MOV OUT[0], TEMP[0]\n" "END\n"; static const char lookup_single_asm[] = "FRAG\n" "DCL IN[0], GENERIC[0], PERSPECTIVE\n" "DCL OUT[0], COLOR, CONSTANT\n" "DCL TEMP[0..2], CONSTANT\n" "DCL CONST[0], CONSTANT\n" "DCL SAMP[0..1], CONSTANT\n" "TEX TEMP[0], IN[0], SAMP[0], 2D\n" "TEX TEMP[1], TEMP[0].%s, SAMP[1], 1D\n" "MOV OUT[0], TEMP[1]\n" "END\n"; #endif
1,962
949
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package examples; import org.apache.log4j.Logger; import org.apache.log4j.NDC; /** Example code for log4j to viewed in conjunction with the {@link examples.Sort Sort} class. <p>SortAlgo uses the bubble sort algorithm to sort an integer array. See also its <b><a href="doc-files/SortAlgo.java">source code</a></b>. @author <NAME>uml;lc&uuml; */ public class SortAlgo { final static String className = SortAlgo.class.getName(); final static Logger LOG = Logger.getLogger(className); final static Logger OUTER = Logger.getLogger(className + ".OUTER"); final static Logger INNER = Logger.getLogger(className + ".INNER"); final static Logger DUMP = Logger.getLogger(className + ".DUMP"); final static Logger SWAP = Logger.getLogger(className + ".SWAP"); int[] intArray; SortAlgo(int[] intArray) { this.intArray = intArray; } void bubbleSort() { LOG.info( "Entered the sort method."); for(int i = intArray.length -1; i >= 0 ; i--) { NDC.push("i=" + i); OUTER.debug("in outer loop."); for(int j = 0; j < i; j++) { NDC.push("j=" + j); // It is poor practice to ship code with log staments in tight loops. // We do it anyway in this example. INNER.debug( "in inner loop."); if(intArray[j] > intArray[j+1]) swap(j, j+1); NDC.pop(); } NDC.pop(); } } void dump() { if(! (this.intArray instanceof int[])) { DUMP.error("Tried to dump an uninitialized array."); return; } DUMP.info("Dump of integer array:"); for(int i = 0; i < this.intArray.length; i++) { DUMP.info("Element [" + i + "]=" + this.intArray[i]); } } void swap(int l, int r) { // It is poor practice to ship code with log staments in tight // loops or code called potentially millions of times. SWAP.debug( "Swapping intArray["+l+"]=" + intArray[l] + " and intArray["+r+"]=" + intArray[r]); int temp = this.intArray[l]; this.intArray[l] = this.intArray[r]; this.intArray[r] = temp; } }
1,033
1,673
/* bug #1094 - Nested struct/union initializers don't compile */ #include <stdio.h> #include <stddef.h> #include <stdlib.h> #include <stdint.h> typedef uint16_t u16; typedef uint8_t u8; struct WW { int a : 4; struct { unsigned int b : 4; unsigned int c : 8; } x[2]; } wwqq = { 0, {2, 5, {3, 4}}, }; typedef struct { u16 quot; u16 rem; } udiv_t; typedef struct { u16 quot; u16 rem; char m[8]; } big_t; union U { struct { signed int a : 3; signed int b : 3; signed int c : 3; }; int u; }; union U g = { 5, 3, 1 }; struct S { struct { unsigned int a : 3; unsigned int b : 3; unsigned int c : 3; }; }; struct S h = { 5, 3, 1 }; union X { struct { uint16_t a : 3; union { struct { uint16_t b : 3; uint16_t c : 3; }; uint16_t d; }; }; uint16_t e; } x = { 4, {5, 6} }; udiv_t div3(udiv_t u) { udiv_t v = {}; u.quot = 341 + u.quot; u.rem = 1 + u.rem; v = u; return v; } int main(void) { udiv_t v = { 141, 32 }; big_t b = { 141, 32 }; v = div3(*(udiv_t*)&b); printf("%d %d %d\n", (int)wwqq.a, wwqq.x[0].b, wwqq.x[0].c); printf("%d %d %d\n", (int)wwqq.a, wwqq.x[1].b, wwqq.x[1].c); printf("quot = %u, rem = %u\n", div3(v).quot, div3(v).rem); printf("quot = %u, rem = %u\n", v.quot, v.rem); printf("quot = %u, rem = %u\n", b.quot, b.rem); printf("g.a = %u, g.b = %u, g.c = %d\n", g.a, g.b, g.c); x.e = 1467; printf("x.a = %d, x.b = %d, x.c = %d\n", x.a, x.b, x.c); printf("(long)x.b = %ld, sizeof(x) = %u, sizeof((long)x.a) = %u\n", (long)x.b, sizeof(x), sizeof((long)x.a)); printf("-x.d = %d, (long)(-x.c + 1) = %ld\n", -x.d, (long)(-x.c + 1)); printf("h.a = %u, h.b = %u, h.c = %u\n", h.a, h.b, h.c); return 0; }
1,100
823
<filename>problem-violations/src/test/java/org/zalando/problem/violations/ConstraintViolationProblemModuleTest.java package org.zalando.problem.violations; import com.fasterxml.jackson.core.JsonProcessingException; import com.fasterxml.jackson.databind.MapperFeature; import com.fasterxml.jackson.databind.ObjectMapper; import com.fasterxml.jackson.databind.json.JsonMapper; import lombok.extern.slf4j.Slf4j; import org.junit.jupiter.api.Test; import org.zalando.problem.jackson.ProblemModule; import java.net.URI; import static com.jayway.jsonassert.JsonAssert.with; import static java.util.Collections.emptyList; import static java.util.Collections.singletonList; import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.is; import static org.zalando.problem.Status.BAD_REQUEST; @Slf4j final class ConstraintViolationProblemModuleTest { @Test void shouldSerializeWithoutAutoDetect() throws JsonProcessingException { final JsonMapper mapper = JsonMapper.builder() .disable(MapperFeature.AUTO_DETECT_FIELDS) .disable(MapperFeature.AUTO_DETECT_GETTERS) .disable(MapperFeature.AUTO_DETECT_IS_GETTERS) .addModule(new ProblemModule()) .addModule(new ConstraintViolationProblemModule()) .build(); final Violation violation = new Violation("bob", "was missing"); final ConstraintViolationProblem unit = new ConstraintViolationProblem(BAD_REQUEST, singletonList(violation)); with(mapper.writeValueAsString(unit)) .assertThat("status", is(400)) .assertThat("type", is(ConstraintViolationProblem.TYPE_VALUE)) .assertThat("title", is("Constraint Violation")) .assertThat("violations", hasSize(1)) .assertThat("violations.*.field", contains("bob")) .assertThat("violations.*.message", contains("was missing")); } @Test void shouldSerializeCustomType() throws JsonProcessingException { final ObjectMapper mapper = new ObjectMapper() .registerModule(new ProblemModule()) .registerModule(new ConstraintViolationProblemModule()); final URI type = URI.create("foo"); final ConstraintViolationProblem unit = new ConstraintViolationProblem(type, BAD_REQUEST, emptyList()); with(mapper.writeValueAsString(unit)) .assertThat("type", is("foo")); } }
1,013
3,066
<gh_stars>1000+ /* * Licensed to Crate.io GmbH ("Crate") under one or more contributor * license agreements. See the NOTICE file distributed with this work for * additional information regarding copyright ownership. Crate licenses * this file to you under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. You may * obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * However, if you have executed another commercial license agreement * with Crate these terms will supersede the license and you may use the * software solely pursuant to the terms of the relevant commercial agreement. */ package io.crate.expression.symbol; import io.crate.metadata.FunctionType; import io.crate.metadata.Reference; import io.crate.types.DataTypes; import java.util.List; public final class GroupAndAggregateSemantics { /** * Ensures that the SELECT list expressions and GROUP BY expressions are semantically valid. * * The combination of SELECT list and GROUP BY is semantically valid if it can be executed considering the logical execution order of operators: * <pre> * {@code * (operators that are not relevant are omitted) * * Eval (select list) \ * | | these can only operate on top of expressions that are outputs of (Group)HashAggregate * ProjectSet | * | | * WindowAgg / * | * (Group)HashAggregate --> Aggregates can also operate on columns that are not part of the select List * | * Collect --> Can output all columns of the table and eval scalars; no other expressions * * To note here: * - Scalars can be evaluated both, within `Collect` *before* an aggregation or grouping, and after in `Eval` * - ProjectSet (table functions) and WindowAgg happen after the Aggregation * } * </pre> */ public static void validate(List<Symbol> outputSymbols, List<Symbol> groupBy) throws IllegalArgumentException { boolean containsAggregations = SymbolVisitors.any( x -> x instanceof Function && ((Function) x).type() == FunctionType.AGGREGATE, outputSymbols ); if (!containsAggregations && groupBy.isEmpty()) { return; } groupBy.forEach(GroupAndAggregateSemantics::ensureTypedGroupKey); for (int i = 0; i < outputSymbols.size(); i++) { Symbol output = outputSymbols.get(i); Symbol offender = output.accept(FindOffendingSymbol.INSTANCE, groupBy); if (offender == null) { continue; } throw new IllegalArgumentException( "'" + offender + "' must appear in the GROUP BY clause or be used in an aggregation function. " + "Perhaps you grouped by an alias that clashes with a column in the relations" ); } } private static void ensureTypedGroupKey(Symbol groupBy) { groupBy.accept(EnsureTypedGroupKey.INSTANCE, null); } private static class EnsureTypedGroupKey extends SymbolVisitor<Void, Void> { static final EnsureTypedGroupKey INSTANCE = new EnsureTypedGroupKey(); @Override public Void visitSymbol(Symbol symbol, Void context) { if (symbol.valueType() == DataTypes.UNDEFINED) { raiseException(symbol); } return null; } @Override public Void visitLiteral(Literal symbol, Void context) { if (symbol.valueType() == DataTypes.UNDEFINED) { if (symbol.value() == null) { // `NULL` is a valid case return null; } else { raiseException(symbol); } } return null; } @Override public Void visitAlias(AliasSymbol aliasSymbol, Void context) { return aliasSymbol.symbol().accept(this, context); } private static void raiseException(Symbol symbol) { throw new IllegalArgumentException( "Cannot group or aggregate on '" + symbol.toString() + "' with an undefined type." + " Using an explicit type cast will make this work but adds processing overhead to the query." ); } } static class FindOffendingSymbol extends SymbolVisitor<List<Symbol>, Symbol> { static final FindOffendingSymbol INSTANCE = new FindOffendingSymbol(); @Override protected Symbol visitSymbol(Symbol symbol, List<Symbol> groupBy) { throw new UnsupportedOperationException("Unsupported symbol: " + symbol); } @Override public Symbol visitFunction(Function function, List<Symbol> groupBy) { switch (function.type()) { case SCALAR: { /* valid: * SELECT 4 * x FROM tbl GROUP BY x * SELECT 4 * x FROM tbl GROUP BY 4 * x * * invalid: * SELECT 4 * y FROM tbl GROUP BY x */ if (groupBy.contains(function)) { return null; } for (Symbol argument : function.arguments()) { Symbol offender = argument.accept(this, groupBy); if (offender != null) { return function; } } return null; } case AGGREGATE: return null; case TABLE: case WINDOW: // Cannot group by a table or window function. Arguments must pass validation: for (Symbol argument : function.arguments()) { Symbol offender = argument.accept(this, groupBy); if (offender != null) { return offender; } } return null; default: throw new IllegalStateException("Unexpected function type: " + function.type()); } } @Override public Symbol visitAggregation(Aggregation symbol, List<Symbol> groupBy) { throw new AssertionError("`Aggregation` symbols are created in the Planner. Until then there should only be `Function` symbols with type aggregate"); } @Override public Symbol visitAlias(AliasSymbol aliasSymbol, List<Symbol> groupBy) { /* valid: * SELECT x AS xx, count(*) FROM tbl GROUP BY xx; * SELECT x AS xx, count(*) FROM tbl GROUP BY x; * * not valid: * * SELECT x AS xx, count(*) FROM tbl GROUP BY y; */ if (groupBy.contains(aliasSymbol)) { return null; } return aliasSymbol.symbol().accept(this, groupBy); } @Override public Symbol visitReference(Reference ref, List<Symbol> groupBy) { if (containedIn(ref, groupBy)) { return null; } return ref; } @Override public Symbol visitField(ScopedSymbol symbol, List<Symbol> groupBy) { if (containedIn(symbol, groupBy)) { return null; } return symbol; } public static boolean containedIn(Symbol symbol, List<Symbol> groupBy) { // SELECT count(*), x AS xx, x FROM tbl GROUP BY 2 // GROUP BY is on `xx`, but `x` is implicitly also present in GROUP BY, so must be valid. for (Symbol groupExpr : groupBy) { if (symbol.equals(groupExpr)) { return true; } if (groupExpr instanceof AliasSymbol) { if (symbol.equals(((AliasSymbol) groupExpr).symbol())) { return true; } } } return false; } @Override public Symbol visitDynamicReference(DynamicReference ref, List<Symbol> groupBy) { return visitReference(ref, groupBy); } @Override public Symbol visitWindowFunction(WindowFunction function, List<Symbol> groupBy) { // Window function is executed after the GROUP operation // It cannot appear in the GROUP BY clause, but arguments must pass validation. for (Symbol argument : function.arguments()) { Symbol offender = argument.accept(this, groupBy); if (offender != null) { return offender; } } return null; } @Override public Symbol visitLiteral(Literal symbol, List<Symbol> groupBy) { return null; } @Override public Symbol visitParameterSymbol(ParameterSymbol parameterSymbol, List<Symbol> groupBy) { // Behaves like a literal: `SELECT $1, x GROUP BY x` is allowed return null; } @Override public Symbol visitSelectSymbol(SelectSymbol selectSymbol, List<Symbol> groupBy) { // Only non-correlated sub-queries are allowed, so this behaves like a literal return null; } @Override public Symbol visitInputColumn(InputColumn inputColumn, List<Symbol> groupBy) { throw new AssertionError("Must not have `InputColumn`s when doing semantic validation of SELECT LIST / GROUP BY"); } @Override public Symbol visitMatchPredicate(MatchPredicate matchPredicate, List<Symbol> groupBy) { throw new AssertionError("MATCH predicate cannot be used in SELECT list"); } @Override public Symbol visitFetchReference(FetchReference fetchReference, List<Symbol> groupBy) { throw new AssertionError("Must not have `FetchReference`s when doing semantic validation of SELECT LIST / GROUP BY"); } } }
4,816
463
<filename>scripts/prepare_testing_files.py import sys import os sys.path.append(os.path.dirname(os.path.dirname(__file__))) import argparse import glob import csv import numpy as np from config.AudioConfig import AudioConfig def mkdir(path): if not os.path.exists(path): os.makedirs(path) def proc_frames(src_path, dst_path): cmd = 'ffmpeg -i \"{}\" -start_number 0 -qscale:v 2 \"{}\"/%06d.jpg -loglevel error -y'.format(src_path, dst_path) os.system(cmd) frames = glob.glob(os.path.join(dst_path, '*.jpg')) return len(frames) def proc_audio(src_mouth_path, dst_audio_path): audio_command = 'ffmpeg -i \"{}\" -loglevel error -y -f wav -acodec pcm_s16le ' \ '-ar 16000 \"{}\"'.format(src_mouth_path, dst_audio_path) os.system(audio_command) if __name__ == "__main__": parser = argparse.ArgumentParser() # parser.add_argument('--dst_dir_path', default='/mnt/lustre/DATAshare3/VoxCeleb2', # help="dst file position") parser.add_argument('--dir_path', default='./misc', help="dst file position") parser.add_argument('--src_pose_path', default='./misc/Pose_Source/00473.mp4', help="pose source file position, this could be an mp4 or a folder") parser.add_argument('--src_audio_path', default='./misc/Audio_Source/00015.mp4', help="audio source file position, it could be an mp3 file or an mp4 video with audio") parser.add_argument('--src_mouth_frame_path', default=None, help="mouth frame file position, the video frames synced with audios") parser.add_argument('--src_input_path', default='./misc/Input/00098.mp4', help="input file position, it could be a folder with frames, a jpg or an mp4") parser.add_argument('--csv_path', default='./misc/demo2.csv', help="path to output index files") parser.add_argument('--convert_spectrogram', action='store_true', help='whether to convert audio to spectrogram') args = parser.parse_args() dir_path = args.dir_path mkdir(dir_path) # ===================== process input ======================================================= input_save_path = os.path.join(dir_path, 'Input') mkdir(input_save_path) input_name = args.src_input_path.split('/')[-1].split('.')[0] num_inputs = 1 dst_input_path = os.path.join(input_save_path, input_name) mkdir(dst_input_path) if args.src_input_path.split('/')[-1].split('.')[-1] == 'mp4': num_inputs = proc_frames(args.src_input_path, dst_input_path) elif os.path.isdir(args.src_input_path): dst_input_path = args.src_input_path else: os.system('cp {} {}'.format(args.src_input_path, os.path.join(dst_input_path, args.src_input_path.split('/')[-1]))) # ===================== process audio ======================================================= audio_source_save_path = os.path.join(dir_path, 'Audio_Source') mkdir(audio_source_save_path) audio_name = args.src_audio_path.split('/')[-1].split('.')[0] spec_dir = 'None' dst_audio_path = os.path.join(audio_source_save_path, audio_name + '.mp3') if args.src_audio_path.split('/')[-1].split('.')[-1] == 'mp3': os.system('cp {} {}'.format(args.src_audio_path, dst_audio_path)) if args.src_mouth_frame_path and os.path.isdir(args.src_mouth_frame_path): dst_mouth_frame_path = args.src_mouth_frame_path num_mouth_frames = len(glob.glob(os.path.join(args.src_mouth_frame_path, '*.jpg')) + glob.glob(os.path.join(args.src_mouth_frame_path, '*.png'))) else: dst_mouth_frame_path = 'None' num_mouth_frames = 0 else: mouth_source_save_path = os.path.join(dir_path, 'Mouth_Source') mkdir(mouth_source_save_path) dst_mouth_frame_path = os.path.join(mouth_source_save_path, audio_name) mkdir(dst_mouth_frame_path) proc_audio(args.src_audio_path, dst_audio_path) num_mouth_frames = proc_frames(args.src_audio_path, dst_mouth_frame_path) if args.convert_spectrogram: audio = AudioConfig(fft_size=1280, hop_size=160) wav = audio.read_audio(dst_audio_path) spectrogram = audio.audio_to_spectrogram(wav) spec_dir = os.path.join(audio_source_save_path, audio_name + '.npy') np.save(spec_dir, spectrogram.astype(np.float32), allow_pickle=False) # ===================== process pose ======================================================= if os.path.isdir(args.src_pose_path): num_pose_frames = len(glob.glob(os.path.join(args.src_pose_path, '*.jpg')) + glob.glob(os.path.join(args.src_pose_path, '*.png'))) dst_pose_frame_path = args.src_pose_path else: pose_source_save_path = os.path.join(dir_path, 'Pose_Source') mkdir(pose_source_save_path) pose_name = args.src_pose_path.split('/')[-1].split('.')[0] dst_pose_frame_path = os.path.join(pose_source_save_path, pose_name) mkdir(dst_pose_frame_path) num_pose_frames = proc_frames(args.src_pose_path, dst_pose_frame_path) # ===================== form csv ======================================================= with open(args.csv_path, 'w', newline='') as csvfile: writer = csv.writer(csvfile, delimiter=' ', quoting=csv.QUOTE_MINIMAL) writer.writerows([[dst_input_path, str(num_inputs), dst_pose_frame_path, str(num_pose_frames), dst_audio_path, dst_mouth_frame_path, str(num_mouth_frames), spec_dir]]) print('meta-info saved at ' + args.csv_path) csvfile.close()
2,428
435
<reponame>amaajemyfren/data { "copyright_text": "Creative Commons Attribution license (reuse allowed)", "description": "Have you ever struggled with finding ways to present data visualizations\nand/or results to non-technical audiences in a coherent and engaging\nmanner? In this talk, I'll detail how I overcame such a challenge by\nusing Dash to build an interactive app for firefighters to use during\nperformance testing of their rescue equipment.\n\nAnalytical web applications can serve as a powerful means for scientists\nand engineers to interact with data and identify trends in a concise and\nstraightforward manner. Such tools can allow users to immediately see\nthe effects of modifying specific input parameters. Additionally,\ninteractive web apps can be utilized to present data visualizations and\nanalysis results in engaging ways.\n\nUnless you're a full-stack developer, creating these types of web\napplications may seem quite challenging. Dash, a Python framework\nwritten on top of Flask, Plotly.js, and React.js, handles many of the\ncomplexities associated with building custom interfaces and provides\nusers the ability to build powerful data visualizations strictly through\nPython.\n\nDespite being an intermediate Python user lacking full knowledge of the\ntechnologies and protocols required to build web-based applications, I\nwas able to create a UI using Dash. More specifically, I built an\ninteractive dashboard for firefighters to process and interact with\nsensor data collected during performance testing of their rescue\nequipment.\n\nDuring this talk, I will briefly detail the motivation behind this\nproject. Then, I'll describe how the project progressed to its current\nstate, while highlighting key points that can be applied to the general\ncase of developing interactive web apps for audiences from non-technical\nbackgrounds. To conclude my presentation, I will show a demo of the\ninteractive web app and summarize the key takeaways.\n", "duration": 1611, "language": "eng", "recorded": "2019-07-27T12:00:00", "related_urls": [ { "label": "Conference schedule", "url": "https://www.pyohio.org/2019/events/schedule" } ], "speakers": [ "<NAME>" ], "tags": [], "thumbnail_url": "https://i.ytimg.com/vi/GiNF9diitAM/maxresdefault.jpg", "title": "Using Dash to Create Interactive Web Apps for Non-Technical Audiences", "videos": [ { "type": "youtube", "url": "https://www.youtube.com/watch?v=GiNF9diitAM" } ] }
679
2,860
{"QGgk6etEB5eSvqY2ZuDMmg==":"demo.gif","+xEDS5NSUg9SHc2Mg8OnuA==":"mbr-1620x1080.jpg","f0xtCOtiw5RjCzDyQVo+/w==":"mbr-719x1080.jpg","FOQetHuWr16GxoiOogp+Tg==":"mbr-1-1620x1080.jpg"}
115
432
from .. import rman_config from .. import rman_bl_nodes import bpy import json import pprint def GetConfigurablePanels(): '''Return the names of RenderForBlender panels that are configurable. Example: import RenderManForBlender.rfb_api as rfb_api rfb_api.GetConfigurablePanels() Returns: (dict) ''' panels = dict() for config_name,cfg in rman_config.__RMAN_CONFIG__.items(): for param_name, ndp in cfg.params.items(): panel = getattr(ndp, 'panel', '') if panel == '': continue if panel not in panels: #panels.append(ndp.panel) cls = getattr(bpy.types, panel) panels[panel] = { 'bl_label': cls.bl_label } print("RenderMan Configurable Panels") print("------------------------------") for panel, props in panels.items(): print("%s (%s)" % (panel, props['bl_label'])) print("------------------------------\n") return panels def GetConfigurablePanelProperties(panel): '''Return all properties in a given panel that are configurable. Example: import RenderManForBlender.rfb_api as rfb_api rfb_api.GetConfigurablePanelProperties('RENDER_PT_renderman_sampling') Args: panel (str) - the name of the panel caller is interested in Returns: (dict) ''' props = dict() for config_name,cfg in rman_config.__RMAN_CONFIG__.items(): for param_name, ndp in cfg.params.items(): if not hasattr(ndp, 'panel'): continue if ndp.panel == panel: label = ndp.name if hasattr(ndp, 'label'): label = ndp.label props[label] = ndp.name print("Configurable Properties (%s)" % panel) print("------------------------------") for label, prop in props.items(): print("%s (%s)" % (prop, label)) print("------------------------------\n") return props def GetPanelPropertyAsJson(panel, prop): '''Get a configurable panel property as JSON Example: import RenderManForBlender.rfb_api as rfb_api rfb_api.GetPanelPropertyAsJson('RENDER_PT_renderman_sampling', 'hider_maxSamples') Args: panel (str) - the name of the panel caller is interested in prop (str) - property name caller is interested in ''' json_str = '' for config_name,cfg in rman_config.__RMAN_CONFIG__.items(): for param_name, ndp in cfg.params.items(): if not hasattr(ndp, 'panel'): continue if ndp.panel == panel and ndp.name == prop: json_str = json.dumps(ndp.as_dict()) break return json_str
1,239
17,703
<reponame>giantcroc/envoy<filename>test/common/config/sotw_subscription_state_test.cc #include "envoy/config/endpoint/v3/endpoint.pb.h" #include "envoy/config/endpoint/v3/endpoint.pb.validate.h" #include "source/common/config/resource_name.h" #include "source/common/config/utility.h" #include "source/common/config/xds_mux/sotw_subscription_state.h" #include "source/common/stats/isolated_store_impl.h" #include "test/mocks/config/mocks.h" #include "test/mocks/event/mocks.h" #include "test/mocks/local_info/mocks.h" #include "test/test_common/simulated_time_system.h" #include "gmock/gmock.h" #include "gtest/gtest.h" using testing::An; using testing::IsSubstring; using testing::NiceMock; using testing::Throw; using testing::UnorderedElementsAre; namespace Envoy { namespace Config { namespace { class SotwSubscriptionStateTest : public testing::Test { protected: SotwSubscriptionStateTest() : resource_decoder_("cluster_name") { ttl_timer_ = new Event::MockTimer(&dispatcher_); state_ = std::make_unique<XdsMux::SotwSubscriptionState>( Config::getTypeUrl<envoy::config::endpoint::v3::ClusterLoadAssignment>(), callbacks_, dispatcher_, resource_decoder_); state_->updateSubscriptionInterest({"name1", "name2", "name3"}, {}); auto cur_request = getNextDiscoveryRequestAckless(); EXPECT_THAT(cur_request->resource_names(), UnorderedElementsAre("name1", "name2", "name3")); } std::unique_ptr<envoy::service::discovery::v3::DiscoveryRequest> getNextDiscoveryRequestAckless() { return state_->getNextRequestAckless(); } envoy::service::discovery::v3::Resource heartbeatResource(std::chrono::milliseconds ttl, const std::string& name) { envoy::service::discovery::v3::Resource resource; resource.mutable_ttl()->CopyFrom(Protobuf::util::TimeUtil::MillisecondsToDuration(ttl.count())); resource.set_name(name); return resource; } envoy::service::discovery::v3::Resource resourceWithTtl(std::chrono::milliseconds ttl, const envoy::config::endpoint::v3::ClusterLoadAssignment& cla) { envoy::service::discovery::v3::Resource resource; resource.mutable_resource()->PackFrom(cla); resource.mutable_ttl()->CopyFrom(Protobuf::util::TimeUtil::MillisecondsToDuration(ttl.count())); resource.set_name(cla.cluster_name()); return resource; } const envoy::config::endpoint::v3::ClusterLoadAssignment resource(const std::string& cluster_name) { envoy::config::endpoint::v3::ClusterLoadAssignment resource; resource.set_cluster_name(cluster_name); return resource; } UpdateAck deliverDiscoveryResponse(const std::vector<std::string>& resource_names, const std::string& version_info, const std::string& nonce) { envoy::service::discovery::v3::DiscoveryResponse response; response.set_version_info(version_info); response.set_nonce(nonce); response.set_type_url(Config::getTypeUrl<envoy::config::endpoint::v3::ClusterLoadAssignment>()); for (const auto& resource_name : resource_names) { response.add_resources()->PackFrom(resource(resource_name)); } EXPECT_CALL(callbacks_, onConfigUpdate(An<const std::vector<DecodedResourcePtr>&>(), version_info)); return state_->handleResponse(response); } UpdateAck deliverDiscoveryResponseWithTtlResource(const envoy::service::discovery::v3::Resource& resource, const std::string& version_info, const std::string& nonce) { envoy::service::discovery::v3::DiscoveryResponse response; response.set_version_info(version_info); response.set_nonce(nonce); response.set_type_url(Config::getTypeUrl<envoy::config::endpoint::v3::ClusterLoadAssignment>()); response.add_resources()->PackFrom(resource); EXPECT_CALL(callbacks_, onConfigUpdate(An<const std::vector<DecodedResourcePtr>&>(), version_info)); return state_->handleResponse(response); } UpdateAck deliverBadDiscoveryResponse(const std::string& version_info, const std::string& nonce) { envoy::service::discovery::v3::DiscoveryResponse message; message.set_version_info(version_info); message.set_nonce(nonce); EXPECT_CALL(callbacks_, onConfigUpdate(An<const std::vector<DecodedResourcePtr>&>(), _)) .WillOnce(Throw(EnvoyException("oh no"))); return state_->handleResponse(message); } NiceMock<MockUntypedConfigUpdateCallbacks> callbacks_; TestUtility::TestOpaqueResourceDecoderImpl<envoy::config::endpoint::v3::ClusterLoadAssignment> resource_decoder_; NiceMock<Event::MockDispatcher> dispatcher_; Event::MockTimer* ttl_timer_; // We start out interested in three resources: name1, name2, and name3. std::unique_ptr<XdsMux::SotwSubscriptionState> state_; }; // Basic gaining/losing interest in resources should lead to changes in subscriptions. TEST_F(SotwSubscriptionStateTest, SubscribeAndUnsubscribe) { { state_->updateSubscriptionInterest({"name4"}, {"name1"}); auto cur_request = getNextDiscoveryRequestAckless(); EXPECT_THAT(cur_request->resource_names(), UnorderedElementsAre("name2", "name3", "name4")); } { state_->updateSubscriptionInterest({"name1"}, {"name3", "name4"}); auto cur_request = getNextDiscoveryRequestAckless(); EXPECT_THAT(cur_request->resource_names(), UnorderedElementsAre("name1", "name2")); } } // Unlike delta, if SotW gets multiple interest updates before being able to send a request, they // all collapse to a single update. However, even if the updates all cancel each other out, there // still will be a request generated. All of the following tests explore different such cases. TEST_F(SotwSubscriptionStateTest, RemoveThenAdd) { state_->updateSubscriptionInterest({}, {"name3"}); state_->updateSubscriptionInterest({"name3"}, {}); auto cur_request = getNextDiscoveryRequestAckless(); EXPECT_THAT(cur_request->resource_names(), UnorderedElementsAre("name1", "name2", "name3")); } TEST_F(SotwSubscriptionStateTest, AddThenRemove) { state_->updateSubscriptionInterest({"name4"}, {}); state_->updateSubscriptionInterest({}, {"name4"}); auto cur_request = getNextDiscoveryRequestAckless(); EXPECT_THAT(cur_request->resource_names(), UnorderedElementsAre("name1", "name2", "name3")); } TEST_F(SotwSubscriptionStateTest, AddRemoveAdd) { state_->updateSubscriptionInterest({"name4"}, {}); state_->updateSubscriptionInterest({}, {"name4"}); state_->updateSubscriptionInterest({"name4"}, {}); auto cur_request = getNextDiscoveryRequestAckless(); EXPECT_THAT(cur_request->resource_names(), UnorderedElementsAre("name1", "name2", "name3", "name4")); } TEST_F(SotwSubscriptionStateTest, RemoveAddRemove) { state_->updateSubscriptionInterest({}, {"name3"}); state_->updateSubscriptionInterest({"name3"}, {}); state_->updateSubscriptionInterest({}, {"name3"}); auto cur_request = getNextDiscoveryRequestAckless(); EXPECT_THAT(cur_request->resource_names(), UnorderedElementsAre("name1", "name2")); } TEST_F(SotwSubscriptionStateTest, BothAddAndRemove) { state_->updateSubscriptionInterest({"name4"}, {"name1", "name2", "name3"}); state_->updateSubscriptionInterest({"name1", "name2", "name3"}, {"name4"}); state_->updateSubscriptionInterest({"name4"}, {"name1", "name2", "name3"}); auto cur_request = getNextDiscoveryRequestAckless(); EXPECT_THAT(cur_request->resource_names(), UnorderedElementsAre("name4")); } TEST_F(SotwSubscriptionStateTest, CumulativeUpdates) { state_->updateSubscriptionInterest({"name4"}, {}); state_->updateSubscriptionInterest({"name5"}, {}); auto cur_request = getNextDiscoveryRequestAckless(); EXPECT_THAT(cur_request->resource_names(), UnorderedElementsAre("name1", "name2", "name3", "name4", "name5")); } TEST_F(SotwSubscriptionStateTest, LastUpdateNonceAndVersionUsed) { EXPECT_CALL(*ttl_timer_, disableTimer()); deliverDiscoveryResponse({"name1", "name2"}, "version1", "nonce1"); state_->updateSubscriptionInterest({"name3"}, {}); auto cur_request = getNextDiscoveryRequestAckless(); EXPECT_EQ("nonce1", cur_request->response_nonce()); EXPECT_EQ("version1", cur_request->version_info()); } // Verifies that a sequence of good and bad responses from the server all get the appropriate // ACKs/NACKs from Envoy. TEST_F(SotwSubscriptionStateTest, AckGenerated) { // The xDS server's first response includes items for name1 and 2, but not 3. { EXPECT_CALL(*ttl_timer_, disableTimer()); UpdateAck ack = deliverDiscoveryResponse({"name1", "name2"}, "version1", "nonce1"); EXPECT_EQ("nonce1", ack.nonce_); EXPECT_EQ(Grpc::Status::WellKnownGrpcStatus::Ok, ack.error_detail_.code()); } // The next response updates 1 and 2, and adds 3. { EXPECT_CALL(*ttl_timer_, disableTimer()); UpdateAck ack = deliverDiscoveryResponse({"name1", "name2", "name3"}, "version2", "nonce2"); EXPECT_EQ("nonce2", ack.nonce_); EXPECT_EQ(Grpc::Status::WellKnownGrpcStatus::Ok, ack.error_detail_.code()); } // The next response tries but fails to update all 3, and so should produce a NACK. { EXPECT_CALL(*ttl_timer_, disableTimer()); UpdateAck ack = deliverBadDiscoveryResponse("version3", "nonce3"); EXPECT_EQ("nonce3", ack.nonce_); EXPECT_NE(Grpc::Status::WellKnownGrpcStatus::Ok, ack.error_detail_.code()); } // The last response successfully updates all 3. { EXPECT_CALL(*ttl_timer_, disableTimer()); UpdateAck ack = deliverDiscoveryResponse({"name1", "name2", "name3"}, "version4", "nonce4"); EXPECT_EQ("nonce4", ack.nonce_); EXPECT_EQ(Grpc::Status::WellKnownGrpcStatus::Ok, ack.error_detail_.code()); } } TEST_F(SotwSubscriptionStateTest, CheckUpdatePending) { // Note that the test fixture ctor causes the first request to be "sent", so we start in the // middle of a stream, with our initially interested resources having been requested already. EXPECT_FALSE(state_->subscriptionUpdatePending()); state_->updateSubscriptionInterest({}, {}); // no change EXPECT_FALSE(state_->subscriptionUpdatePending()); state_->markStreamFresh(); EXPECT_TRUE(state_->subscriptionUpdatePending()); // no change, BUT fresh stream state_->updateSubscriptionInterest({}, {"name3"}); // one removed EXPECT_TRUE(state_->subscriptionUpdatePending()); state_->updateSubscriptionInterest({"name3"}, {}); // one added EXPECT_TRUE(state_->subscriptionUpdatePending()); } TEST_F(SotwSubscriptionStateTest, HandleEstablishmentFailure) { // Although establishment failure is not supposed to cause an onConfigUpdateFailed() on the // ultimate actual subscription callbacks, the callbacks reference held is actually to // the WatchMap, which then calls GrpcSubscriptionImpl(s). It is the GrpcSubscriptionImpl // that will decline to pass on an onConfigUpdateFailed(ConnectionFailure). EXPECT_CALL(callbacks_, onConfigUpdateFailed(_, _)); state_->handleEstablishmentFailure(); } TEST_F(SotwSubscriptionStateTest, ResourceTTL) { Event::SimulatedTimeSystem time_system; time_system.setSystemTime(std::chrono::milliseconds(0)); { EXPECT_CALL(*ttl_timer_, enabled()); EXPECT_CALL(*ttl_timer_, enableTimer(std::chrono::milliseconds(1000), _)); deliverDiscoveryResponseWithTtlResource( resourceWithTtl(std::chrono::seconds(1), resource("name1")), "debug1", "nonce1"); } { // Increase the TTL. EXPECT_CALL(*ttl_timer_, enabled()); EXPECT_CALL(*ttl_timer_, enableTimer(std::chrono::milliseconds(2000), _)); deliverDiscoveryResponseWithTtlResource( resourceWithTtl(std::chrono::seconds(2), resource("name1")), "debug1", "nonce1"); } { // Refresh the TTL with a heartbeat. The resource should not be passed to the update callbacks. EXPECT_CALL(*ttl_timer_, enabled()); deliverDiscoveryResponseWithTtlResource(heartbeatResource(std::chrono::seconds(2), "name1"), "debug1", "nonce1"); } // Remove the TTL. EXPECT_CALL(*ttl_timer_, disableTimer()); deliverDiscoveryResponse({"name1"}, "version1", "nonce1"); // Add back the TTL. EXPECT_CALL(*ttl_timer_, enabled()); EXPECT_CALL(*ttl_timer_, enableTimer(_, _)); deliverDiscoveryResponseWithTtlResource( resourceWithTtl(std::chrono::seconds(2), resource("name1")), "debug1", "nonce1"); EXPECT_CALL(callbacks_, onConfigUpdate(_, _, _)); EXPECT_CALL(*ttl_timer_, disableTimer()); time_system.setSystemTime(std::chrono::seconds(2)); // Invoke the TTL. ttl_timer_->invokeCallback(); } TEST_F(SotwSubscriptionStateTest, TypeUrlMismatch) { envoy::service::discovery::v3::DiscoveryResponse response; response.set_version_info("version1"); response.set_nonce("nonce1"); response.set_type_url("badtypeurl"); response.add_resources()->PackFrom(resource("resource")); EXPECT_CALL(callbacks_, onConfigUpdateFailed(Envoy::Config::ConfigUpdateFailureReason::UpdateRejected, _)) .WillOnce(Invoke([](Envoy::Config::ConfigUpdateFailureReason, const EnvoyException* e) { EXPECT_TRUE(IsSubstring( "", "", "type URL type.googleapis.com/envoy.config.endpoint.v3.ClusterLoadAssignment embedded " "in an individual Any does not match the message-wide type URL badtypeurl", e->what())); })); EXPECT_CALL(*ttl_timer_, disableTimer()); state_->handleResponse(response); } } // namespace } // namespace Config } // namespace Envoy
4,911
1,350
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. // Code generated by Microsoft (R) AutoRest Code Generator. package com.azure.resourcemanager.postgresql.models; import com.azure.core.util.Context; import com.azure.resourcemanager.postgresql.fluent.models.ServerKeyInner; import java.time.OffsetDateTime; /** An immutable client-side representation of ServerKey. */ public interface ServerKey { /** * Gets the id property: Fully qualified resource Id for the resource. * * @return the id value. */ String id(); /** * Gets the name property: The name of the resource. * * @return the name value. */ String name(); /** * Gets the type property: The type of the resource. * * @return the type value. */ String type(); /** * Gets the kind property: Kind of encryption protector used to protect the key. * * @return the kind value. */ String kind(); /** * Gets the serverKeyType property: The key type like 'AzureKeyVault'. * * @return the serverKeyType value. */ ServerKeyType serverKeyType(); /** * Gets the uri property: The URI of the key. * * @return the uri value. */ String uri(); /** * Gets the creationDate property: The key creation date. * * @return the creationDate value. */ OffsetDateTime creationDate(); /** * Gets the inner com.azure.resourcemanager.postgresql.fluent.models.ServerKeyInner object. * * @return the inner object. */ ServerKeyInner innerModel(); /** The entirety of the ServerKey definition. */ interface Definition extends DefinitionStages.Blank, DefinitionStages.WithParentResource, DefinitionStages.WithCreate { } /** The ServerKey definition stages. */ interface DefinitionStages { /** The first stage of the ServerKey definition. */ interface Blank extends WithParentResource { } /** The stage of the ServerKey definition allowing to specify parent resource. */ interface WithParentResource { /** * Specifies serverName, resourceGroupName. * * @param serverName The name of the server. * @param resourceGroupName The name of the resource group. The name is case insensitive. * @return the next definition stage. */ WithCreate withExistingServer(String serverName, String resourceGroupName); } /** * The stage of the ServerKey definition which contains all the minimum required properties for the resource to * be created, but also allows for any other optional properties to be specified. */ interface WithCreate extends DefinitionStages.WithServerKeyType, DefinitionStages.WithUri { /** * Executes the create request. * * @return the created resource. */ ServerKey create(); /** * Executes the create request. * * @param context The context to associate with this operation. * @return the created resource. */ ServerKey create(Context context); } /** The stage of the ServerKey definition allowing to specify serverKeyType. */ interface WithServerKeyType { /** * Specifies the serverKeyType property: The key type like 'AzureKeyVault'.. * * @param serverKeyType The key type like 'AzureKeyVault'. * @return the next definition stage. */ WithCreate withServerKeyType(ServerKeyType serverKeyType); } /** The stage of the ServerKey definition allowing to specify uri. */ interface WithUri { /** * Specifies the uri property: The URI of the key.. * * @param uri The URI of the key. * @return the next definition stage. */ WithCreate withUri(String uri); } } /** * Begins update for the ServerKey resource. * * @return the stage of resource update. */ ServerKey.Update update(); /** The template for ServerKey update. */ interface Update extends UpdateStages.WithServerKeyType, UpdateStages.WithUri { /** * Executes the update request. * * @return the updated resource. */ ServerKey apply(); /** * Executes the update request. * * @param context The context to associate with this operation. * @return the updated resource. */ ServerKey apply(Context context); } /** The ServerKey update stages. */ interface UpdateStages { /** The stage of the ServerKey update allowing to specify serverKeyType. */ interface WithServerKeyType { /** * Specifies the serverKeyType property: The key type like 'AzureKeyVault'.. * * @param serverKeyType The key type like 'AzureKeyVault'. * @return the next definition stage. */ Update withServerKeyType(ServerKeyType serverKeyType); } /** The stage of the ServerKey update allowing to specify uri. */ interface WithUri { /** * Specifies the uri property: The URI of the key.. * * @param uri The URI of the key. * @return the next definition stage. */ Update withUri(String uri); } } /** * Refreshes the resource to sync with Azure. * * @return the refreshed resource. */ ServerKey refresh(); /** * Refreshes the resource to sync with Azure. * * @param context The context to associate with this operation. * @return the refreshed resource. */ ServerKey refresh(Context context); }
2,438
5,908
package com.example; import java.util.stream.Collectors; import lombok.RequiredArgsConstructor; import lombok.SneakyThrows; import org.apache.solr.client.solrj.SolrClient; import org.apache.solr.client.solrj.SolrQuery; import org.apache.solr.client.solrj.response.QueryResponse; import org.apache.solr.client.solrj.util.ClientUtils; import org.apache.solr.common.SolrDocument; @RequiredArgsConstructor public class SolrSearchEngine implements SearchEngine { public static final String COLLECTION_NAME = "products"; private final SolrClient client; @SneakyThrows public SearchResult search(String term) { SolrQuery query = new SolrQuery(); query.setQuery("title:" + ClientUtils.escapeQueryChars(term)); QueryResponse response = client.query(COLLECTION_NAME, query); return createResult(response); } private SearchResult createResult(QueryResponse response) { return SearchResult.builder() .totalHits(response.getResults().getNumFound()) .results(response.getResults() .stream() .map(SolrDocument::getFieldValueMap) .collect(Collectors.toList())) .build(); } }
464
2,134
<filename>proto-vjoystick/vjoystick.h /* Copyright (c) 2016, <NAME> <<EMAIL>> * Permission to use, copy, modify, and/or distribute this * software for any purpose with or without fee is hereby granted, * provided that the above copyright notice and this permission * notice appear in all copies. */ #pragma once #include "ui_vjoystick.h" #include "api/plugin-api.hpp" #include "compat/macros.hpp" enum status { }; class vjoystick : public TR, public IProtocol { Q_OBJECT public: vjoystick(); ~vjoystick() override; module_status initialize() override; void pose(const double* headpose, const double*) override; QString game_name() override { return tr("Virtual joystick"); } private: long axis_min[6] {}; long axis_max[6] {}; [[nodiscard]] bool init(); int to_axis_value(unsigned axis_id, double val) const; static constexpr unsigned axis_count = 6; static const unsigned char axis_ids[axis_count]; bool status = false; bool first_run = true; }; class vjoystick_dialog final : public IProtocolDialog { Q_OBJECT public: vjoystick_dialog(); void register_protocol(IProtocol *) override {} void unregister_protocol() override {} private: Ui::vjoystick ui; }; class vjoystick_metadata : public Metadata { Q_OBJECT QString name() override { return tr("Joystick emulation -- vjoystick"); } QIcon icon() override { return QIcon(":/images/vjoystick.png"); } };
503
831
<reponame>phpc0de/idea-android<gh_stars>100-1000 /* * Copyright (C) 2017 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.android.tools.idea.editors.strings; import com.android.SdkConstants; import com.android.ide.common.resources.ValueXmlHelper; import com.android.tools.idea.rendering.Locale; import com.intellij.openapi.project.Project; import com.intellij.openapi.vfs.VirtualFile; import com.intellij.psi.xml.XmlFile; import com.intellij.psi.xml.XmlTag; import org.jetbrains.annotations.NotNull; import org.jetbrains.annotations.Nullable; final class StringPsiUtils { private StringPsiUtils() { } @Nullable static XmlFile getDefaultStringResourceFile(@NotNull Project project, @NotNull StringResourceKey key) { VirtualFile directory = key.getDirectory(); return directory == null ? null : StringsWriteUtils.getStringResourceFile(project, directory, null); } @Nullable static XmlFile getStringResourceFile(@NotNull Project project, @NotNull StringResourceKey key, @NotNull Locale locale) { VirtualFile directory = key.getDirectory(); return directory == null ? null : StringsWriteUtils.getStringResourceFile(project, directory, locale); } static void addString(@NotNull XmlFile file, @NotNull StringResourceKey key, @NotNull String value) { addString(file, key, true, value); } static void addString(@NotNull XmlFile file, @NotNull StringResourceKey key, boolean translatable, @NotNull String value) { XmlTag resources = file.getRootTag(); if (resources == null) { return; } XmlTag string = resources.createChildTag(SdkConstants.TAG_STRING, resources.getNamespace(), escape(value), false); string.setAttribute(SdkConstants.ATTR_NAME, key.getName()); if (!translatable) { string.setAttribute(SdkConstants.ATTR_TRANSLATABLE, Boolean.FALSE.toString()); } resources.addSubTag(string, false); } @NotNull private static String escape(@NotNull String value) { try { return ValueXmlHelper.escapeResourceStringAsXml(value); } catch (IllegalArgumentException exception) { // The invalid XML will be underlined in the editor return value; } } }
867
323
<reponame>humstarman/linkerd-examples<gh_stars>100-1000 package io.buoyant.http.classifiers; import io.buoyant.linkerd.ResponseClassifierInitializer; /** * This config initializer is loaded by linkerd at startup and registers the * `HeaderClassifierConfig` class under the id "io.buoyant.headerClassifier". * This tells linkerd's config system to deserialize response classifier config * blocks to `HeaderClassifierConfig` if the kind is * "io.buoyant.headerClassifer". * * In order for linkerd to load this class, it must be listed in the * `META-INF/services/io.l5d.linkerd.ResponseClassifierInitializer` file. */ public class HeaderClassifierInitializer extends ResponseClassifierInitializer { @Override public String configId() { return "io.buoyant.headerClassifier"; } @Override public Class<?> configClass() { return HeaderClassifierConfig.class; } }
287
360
/* * Copyright (c) 2020 Huawei Technologies Co.,Ltd. * * openGauss is licensed under Mulan PSL v2. * You can use this software according to the terms and conditions of the Mulan PSL v2. * You may obtain a copy of Mulan PSL v2 at: * * http://license.coscl.org.cn/MulanPSL2 * * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. * See the Mulan PSL v2 for more details. * ------------------------------------------------------------------------- * * joinskewinfo.cpp * functions for joinskew solution in MPP * * * IDENTIFICATION * src/gausskernel/optimizer/util/joinskewinfo.cpp * * ------------------------------------------------------------------------- */ #include "postgres.h" #include "knl/knl_variable.h" #include <math.h> #include "catalog/pg_statistic.h" #include "distributelayer/streamCore.h" #include "executor/executor.h" #include "nodes/makefuncs.h" #include "nodes/nodeFuncs.h" #include "optimizer/clauses.h" #include "optimizer/cost.h" #include "optimizer/dataskew.h" #include "optimizer/optimizerdebug.h" #include "optimizer/pathnode.h" #include "optimizer/planmain.h" #include "optimizer/restrictinfo.h" #include "optimizer/tlist.h" #include "optimizer/var.h" #include "parser/parse_type.h" #include "parser/parsetree.h" #include "utils/lsyscache.h" #include "utils/selfuncs.h" #include "utils/syscache.h" #include "utils/typcache.h" #include "vecexecutor/vecexecutor.h" #define IS_JOIN_OUTER(jointype) \ (JOIN_LEFT == (jointype) || JOIN_RIGHT == (jointype) || JOIN_FULL == (jointype) || \ JOIN_LEFT_ANTI_FULL == (jointype) || JOIN_RIGHT_ANTI_FULL == (jointype)) #define IS_JOIN_PLAN(plan) (IsA(plan, HashJoin) || IsA(plan, NestLoop) || IsA(plan, MergeJoin)) /* ===================== Functions for join skew info ====================== */ JoinSkewInfo::JoinSkewInfo( PlannerInfo* root, RelOptInfo* rel, List* join_clause, JoinType join_type, JoinType save_join_type) : SkewInfo(root), m_joinType(join_type), m_saveJoinType(save_join_type), m_joinClause(join_clause) { m_innerStreamInfo = NULL; m_outerStreamInfo = NULL; m_innerSkewInfo = NIL; m_outerSkewInfo = NIL; m_skewInfo = NULL; m_distribution = NULL; m_isOuterStream = false; m_rel = rel; m_skewType = SKEW_JOIN; } /* * @Description: destructor function for join skew info. */ JoinSkewInfo::~JoinSkewInfo() { m_joinClause = NULL; m_distribution = NULL; m_innerStreamInfo = NULL; m_outerStreamInfo = NULL; m_innerSkewInfo = NULL; m_outerSkewInfo = NULL; m_skewInfo = NULL; } /* * @Description: set stream info. * * @param[IN] inner_stream_info: stream info of inner side. * @param[IN] outer_stream_info: stream info of outer side. * @return void */ void JoinSkewInfo::setStreamInfo( StreamInfo* inner_stream_info, StreamInfo* outer_stream_info, Distribution* distribution) { m_innerStreamInfo = inner_stream_info; m_outerStreamInfo = outer_stream_info; m_distribution = distribution; } /* * @Description: main entrance to find stream skew info. * * @return void */ uint32 JoinSkewInfo::findStreamSkewInfo() { m_oldContext = MemoryContextSwitchTo(m_context); /* Check if these kinds of streams are possible to cause skew problem. */ if (checkSkewPossibility(false) == false && checkSkewPossibility(true) == false) { MemoryContextSwitchTo(m_oldContext); return SKEW_RES_NONE; } /* Find skew value from base relation, including null values. */ findBaseSkewInfo(); /* Find skew null value generate by outer join (not base relation). */ findNullSkewInfo(); /* Add qual cost and check if null data is need. */ addQualSkewInfo(); /* Add skew info to stream info. */ addToStreamInfo(); /* Get result. */ uint32 ret = getSkewInfo(); /* Skew info is reused, so reset the status and memory each time. */ resetSkewInfo(); return ret; } /* * @Description: main entrance to find skew info from base relation. * * @return void */ void JoinSkewInfo::findBaseSkewInfo() { /* Find skew values for both sides. */ findBaseSkewValues(false); findBaseSkewValues(true); /* Add skew info for both sides. */ addSkewInfoBothSides(); } /* * @Description: the main entrance for null skew caused by outer join. * Take 'select A.a1, B.b1 from A left join B on A.a0 = B.b0;' * as an example, when some data in a0 dose not match any data * in b0, then we out put data like (a1, NULL) as result. * Actually there must be many data can not match and generate * NULL result in real situation, which will cause NULL value * skew in later hash redistribution. * * @return void */ void JoinSkewInfo::findNullSkewInfo() { findSubNullSkew(true); findSubNullSkew(false); } /* * @Description: add qual cost and check if null data is need. * * @return void */ void JoinSkewInfo::addQualSkewInfo() { addQualCost(false); addQualCost(true); } /* * @Description: add skew info to stream info. * * @return void */ void JoinSkewInfo::addToStreamInfo() { /* * Hybrid stream type is needed for skew optimization which includes * PART_REDISTRIBUTE_PART_BROADCAST, * PART_REDISTRIBUTE_PART_ROUNDROBIN, * PART_REDISTRIBUTE_PART_LOCAL, * PART_LOCAL_PART_BROADCAST. */ if (list_length(m_innerSkewInfo) > 0) { if (checkRedundant(false)) { m_innerStreamInfo->ssinfo = NIL; m_innerStreamInfo->type = STREAM_NONE; } else { m_innerStreamInfo->ssinfo = m_innerSkewInfo; m_innerStreamInfo->type = STREAM_HYBRID; } } if (list_length(m_outerSkewInfo) > 0) { if (checkRedundant(true)) { m_outerStreamInfo->ssinfo = NIL; m_outerStreamInfo->type = STREAM_NONE; } else { m_outerStreamInfo->ssinfo = m_outerSkewInfo; m_outerStreamInfo->type = STREAM_HYBRID; } } if (m_outerStreamInfo->type != STREAM_HYBRID && m_innerStreamInfo->type != STREAM_HYBRID) { m_hasStatSkew = false; m_hasHintSkew = false; m_hasRuleSkew = false; } } /* * @Description: find skew values from base relation. * * @param[IN] stream_outer: true -- this stream is outer side of join. * @return void */ void JoinSkewInfo::findBaseSkewValues(bool stream_outer) { StreamInfo* sinfo = stream_outer ? m_outerStreamInfo : m_innerStreamInfo; m_skewInfo = stream_outer ? &m_outerSkewInfo : &m_innerSkewInfo; m_subrel = sinfo->subpath->parent; /* Data skew only occur when we hash-redistribute data. */ if (checkSkewPossibility(stream_outer) == false) { *m_skewInfo = NIL; m_distributeKeys = NIL; return; } m_distributeKeys = sinfo->stream_keys; m_dop = sinfo->smpDesc.consumerDop; m_isMultiCol = (list_length(m_distributeKeys) > 1); /* When distribute key include multiple column. */ if (m_isMultiCol) { *m_skewInfo = findMultiColSkewValues(); updateMultiColSkewness(sinfo, *m_skewInfo); } else { *m_skewInfo = findSingleColSkewValues(); } processSkewValue(*m_skewInfo); } /* * @Description: find null skew of one side of join caused by outer join. * * @param[IN] is_outer: true -- is the outer side of join. * @return void */ void JoinSkewInfo::findSubNullSkew(bool is_outer) { StreamInfo* sinfo = is_outer ? m_outerStreamInfo : m_innerStreamInfo; m_distributeKeys = sinfo->stream_keys; m_dop = sinfo->smpDesc.consumerDop; m_skewInfo = is_outer ? &m_outerSkewInfo : &m_innerSkewInfo; if (checkSkewPossibility(is_outer) == false) return; traverseSubPath(sinfo->subpath); } /* * @Description: after skew value found in join's distribute column, * we need to transfer the skew values to useful skew * optimization info. * * @return void */ void JoinSkewInfo::addSkewInfoBothSides() { List* inner_equal_keys = NIL; List* outer_equal_keys = NIL; List* inner_qual_list = NIL; List* outer_qual_list = NIL; List* tmp_list = NIL; if (m_innerSkewInfo == NIL && m_outerSkewInfo == NIL) { printSkewOptimizeDetail("No skew info is found in base rel."); return; } /* If we find the same skew value at both side, then delete one of them. */ deleteDuplicateSkewValue(); /* Check if we can optimize the skew problem. */ deleteUnoptimzeSkew(false); deleteUnoptimzeSkew(true); /* Find keys equal to the skew keys. */ inner_equal_keys = findOtherSidekeys(true); outer_equal_keys = findOtherSidekeys(false); if (inner_equal_keys == NIL || outer_equal_keys == NIL) { m_innerSkewInfo = NIL; m_outerSkewInfo = NIL; return; } /* Create quals for skew side. */ inner_qual_list = createSkewQuals(false); outer_qual_list = createSkewQuals(true); /* * If we find skew values at inner side, we need also add qual to outer side. * Because we need to find the value at outer side to broadcast it. */ tmp_list = addSkewInfoToOtherSide(false, outer_equal_keys); outer_qual_list = list_concat(outer_qual_list, tmp_list); tmp_list = addSkewInfoToOtherSide(true, inner_equal_keys); inner_qual_list = list_concat(inner_qual_list, tmp_list); m_innerSkewInfo = inner_qual_list; m_outerSkewInfo = outer_qual_list; } /* * @Description: when both sides of join have the same skew value, we just keep * the more skew side. For example: * select * from t1, t2 where t1.b = t2.b; (t1(hash a), t2(hash a)) * Then we find that t1.b has skew value X, and t2.b has skew value * X too. Thus we can only keep one side of skew value X, when we * find that the num of t1.b(X) is more than t2.b(X), then we take * t1.b(X) as skew value and t2.b(X) as non-skew value. So, t2.b(V) * is deleted from skew list. * * return void */ void JoinSkewInfo::deleteDuplicateSkewValue() { if (m_innerSkewInfo == NIL || m_outerSkewInfo == NIL) return; if (m_isMultiCol) deleteDuplicateMultiColSkewValue(); else deleteDuplicateSingleColSkewValue(); } /* * @Description: delete duplicate skew value from skew list. * * return void */ void JoinSkewInfo::deleteDuplicateSingleColSkewValue() { ListCell* lc1 = NULL; ListCell* lc2 = NULL; ColSkewInfo* cs1 = NULL; ColSkewInfo* cs2 = NULL; lc1 = m_innerSkewInfo->head; while (lc1 != NULL) { cs1 = (ColSkewInfo*)lfirst(lc1); lc1 = lc1->next; /* We have already delete all skew info at outer side. */ if (m_outerSkewInfo == NIL) return; /* For null skew, no need to compare. */ if (needPartBroadcast(cs1) == false) continue; lc2 = m_outerSkewInfo->head; while (lc2 != NULL) { cs2 = (ColSkewInfo*)lfirst(lc2); lc2 = lc2->next; /* For null skew, no need to compare. */ if (needPartBroadcast(cs2) == false) continue; /* If match the skew value, then only keep the more skew side. */ if (isSingleColSkewValueEqual(cs1, cs2)) { /* Keep the side which has more skew data ot which is set by hint. */ if (findMoreSkewSideForSingleCol(cs1, cs2)) { m_outerSkewInfo = list_delete_ptr(m_outerSkewInfo, (void*)cs2); printSkewOptimizeDetail("Duplicate skew value is found at both side of join," " and the outer side is less skew, so delete it."); } else { m_innerSkewInfo = list_delete_ptr(m_innerSkewInfo, (void*)cs1); printSkewOptimizeDetail("Duplicate skew value is found at both side of join," " and the inner side is less skew, so delete it."); } } } } } /* * @Description: delete duplicate skew value from skew list. * * return void */ void JoinSkewInfo::deleteDuplicateMultiColSkewValue() { ListCell* lc1 = NULL; ListCell* lc2 = NULL; MultiColSkewInfo* mcs1 = NULL; MultiColSkewInfo* mcs2 = NULL; bool equalconst = false; lc1 = m_innerSkewInfo->head; while (lc1 != NULL) { mcs1 = (MultiColSkewInfo*)lfirst(lc1); lc1 = lc1->next; /* If there is no skew values, which means it is a null skew. */ if (needPartBroadcast(mcs1) == false) continue; /* We have already delete all skew info at outer side. */ if (m_outerSkewInfo == NIL) return; lc2 = m_outerSkewInfo->head; while (lc2 != NULL) { mcs2 = (MultiColSkewInfo*)lfirst(lc2); lc2 = lc2->next; /* Null skew info. */ if (needPartBroadcast(mcs2) == false) continue; /* Try to match all skew values. */ equalconst = isMultiColSkewValueEqual(mcs1, mcs2); /* If all match, then only keep the more skew side. */ if (equalconst) { /* Keep the side which has more skew data or which is set by hint. */ if (findMoreSkewSideForMultiCol(mcs1, mcs2)) { m_outerSkewInfo = list_delete_ptr(m_outerSkewInfo, (void*)mcs2); printSkewOptimizeDetail("Duplicate skew value is found at both side of join," " and the outer side is less skew, so delete it."); } else { m_innerSkewInfo = list_delete_ptr(m_innerSkewInfo, (void*)mcs1); printSkewOptimizeDetail("Duplicate skew value is found at both side of join," " and the inner side is less skew, so delete it."); } } } } } /* * @Description: Find out which side has more skew data or which is set by hint. * * return true if cs1 has more skew data, false other way. */ bool JoinSkewInfo::findMoreSkewSideForSingleCol(ColSkewInfo* cs1, ColSkewInfo* cs2) const { /* Both tables are set by hint. */ if (cs1->mcv_ratio < 0 && cs2->mcv_ratio < 0) { if ((m_innerStreamInfo->subpath->parent->rows > m_outerStreamInfo->subpath->parent->rows)) { cs1->mcv_op_ratio = 1; return true; } else { cs2->mcv_op_ratio = 1; return false; } } /* Neither table is set by hint. */ if (cs1->mcv_ratio >= 0 && cs2->mcv_ratio >= 0) { if ((m_innerStreamInfo->subpath->parent->rows * cs1->mcv_ratio > m_outerStreamInfo->subpath->parent->rows * cs2->mcv_ratio)) { cs1->mcv_op_ratio = cs2->mcv_ratio; return true; } else { cs2->mcv_op_ratio = cs1->mcv_ratio; return false; } } /* Only one of the table is set by hint. */ if (cs1->mcv_ratio < 0) { cs1->mcv_op_ratio = cs2->mcv_ratio; return true; } else { cs2->mcv_op_ratio = cs1->mcv_ratio; return false; } return true; } /* * @Description: Find out which side has more skew data or which is set by hint. * * return true if mcs1 has more skew data, false other way. */ bool JoinSkewInfo::findMoreSkewSideForMultiCol(MultiColSkewInfo* mcs1, MultiColSkewInfo* mcs2) const { /* Both tables are set by hint. */ if (mcs1->mcv_ratio < 0 && mcs2->mcv_ratio < 0) { if ((m_innerStreamInfo->subpath->parent->rows > m_outerStreamInfo->subpath->parent->rows)) { mcs1->mcv_op_ratio = 1; return true; } else { mcs2->mcv_op_ratio = 1; return false; } } /* Neither table is set by hint. */ if (mcs1->mcv_ratio >= 0 && mcs2->mcv_ratio >= 0) { if ((m_innerStreamInfo->subpath->parent->rows * mcs1->mcv_ratio > m_outerStreamInfo->subpath->parent->rows * mcs2->mcv_ratio)) { mcs1->mcv_op_ratio = mcs2->mcv_ratio; return true; } else { mcs2->mcv_op_ratio = mcs1->mcv_ratio; return false; } } /* Only one of the table is set by hint. */ if (mcs1->mcv_ratio < 0) { mcs1->mcv_op_ratio = mcs2->mcv_ratio; return true; } else { mcs2->mcv_op_ratio = mcs1->mcv_ratio; return false; } return true; } /* * @Description: Even we find skew values from statistic or hint, we can not * solve this problem now. So delete them from skew list. * * @param[IN] is_outer: this side is outer side. * @return void */ void JoinSkewInfo::deleteUnoptimzeSkew(bool is_outer) { List* skewInfo = is_outer ? m_outerSkewInfo : m_innerSkewInfo; List* nullSkewInfo = NIL; ListCell* lc = NULL; if (skewInfo == NIL) return; if (checkSkewOptimization(is_outer) == false) { foreach(lc, skewInfo) { if (m_isMultiCol) { MultiColSkewInfo* mcsinfo = (MultiColSkewInfo*)lfirst(lc); /* Null skew do not need to broadcast the other side, so we can keep it. */ if (mcsinfo->is_null) nullSkewInfo = lappend(nullSkewInfo, mcsinfo); } else { ColSkewInfo* csinfo = (ColSkewInfo*)lfirst(lc); if (csinfo->is_null) nullSkewInfo = lappend(nullSkewInfo, csinfo); } } if (is_outer) m_outerSkewInfo = nullSkewInfo; else m_innerSkewInfo = nullSkewInfo; } } /* * @Description: we need to compare the input data with the skew value * during execution stage, so we should generate equal * compare expression between the skew column data and * the skew values. * * @param[IN] is_stream_outer: this side is outer side. * @return List*: equal operation list. */ List* JoinSkewInfo::createSkewQuals(bool is_stream_outer) { if (m_isMultiCol) return createMultiColSkewQuals(is_stream_outer); else return createSingleColSkewQuals(is_stream_outer); } /* * @Description: generate equal compare expression for single skew column. * * @param[IN] is_stream_outer: this side is outer side. * @return List*: equal operation list. */ List* JoinSkewInfo::createSingleColSkewQuals(bool is_stream_outer) { List* ssinfo = is_stream_outer ? m_outerSkewInfo : m_innerSkewInfo; List* skew_quals = NIL; List* quals = NIL; ListCell* lc = NULL; ColSkewInfo* csinfo = NULL; QualSkewInfo* qsinfo = NULL; if (ssinfo == NIL) return NIL; foreach(lc, ssinfo) { csinfo = (ColSkewInfo*)lfirst(lc); if (csinfo->is_null || (csinfo->value && csinfo->value->constisnull)) { NullTest* nulltest = NULL; nulltest = makeNullTest(IS_NULL, (Expr*)csinfo->var); quals = lappend(quals, (void*)nulltest); } else { OpExpr* op = NULL; op = createEqualExprForSkew((Node*)csinfo->var, csinfo->value); quals = lappend(quals, (void*)op); } } if (quals != NIL) { qsinfo = makeNode(QualSkewInfo); if (list_length(quals) > 1) { Expr* expr = makeBoolExpr(OR_EXPR, quals, -1); qsinfo->skew_quals = lappend(qsinfo->skew_quals, (void*)expr); } else { qsinfo->skew_quals = quals; } /* * If the producer threads less than consumer threads, * we need round robin to make sure data has been transfered to * each consumer threads evenly. */ qsinfo->skew_stream_type = PART_REDISTRIBUTE_PART_ROUNDROBIN; skew_quals = lappend(skew_quals, (void*)qsinfo); } return skew_quals; } /* * @Description: generate equal compare expression for multi skew column. * * @param[IN] is_stream_outer: this side is outer side. * @return List*: equal operation list. */ List* JoinSkewInfo::createMultiColSkewQuals(bool is_stream_outer) { List* ssinfo = is_stream_outer ? m_outerSkewInfo : m_innerSkewInfo; List* skew_qual_list = NIL; List* or_quals = NIL; ListCell* lc1 = NULL; ListCell* lc2 = NULL; MultiColSkewInfo* mcsinfo = NULL; QualSkewInfo* qsinfo = NULL; Const* con = NULL; Node* node = NULL; Expr* expr = NULL; if (ssinfo == NIL) { return NIL; } foreach(lc1, ssinfo) { mcsinfo = (MultiColSkewInfo*)lfirst(lc1); NullTest* nulltest = NULL; List* and_quals = NIL; if (mcsinfo->is_null) { foreach(lc2, mcsinfo->vars) { nulltest = makeNullTest(IS_NULL, (Expr*)lfirst(lc2)); and_quals = lappend(and_quals, nulltest); } expr = makeBoolExpr(AND_EXPR, and_quals, -1); or_quals = lappend(or_quals, expr); } else { OpExpr* op = NULL; int nvars = list_length(mcsinfo->vars); for (int i = 0; i < nvars; i++) { node = (Node*)list_nth(mcsinfo->vars, i); con = (Const*)list_nth(mcsinfo->values, i); /* * If we combine single skew values to a multiple vale, * one of these values may be null. */ if (con == NULL || con->constisnull) { nulltest = makeNullTest(IS_NULL, (Expr*)node); and_quals = lappend(and_quals, nulltest); } else { op = createEqualExprForSkew(node, con); and_quals = lappend(and_quals, (void*)op); } } expr = makeBoolExpr(AND_EXPR, and_quals, -1); or_quals = lappend(or_quals, expr); } } if (or_quals != NIL) { qsinfo = makeNode(QualSkewInfo); if (list_length(or_quals) > 1) { expr = makeBoolExpr(OR_EXPR, or_quals, -1); qsinfo->skew_quals = lappend(qsinfo->skew_quals, (void*)expr); } else { qsinfo->skew_quals = or_quals; } /* * If the producer threads less than consumer threads, * we need round robin to make sure data has been transfered to * each consumer threads evenly. */ qsinfo->skew_stream_type = PART_REDISTRIBUTE_PART_ROUNDROBIN; skew_qual_list = lappend(skew_qual_list, qsinfo); } return skew_qual_list; } /* * @Description: when we find a skew value at one side of join, * then we need to add relate info to the other side. * For example, when a skew value A is found in the * outer side of join's column a1, we keep all data * which equal A to local DN. Then the inner side * should broadcast the data equal A to all DNs. * * @param[IN] is_outer_skew: the skew side is outer side. * @return List*: skew info list. */ List* JoinSkewInfo::addSkewInfoToOtherSide(bool is_outer_skew, List* other_keys) { List* ssinfo = is_outer_skew ? m_outerSkewInfo : m_innerSkewInfo; if (ssinfo == NIL) return NIL; if (m_isMultiCol) return addMultiColSkewInfoToOtherSide(is_outer_skew, other_keys); else return addSingleColSkewInfoToOtherSide(is_outer_skew, other_keys); } /* * @Description: add skew info of single column for the other side. * * @param[IN] is_outer_skew: the skew side is outer side. * @return List*: skew info list. */ List* JoinSkewInfo::addSingleColSkewInfoToOtherSide(bool is_outer_skew, List* other_keys) { List* ssinfo = is_outer_skew ? m_outerSkewInfo : m_innerSkewInfo; StreamInfo* other_sinfo = is_outer_skew ? m_innerStreamInfo : m_outerStreamInfo; List* skew_quals = NIL; List* quals = NIL; ListCell* lc = NULL; OpExpr* op = NULL; Node* key = NULL; ColSkewInfo* csinfo = NULL; QualSkewInfo* qsinfo = NULL; double broadcast_ratio = 0.0; if (ssinfo == NIL) return NIL; /* * If the other side is replicate, we dont need to add skew info. * However, there is a special situation when we add a redistribute * on replicate table to do hash filter. */ if (other_sinfo->type == STREAM_BROADCAST || (other_sinfo->type == STREAM_NONE && is_replicated_path(other_sinfo->subpath))) return NIL; if (other_sinfo->type == STREAM_REDISTRIBUTE && other_sinfo->smpDesc.distriType == LOCAL_BROADCAST) return NIL; key = (Node*)linitial(other_keys); foreach(lc, ssinfo) { csinfo = (ColSkewInfo*)lfirst(lc); /* No need to broadcast skew value when it is null */ if (needPartBroadcast(csinfo) == false) continue; broadcast_ratio += csinfo->mcv_op_ratio; op = createEqualExprForSkew(key, csinfo->value); quals = lappend(quals, op); } if (quals != NIL) { qsinfo = makeNode(QualSkewInfo); qsinfo->skew_stream_type = chooseStreamForNoSkewSide(other_sinfo); qsinfo->broadcast_ratio = broadcast_ratio; if (list_length(quals) > 1) { Expr* expr = makeBoolExpr(OR_EXPR, quals, -1); qsinfo->skew_quals = lappend(qsinfo->skew_quals, (void*)expr); } else { qsinfo->skew_quals = quals; } skew_quals = lappend(skew_quals, (void*)qsinfo); } return skew_quals; } /* * @Description: add skew info of multi column for the other side. * * @param[IN] is_outer_skew: the skew side is outer side. * @return List*: skew info list. */ List* JoinSkewInfo::addMultiColSkewInfoToOtherSide(bool is_outer_skew, List* other_keys) { List* ssinfo = is_outer_skew ? m_outerSkewInfo : m_innerSkewInfo; StreamInfo* other_sinfo = is_outer_skew ? m_innerStreamInfo : m_outerStreamInfo; List* skew_quals = NIL; List* or_quals = NIL; List* and_quals = NIL; ListCell* lc1 = NULL; NullTest* nulltest = NULL; OpExpr* op = NULL; Node* equal_var = NULL; Const* con = NULL; Expr* expr = NULL; MultiColSkewInfo* mcsinfo = NULL; QualSkewInfo* qsinfo = NULL; double broadcast_ratio = 0.0; int i; if (ssinfo == NIL) return NIL; /* * redistribute(skew) + broadcast will occur in nodegroup. * We dont need to do any extra work for the broadcast side. */ if (other_sinfo->type == STREAM_BROADCAST) return NIL; if (other_sinfo->type == STREAM_REDISTRIBUTE && other_sinfo->smpDesc.distriType == LOCAL_BROADCAST) return NIL; foreach(lc1, ssinfo) { mcsinfo = (MultiColSkewInfo*)lfirst(lc1); /* No need to broadcast skew value when it is null */ if (needPartBroadcast(mcsinfo) == false) continue; i = 0; and_quals = NIL; broadcast_ratio += mcsinfo->mcv_op_ratio; int nvar = list_length(other_keys); for (i = 0; i < nvar; i++) { equal_var = (Node*)list_nth(other_keys, i); con = (Const*)list_nth(mcsinfo->values, i); if (con == NULL || con->constisnull) { nulltest = makeNullTest(IS_NULL, (Expr*)equal_var); and_quals = lappend(and_quals, nulltest); } else { op = createEqualExprForSkew(equal_var, con); and_quals = lappend(and_quals, op); } } expr = makeBoolExpr(AND_EXPR, and_quals, -1); or_quals = lappend(or_quals, expr); } if (or_quals != NIL) { qsinfo = makeNode(QualSkewInfo); qsinfo->skew_stream_type = chooseStreamForNoSkewSide(other_sinfo); qsinfo->broadcast_ratio = broadcast_ratio; if (list_length(or_quals) > 1) { expr = makeBoolExpr(OR_EXPR, or_quals, -1); qsinfo->skew_quals = lappend(qsinfo->skew_quals, (void*)expr); } else { qsinfo->skew_quals = or_quals; } skew_quals = lappend(skew_quals, qsinfo); } return skew_quals; } /* * @Description: choose a suitable stream for the opposite side of skew side. * * @param[IN] sinfo: stream info. * @return SkewStreamType: skew stream type. */ SkewStreamType JoinSkewInfo::chooseStreamForNoSkewSide(StreamInfo* sinfo) const { SkewStreamType sstype = PART_NONE; if (sinfo->type == STREAM_REDISTRIBUTE) { /* * Stream pair for parallel stream that may cause skew problem: * 1. split redistribute(skew) + split redistribute * 2. split redistribute(skew) + local redistribute * 3. split redistribute(skew) + local broadcast (nodegroup scenario) * 4. remote redistribute(skew) + remote redistribute * 5. remote redistribute(skew) + local gather */ switch (sinfo->smpDesc.distriType) { case PARALLEL_NONE: sstype = PART_REDISTRIBUTE_PART_BROADCAST; break; case REMOTE_DISTRIBUTE: case REMOTE_SPLIT_DISTRIBUTE: sstype = PART_REDISTRIBUTE_PART_BROADCAST; break; case LOCAL_DISTRIBUTE: /* * Because the executor treat local stream differently * and only connect to consumer in the same datanode, * we need to change local stream to remote redistribute. */ sstype = PART_REDISTRIBUTE_PART_BROADCAST; if (sinfo->smpDesc.consumerDop > 1) sinfo->smpDesc.distriType = REMOTE_SPLIT_DISTRIBUTE; else sinfo->smpDesc.distriType = REMOTE_DISTRIBUTE; break; case LOCAL_ROUNDROBIN: sstype = PART_LOCAL_PART_BROADCAST; sinfo->smpDesc.distriType = REMOTE_HYBRID; break; case LOCAL_BROADCAST: sstype = PART_NONE; break; default: sstype = PART_NONE; break; } } else if (sinfo->type == STREAM_NONE) { if (sinfo->subpath->dop > 1) { /* * In a case, split redistribute + local redistribute. The local * side alredy has been redistribute at the subquery's plan, so * we dont need to do local redistribute. However, when we try to * solve skew problem, we need a stream. */ sinfo->smpDesc.consumerDop = sinfo->subpath->dop; sinfo->smpDesc.producerDop = sinfo->subpath->dop; sinfo->smpDesc.distriType = REMOTE_HYBRID; sstype = PART_LOCAL_PART_BROADCAST; } else { sstype = PART_LOCAL_PART_BROADCAST; } } else if (sinfo->type == STREAM_BROADCAST) { sstype = PART_NONE; } return sstype; } void JoinSkewInfo::traverseSubPath(Path* path) { switch (path->type) { case T_NestPath: case T_MergePath: case T_HashPath: { JoinPath* jpath = (JoinPath*)path; if (IS_JOIN_OUTER(jpath->jointype)) { if (checkOuterJoinNulls(path)) { m_hasRuleSkew = true; return; } } traverseSubPath(jpath->outerjoinpath); traverseSubPath(jpath->innerjoinpath); } break; case T_AppendPath: { AppendPath* apath = (AppendPath*)path; ListCell* lc = NULL; Path* subpath = NULL; foreach(lc, apath->subpaths) { subpath = (Path*)lfirst(lc); traverseSubPath(subpath); } } break; case T_MergeAppendPath: { MergeAppendPath* mpath = (MergeAppendPath*)path; ListCell* lc = NULL; Path* subpath = NULL; foreach(lc, mpath->subpaths) { subpath = (Path*)lfirst(lc); traverseSubPath(subpath); } } break; case T_ResultPath: { ResultPath* rpath = (ResultPath*)path; traverseSubPath(rpath->subpath); } break; case T_UniquePath: { UniquePath* upath = (UniquePath*)path; traverseSubPath(upath->subpath); } break; case T_MaterialPath: { MaterialPath* mpath = (MaterialPath*)path; traverseSubPath(mpath->subpath); } break; case T_StreamPath: { StreamPath* spath = (StreamPath*)path; traverseSubPath(spath->subpath); } break; default: break; } } /* * @Description: check the null skew in outer join path. * * @param[IN] path: outer join path. * @return bool: true -- found null skew */ bool JoinSkewInfo::checkOuterJoinNulls(Path* jpath) { if (!IS_JOIN_OUTER(((JoinPath*)jpath)->jointype)) return false; List* target_list = jpath->parent->reltargetlist; List* subtarget_list = NIL; List* join_clauses = NIL; List* null_list = NIL; List* skew_cols = NIL; ListCell* lc = NULL; Node* node = NULL; QualSkewInfo* qsinfo = NULL; NullTest* nulltest = NULL; join_clauses = getJoinClause(jpath); subtarget_list = getSubTargetList((JoinPath*)jpath); null_list = findNullCols(target_list, subtarget_list, join_clauses); foreach(lc, m_distributeKeys) { node = (Node*)lfirst(lc); if (find_node_in_targetlist(node, null_list) >= 0) { skew_cols = lappend(skew_cols, (void*)node); } } /* Null skew occurs only when all the distribute key in in null cols. */ if (skew_cols != NIL && list_length(m_distributeKeys) == list_length(skew_cols)) { qsinfo = makeNode(QualSkewInfo); qsinfo->skew_stream_type = PART_REDISTRIBUTE_PART_LOCAL; foreach(lc, skew_cols) { nulltest = makeNullTest(IS_NULL, (Expr*)lfirst(lc)); qsinfo->skew_quals = lappend(qsinfo->skew_quals, (void*)nulltest); } *m_skewInfo = lappend(*m_skewInfo, qsinfo); printSkewOptimizeDetail("Found null skew caused by outer join."); return true; } return false; } /* * @Description: get all target list which may need add null value from sub path. * * @param[IN] jpath: join path. * @return List*: potential null skew column list. */ List* JoinSkewInfo::getSubTargetList(JoinPath* jpath) const { Path* left_path = jpath->outerjoinpath; Path* right_path = jpath->innerjoinpath; List* subtarget_list = NIL; if (!IS_JOIN_OUTER(jpath->jointype)) return NIL; /* find the target list that may need add null */ if (jpath->jointype == JOIN_LEFT || jpath->jointype == JOIN_LEFT_ANTI_FULL) { subtarget_list = right_path->parent->reltargetlist; } else if (jpath->jointype == JOIN_RIGHT || jpath->jointype == JOIN_RIGHT_ANTI_FULL) { subtarget_list = left_path->parent->reltargetlist; } else if (jpath->jointype == JOIN_FULL) { subtarget_list = list_union(left_path->parent->reltargetlist, right_path->parent->reltargetlist); } return subtarget_list; } /* * @Description: calculate the cost of skew qual expression for one side. * * @return void */ void JoinSkewInfo::addQualCost(bool is_outer) { List* qualList = is_outer ? m_outerSkewInfo : m_innerSkewInfo; ListCell* lc = NULL; QualSkewInfo* qsinfo = NULL; foreach(lc, qualList) { qsinfo = (QualSkewInfo*)lfirst(lc); cost_qual_eval(&qsinfo->qual_cost, qsinfo->skew_quals, m_root); } } /* * @Description: check if this stream is possible to cause skew. * * @param[IN] bool: outer side of join. * @return void */ bool JoinSkewInfo::checkSkewPossibility(bool is_outer) { StreamInfo* sinfo = is_outer ? m_outerStreamInfo : m_innerStreamInfo; if (sinfo == NULL) return false; if (sinfo->stream_keys == NIL) return false; if (sinfo->type == STREAM_REDISTRIBUTE) { /* No need to check unique side of join. */ if ((is_outer && m_saveJoinType == JOIN_UNIQUE_OUTER) || (!is_outer && m_saveJoinType == JOIN_UNIQUE_INNER)) return false; /* * Handle parallel stream. * 1. Local Redistribute. * Since we use sub path's distribute keys as local redistribute * keys, so it will not cause skew problem. * 2. Local Broadcast / Local RoundRobin * These kinds of stream won't cause skew problem. * 3. Split Redistribute. * Only this kind will cause skew. */ if (sinfo->smpDesc.distriType == PARALLEL_NONE || sinfo->smpDesc.distriType == REMOTE_SPLIT_DISTRIBUTE || sinfo->smpDesc.distriType == REMOTE_DISTRIBUTE) return true; } return false; } /* * @Description: For skew value(not null), we need to broadcast the other side's * data which equals the skew value to solve the skew problem. * However in some case, we can not use broadcast, so in these * situation, we can not solve skew problem now. * * @param[IN] is_outer: outer side of join. * @return void */ bool JoinSkewInfo::checkSkewOptimization(bool is_outer) { /* * Forbiden this situation now, in case we do broadcast to outer join's null side. * Need solve this situation later. */ if (is_outer) { StreamInfo* other_sinfo = m_innerStreamInfo; if (!can_broadcast_inner(m_joinType, m_saveJoinType, is_replicated_path(other_sinfo->subpath), other_sinfo->subpath->distribute_keys, other_sinfo->subpath)) return false; } else { StreamInfo* other_sinfo = m_outerStreamInfo; if (!can_broadcast_outer(m_joinType, m_saveJoinType, is_replicated_path(other_sinfo->subpath), other_sinfo->subpath->distribute_keys, other_sinfo->subpath)) return false; } return true; } /* * @Description: When two neighbor joins have the same join table and join col, * and this col is skew, then we just need to do part roundrobin * at the first join. For example: * t1 inner join t2 on t1.a = t2.b inner join t3 on t1.a = t3.b * and t1.a is a skew column. * * Example: Join(t1.a = t3.c) * / \ * Part RoundRobin(Redundant) Part Broadcast * / \ * Join(t1.a = t2.b) Scan(t3) * / \ * Part RoundRobin Part Broadcast * / \ * Scan(t1) scan(t3) * * * @param[IN] is_outer: outer side of join. * @return bool: true -- this skew stream is redundant and can be removed. */ bool JoinSkewInfo::checkRedundant(bool is_outer) { List* skewInfo = is_outer ? m_outerSkewInfo : m_innerSkewInfo; StreamInfo* sinfo = is_outer ? m_outerStreamInfo : m_innerStreamInfo; ListCell* lc = NULL; QualSkewInfo* qsinfo = NULL; foreach(lc, skewInfo) { qsinfo = (QualSkewInfo*)lfirst(lc); if (qsinfo->skew_stream_type != PART_REDISTRIBUTE_PART_ROUNDROBIN) return false; } return checkPathRedundant(sinfo->stream_keys, sinfo->subpath); } /* * @Description: Check if there is same part redistribute part roundrobin at * under path. * * @param[IN] streamKeys: current distribute keys. * @param[IN] path: path to be checked. * @return bool: true -- this skew stream is redundant and can be removed. */ bool JoinSkewInfo::checkPathRedundant(List* streamKeys, Path* path) { bool ret = false; switch (path->pathtype) { case T_NestLoop: case T_MergeJoin: case T_HashJoin: { JoinPath* jpath = (JoinPath*)path; if (path->locator_type == LOCATOR_TYPE_RROBIN) { ret = ret || checkPathRedundant(streamKeys, jpath->innerjoinpath); ret = ret || checkPathRedundant(streamKeys, jpath->outerjoinpath); } else { ret = false; } } break; case T_Material: { MaterialPath* mpath = (MaterialPath*)path; ret = checkPathRedundant(streamKeys, mpath->subpath); } break; case T_Unique: { UniquePath* upath = (UniquePath*)path; ret = checkPathRedundant(streamKeys, upath->subpath); } break; case T_Stream: { StreamPath* spath = (StreamPath*)path; if (list_length(spath->skew_list) > 0) { Distribution *d1, *d2; d1 = m_distribution; d2 = &spath->consumer_distribution; if (d1 == NULL) { ret = false; break; } /* * There are 3 precondition when we confirm it is redundant stream: * 1. they have the same distribute keys; * 2. they are in the same nodegroup; * 3. they have the same parallel degree. */ if (equal(streamKeys, path->distribute_keys) && ng_is_same_group(d1, d2) && m_dop == spath->smpDesc->consumerDop) { ListCell* lc = NULL; QualSkewInfo* qsinfo = NULL; foreach(lc, spath->skew_list) { qsinfo = (QualSkewInfo*)lfirst(lc); if (qsinfo->skew_stream_type != PART_REDISTRIBUTE_PART_ROUNDROBIN) break; } if (lc == NULL) ret = true; } } } break; default: break; } return ret; } /* * @Description: reset member structer for later use. * * @return void */ void JoinSkewInfo::resetSkewInfo() { /* Reset skew info. */ m_innerSkewInfo = NIL; m_outerSkewInfo = NIL; m_distributeKeys = NIL; m_dop = 1; m_isMultiCol = false; m_hasStatSkew = false; m_hasHintSkew = false; m_hasRuleSkew = false; /* Switch to original context. */ MemoryContextSwitchTo(m_oldContext); /* Copy skew info to parent context. */ Assert(m_context != CurrentMemoryContext); m_innerStreamInfo->ssinfo = (List*)copyObject(m_innerStreamInfo->ssinfo); m_outerStreamInfo->ssinfo = (List*)copyObject(m_outerStreamInfo->ssinfo); /* Reset memory. */ MemoryContextReset(m_context); } /* * @Description: find the distribute keys at the other side of join. * * * @param[IN] is_outer_skew: the outer side of join. * @return void */ List* JoinSkewInfo::findOtherSidekeys(bool is_outer_skew) { StreamInfo* sinfo = is_outer_skew ? m_outerStreamInfo : m_innerStreamInfo; StreamInfo* otherSinfo = is_outer_skew ? m_innerStreamInfo : m_outerStreamInfo; List* equalKeys = NIL; /* * Try to find if we already have distribute keys at the other side, * if not, try to find the equal keys from join clause. */ if (otherSinfo->stream_keys != NIL) { equalKeys = otherSinfo->stream_keys; } else { equalKeys = findEqualVarList(sinfo->stream_keys, sinfo->subpath->parent); } return equalKeys; } /* * @Description: find equal var list from join clause. * * @param[IN] skewList: distribute keys at skew side. * @param[IN] rel: rel info for skew side. * @return void */ List* JoinSkewInfo::findEqualVarList(List* skewList, RelOptInfo* rel) { ListCell* lc = NULL; List* equalList = NIL; Node* node = NULL; Node* equalNode = NULL; if (skewList == NIL) return NIL; foreach(lc, skewList) { node = (Node*)lfirst(lc); equalNode = findEqualVar(node, rel); if (equalNode == NULL) { printSkewOptimizeDetail("Can not find equal expr for the skew column."); return NIL; } else { equalList = lappend(equalList, equalNode); } } return equalList; } /* * @Description: find equal var at the opposite side of join basee on join clauses. * * @param[IN] var: skew var * @param[IN] rel: the relation of skew side * @return Var*: equal var */ Node* JoinSkewInfo::findEqualVar(Node* var, RelOptInfo* rel) { ListCell* lc = NULL; RestrictInfo* restrictinfo = NULL; Node* equal_var = NULL; Node* leftkey = NULL; Node* rightkey = NULL; Node* skewkey = NULL; Node* otherkey = NULL; OpExpr* op = NULL; bool skew_is_left = false; bool skew_is_right = false; foreach(lc, m_joinClause) { restrictinfo = (RestrictInfo*)lfirst(lc); op = (OpExpr*)restrictinfo->clause; leftkey = join_clause_get_join_key((Node*)restrictinfo->clause, true); rightkey = join_clause_get_join_key((Node*)restrictinfo->clause, false); skew_is_left = bms_is_subset(restrictinfo->left_relids, rel->relids); skew_is_right = bms_is_subset(restrictinfo->right_relids, rel->relids); if (skew_is_left) { skewkey = leftkey; otherkey = rightkey; } else if (skew_is_right) { skewkey = rightkey; otherkey = leftkey; } else { continue; } if (skewkey == NULL || otherkey == NULL) continue; /* Check if this key compatible with the skew column. */ if (judge_node_compatible(m_root, (Node*)var, skewkey)) { equal_var = otherkey; break; } else { /* When the join clause is a expr which include the var, try to find it. */ if (IsA(var, Var)) { List* varList = pull_var_clause(skewkey, PVC_RECURSE_AGGREGATES, PVC_RECURSE_PLACEHOLDERS); if (list_length(varList) == 1) { if (_equalSimpleVar(var, linitial(varList))) equal_var = otherkey; } } } } return equal_var; } /* ===================== Functions for agg skew info ====================== */ /* * @Description: Constructor func for agg skew judgement and output the skew info. * * @param[IN] root: planner info for agg. * @param[IN] distribute_keys: distribute keys for this agg. * @param[IN] subplan: lefttree of agg. * @param[IN] rel_info: rel option info of agg. */ AggSkewInfo::AggSkewInfo(PlannerInfo* root, Plan* subplan, RelOptInfo* rel_info) : SkewInfo(root), m_subplan(subplan) { m_subrel = rel_info; m_skewType = SKEW_AGG; } /* * @Description: destructor function for agg skew info. */ AggSkewInfo::~AggSkewInfo() { m_subplan = NULL; } /* * @Description: set distribute keys to find skew info. * * @return void. */ void AggSkewInfo::setDistributeKeys(List* distribute_keys) { m_distributeKeys = distribute_keys; } /* * @Description: main entrance to find stream skew info. * * @return void */ void AggSkewInfo::findStreamSkewInfo() { MemoryContext old_cxt = MemoryContextSwitchTo(m_context); /* Skew info is reused, so reset the status each time. */ resetSkewInfo(); /* Find skew info from skew hint. */ findHintSkewInfo(); /* Find skew null value generate by outer join (not base relation). */ findNullSkewInfo(); /* Hint and rule skew info has a higher priority, then try to find statistic info. */ if (!m_hasHintSkew && !m_hasRuleSkew) findStatSkewInfo(); MemoryContextReset(m_context); MemoryContextSwitchTo(old_cxt); } /* * @Description: Find skew info from statistic for agg. * * @return void */ void AggSkewInfo::findHintSkewInfo() { m_isMultiCol = (list_length(m_distributeKeys) > 1); /* Try to find skew info from hint. */ if (m_isMultiCol) { /* When distribute keys include multiple columns. */ List* rece = findMultiColSkewValuesFromHint(); /* receive List point and free it. */ list_free_deep(rece); } else { /* When distribute key only has one column. */ List* rece = findSingleColSkewValuesFromHint(); /* receive List point and free it. */ list_free_deep(rece); } } /* * @Description: Find skew info from skew hint for agg. * * @return void */ void AggSkewInfo::findStatSkewInfo() { List* svalues = NIL; m_isMultiCol = (list_length(m_distributeKeys) > 1); /* Try to find skew info from hint. */ if (m_isMultiCol) { /* When distribute keys include multiple columns. */ svalues = findMultiColSkewValuesFromStatistic(); } else { /* When distribute key only has one column. */ svalues = findSingleColSkewValuesFromStatistic(); } if (list_length(svalues) >= 1) m_hasStatSkew = true; } /* * @Description: the main entrance for null skew caused by outer join. * Take 'select A.a1, B.b1 from A left join B on A.a0 = B.b0;' * as an example, when some data in a0 dose not match any data * in b0, then we out put data like (a1, NULL) as result. * Actually there must be many data can not match and generate * NULL result in real situation, which will cause NULL value * skew in later hash redistribution. * * @return void */ void AggSkewInfo::findNullSkewInfo() { traverseSubPlan(m_subplan); } /* ======================= Functions for null skew ======================== */ void AggSkewInfo::traverseSubPlan(Plan* plan) { /* Find the join node */ switch (nodeTag(plan)) { case T_NestLoop: case T_MergeJoin: case T_HashJoin: { Join* join = (Join*)plan; if (IS_JOIN_OUTER(join->jointype)) { if (checkOuterJoinNullsForAgg(plan)) { m_hasRuleSkew = true; return; } } traverseSubPlan(plan->lefttree); traverseSubPlan(plan->righttree); } break; case T_Append: { Append* aplan = (Append*)plan; ListCell* lc = NULL; Plan* subplan = NULL; foreach(lc, aplan->appendplans) { subplan = (Plan*)lfirst(lc); traverseSubPlan(subplan); } } break; case T_MergeAppend: { MergeAppend* mplan = (MergeAppend*)plan; ListCell* lc = NULL; Plan* subplan = NULL; foreach(lc, mplan->mergeplans) { subplan = (Plan*)lfirst(lc); traverseSubPlan(subplan); } } break; case T_SubqueryScan: { /* we may need to think about plans in subquery. */ break; } default: break; } } /* * @Description: check the null skew in outer join plan for agg operation. * * @param[IN] path: outer join plan. * @return bool: true -- found null skew */ bool AggSkewInfo::checkOuterJoinNullsForAgg(Plan* jplan) const { if (!IS_JOIN_OUTER(((Join*)jplan)->jointype)) return false; List* target_list = jplan->targetlist; List* subtarget_list = NIL; List* null_list = NIL; List* skew_cols = NIL; ListCell* lc = NULL; Node* node = NULL; /* Get the sub target list of outer join null side. */ subtarget_list = getSubTargetListByPlan(jplan); /* null_list is the intersection of subtarget_list and targetlist. */ foreach(lc, target_list) { node = (Node*)((TargetEntry*)lfirst(lc))->expr; if (find_node_in_targetlist(node, subtarget_list) >= 0) { null_list = lappend(null_list, (void*)node); } } /* Get the column that both in m_distributeKeys and null_list. */ foreach(lc, m_distributeKeys) { node = (Node*)lfirst(lc); if (find_node_in_targetlist(node, null_list) >= 0) { skew_cols = lappend(skew_cols, (void*)node); } } /* Null skew occurs only when all the distribute key in in null cols. */ if (skew_cols == NIL || list_length(m_distributeKeys) != list_length(skew_cols)) { list_free(skew_cols); return false; } return true; } /* * @Description: Get all target list which may need add null value from sub plan. * * @param[IN] plan: join plan. * @return List*: potential null skew column list. */ List* AggSkewInfo::getSubTargetListByPlan(Plan* plan) const { if (!IS_JOIN_OUTER(((Join*)plan)->jointype)) return NIL; List* sub_target = NIL; Join* join = (Join*)plan; /* find the target list that may need add null */ if (JOIN_LEFT == join->jointype || JOIN_LEFT_ANTI_FULL == join->jointype) { sub_target = plan->righttree->targetlist; } else if (JOIN_RIGHT == join->jointype || JOIN_RIGHT_ANTI_FULL == join->jointype) { sub_target = plan->lefttree->targetlist; } else if (JOIN_FULL == join->jointype) { sub_target = list_union(plan->righttree->targetlist, plan->lefttree->targetlist); } return sub_target; } /* * @Description: reset member structer for later use. * * @return void */ void AggSkewInfo::resetSkewInfo() { /* Reset skew info. */ m_hasStatSkew = false; m_hasHintSkew = false; m_hasRuleSkew = false; m_isMultiCol = false; } /* ======================= Functions for skew exec ======================== */ /* * @Description: construct function for skew optimze plan execution. * * @param[IN] ssinfo: skew info of this stream. * @param[IN] estate: working state for an Execution. * @param[IN] isVec: if this is vec stream. */ StreamSkew::StreamSkew(List* ssinfo, bool isVec) { m_ssinfo = ssinfo; m_estate = NULL; m_econtext = NULL; m_localNodeId = -1; m_skewQual = NIL; } /* * @Description: destructor function for stream skew. */ StreamSkew::~StreamSkew() { m_ssinfo = NIL; if (m_skewQual != NIL) { list_free(m_skewQual); m_skewQual = NIL; } if (m_estate != NULL) { FreeExecutorState(m_estate); m_estate = NULL; } m_econtext = NULL; } /* * @Description: mainly init execution expression state. * * @param[IN] ssinfo: skew info of this stream. * @param[IN] estate: working state for an Execution. * @param[IN] isVec: if this is vec stream. * @return void */ void StreamSkew::init(bool isVec) { QualSkewInfo* qsinfo = NULL; QualSkewState* qsstate = NULL; ListCell* lc = NULL; if (m_ssinfo == NIL) return; /* * We create a estate under the t_thrd.top_mem_cxt of stream thread, * we will release it at exec_stream_end. */ MemoryContext cxt = MemoryContextSwitchTo(THREAD_GET_MEM_CXT_GROUP(MEMORY_CONTEXT_OPTIMIZER)); m_estate = CreateExecutorState(); (void)MemoryContextSwitchTo(m_estate->es_query_cxt); if (isVec) { m_econtext = CreateExprContext(m_estate); ExecAssignVectorForExprEval(m_econtext); } else { m_econtext = CreateExprContext(m_estate); } foreach(lc, m_ssinfo) { qsinfo = (QualSkewInfo*)lfirst(lc); qsstate = (QualSkewState*)palloc0(sizeof(QualSkewState)); qsstate->skew_stream_type = qsinfo->skew_stream_type; if (isVec) { qsstate->skew_quals_state = (List*)ExecInitVecExpr((Expr*)(qsinfo->skew_quals), NULL); } else { qsstate->skew_quals_state = (List*)ExecInitExpr((Expr*)(qsinfo->skew_quals), NULL); } if (qsstate->skew_quals_state != NIL) m_skewQual = lappend(m_skewQual, (void*)qsstate); } MemoryContextSwitchTo(cxt); } /* * @Description: check if the input data match skew values, * and choose the suitable stream type for the data. * * @param[IN] tuple: input data. * @return int: stream type. */ int StreamSkew::chooseStreamType(TupleTableSlot* tuple) { ListCell* lc = NULL; QualSkewState* qsstate = NULL; ResetExprContext(m_econtext); m_econtext->ecxt_outertuple = tuple; qsstate = (QualSkewState*)linitial(m_skewQual); foreach(lc, m_skewQual) { qsstate = (QualSkewState*)lfirst(lc); if (ExecQual(qsstate->skew_quals_state, m_econtext, false)) { switch (qsstate->skew_stream_type) { case PART_REDISTRIBUTE_PART_BROADCAST: case PART_LOCAL_PART_BROADCAST: return STREAM_BROADCAST; case PART_REDISTRIBUTE_PART_ROUNDROBIN: return STREAM_ROUNDROBIN; case PART_REDISTRIBUTE_PART_LOCAL: return STREAM_LOCAL; default: ereport(ERROR, (errmodule(MOD_OPT_SKEW), errcode(ERRCODE_UNRECOGNIZED_NODE_TYPE), errmsg("Invalid skew stream type %d.", qsstate->skew_stream_type))); } } } /* If not match, do the original stream. */ switch (qsstate->skew_stream_type) { case PART_REDISTRIBUTE_PART_BROADCAST: case PART_REDISTRIBUTE_PART_ROUNDROBIN: case PART_REDISTRIBUTE_PART_LOCAL: return STREAM_REDISTRIBUTE; case PART_LOCAL_PART_BROADCAST: return STREAM_LOCAL; default: ereport(ERROR, (errmodule(MOD_OPT_SKEW), errcode(ERRCODE_UNRECOGNIZED_NODE_TYPE), errmsg("Invalid skew stream type %d.", qsstate->skew_stream_type))); } return -1; } /* * @Description: check if the input data(vector) match skew values, * and choose the suitable stream type for the data. * * @param[IN] tuple: input data. * @return int: stream type. */ void StreamSkew::chooseVecStreamType(VectorBatch* batch, int* skewStream) { int i; ListCell* lc = NULL; QualSkewState* qsstate = NULL; errno_t rc; bool select[BatchMaxSize] = {false}; ResetExprContext(m_econtext); m_econtext->ecxt_outerbatch = batch; m_econtext->ecxt_scanbatch = batch; qsstate = (QualSkewState*)linitial(m_skewQual); foreach(lc, m_skewQual) { qsstate = (QualSkewState*)lfirst(lc); ExecVecQual(qsstate->skew_quals_state, m_econtext, false); for (i = 0; i < batch->m_rows; i++) { if (batch->m_sel[i]) { switch (qsstate->skew_stream_type) { case PART_REDISTRIBUTE_PART_BROADCAST: case PART_LOCAL_PART_BROADCAST: skewStream[i] = STREAM_BROADCAST; break; case PART_REDISTRIBUTE_PART_ROUNDROBIN: skewStream[i] = STREAM_ROUNDROBIN; break; case PART_REDISTRIBUTE_PART_LOCAL: skewStream[i] = STREAM_LOCAL; break; default: ereport(ERROR, (errmodule(MOD_OPT_SKEW), errcode(ERRCODE_UNRECOGNIZED_NODE_TYPE), errmsg("Invalid skew stream type %d.", qsstate->skew_stream_type))); } } select[i] = select[i] || m_econtext->ecxt_scanbatch->m_sel[i]; } rc = memset_s(batch->m_sel, BatchMaxSize * sizeof(bool), 0, BatchMaxSize * sizeof(bool)); securec_check(rc, "\0", "\0"); } for (i = 0; i < batch->m_rows; i++) { if (select[i] == false) { switch (qsstate->skew_stream_type) { case PART_REDISTRIBUTE_PART_BROADCAST: case PART_REDISTRIBUTE_PART_ROUNDROBIN: case PART_REDISTRIBUTE_PART_LOCAL: skewStream[i] = STREAM_REDISTRIBUTE; break; case PART_LOCAL_PART_BROADCAST: skewStream[i] = STREAM_LOCAL; break; default: ereport(ERROR, (errmodule(MOD_OPT_SKEW), errcode(ERRCODE_UNRECOGNIZED_NODE_TYPE), errmsg("Invalid skew stream type %d.", qsstate->skew_stream_type))); } } } } /* * @Description: Get distribute keys from a plan, especially for skew join. * This function is only used for distinct number estimate. * * The skew join has no distribute keys because there is hybrid * stream at one or both sides of join. However, when we try to * estimate local distinct number, we will try to find if the * join's distribute keys equal to base rel's distribute keys, * in this case, if the join plan has no distribute keys, the * estimated local distinct number will be lager the the real number. * * Even though there is Hybrid stream under join, the most data * in Hybrid stream is distribute by hash, so we can still try to * get an approximate for local distinct number estimate. * * @return List*: plan distribute keys */ List* find_skew_join_distribute_keys(Plan* plan) { if (!IS_JOIN_PLAN(plan)) return plan->distributed_keys; Join* join = (Join*)plan; if (!join->skewoptimize) return plan->distributed_keys; /* * Skew join has no distribute keys, we need to find the keys for * distinct value estimate. */ Plan* inner_plan = plan->righttree; Plan* outer_plan = plan->lefttree; bool is_replicate_inner = is_replicated_plan(inner_plan); bool is_replicate_outer = is_replicated_plan(outer_plan); if (is_replicate_inner && is_replicate_outer) { return NIL; } else if (is_replicate_inner || is_replicate_outer) { if (is_replicate_outer) { return inner_plan->distributed_keys; } else { return outer_plan->distributed_keys; } } else { if (join->jointype != JOIN_FULL) { return locate_distribute_key( join->jointype, outer_plan->distributed_keys, inner_plan->distributed_keys, NIL, false); } } return NIL; }
29,238
341
<gh_stars>100-1000 from decimal import Decimal from typing import Optional, Union from .. import xdr as stellar_xdr from ..asset import Asset from ..muxed_account import MuxedAccount from .operation import Operation from .utils import check_amount __all__ = ["Payment"] class Payment(Operation): """The :class:`Payment` object, which represents a Payment operation on Stellar's network. Sends an amount in a specific asset to a destination account. Threshold: Medium :param destination: The destination account ID. :param asset: The asset to send. :param amount: The amount to send. :param source: The source account for the payment. Defaults to the transaction's source account. """ _XDR_OPERATION_TYPE: stellar_xdr.OperationType = stellar_xdr.OperationType.PAYMENT def __init__( self, destination: Union[MuxedAccount, str], asset: Asset, amount: Union[str, Decimal], source: Optional[Union[MuxedAccount, str]] = None, ) -> None: super().__init__(source) check_amount(amount) if isinstance(destination, str): destination = MuxedAccount.from_account(destination) self.destination: MuxedAccount = destination self.asset: Asset = asset self.amount: Union[str, Decimal] = amount def _to_operation_body(self) -> stellar_xdr.OperationBody: asset = self.asset.to_xdr_object() destination = self.destination.to_xdr_object() amount = stellar_xdr.Int64(Operation.to_xdr_amount(self.amount)) payment_op = stellar_xdr.PaymentOp(destination, asset, amount) body = stellar_xdr.OperationBody( type=self._XDR_OPERATION_TYPE, payment_op=payment_op ) return body @classmethod def from_xdr_object(cls, xdr_object: stellar_xdr.Operation) -> "Payment": """Creates a :class:`Payment` object from an XDR Operation object. """ source = Operation.get_source_from_xdr_obj(xdr_object) assert xdr_object.body.payment_op is not None destination = MuxedAccount.from_xdr_object( xdr_object.body.payment_op.destination ) asset = Asset.from_xdr_object(xdr_object.body.payment_op.asset) amount = Operation.from_xdr_amount(xdr_object.body.payment_op.amount.int64) op = cls(source=source, destination=destination, asset=asset, amount=amount) return op def __str__(self): return ( f"<Payment [destination={self.destination}, asset={self.asset}, " f"amount={self.amount}, source={self.source}]>" )
1,059
1,686
// Copyright (c) 2020 The Chromium Embedded Framework Authors. All rights // reserved. Use of this source code is governed by a BSD-style license that // can be found in the LICENSE file. #include "libcef/browser/media_router/media_source_impl.h" CefMediaSourceImpl::CefMediaSourceImpl( const media_router::MediaSource::Id& source_id) : source_(source_id) {} CefMediaSourceImpl::CefMediaSourceImpl(const GURL& presentation_url) : source_(presentation_url) {} CefString CefMediaSourceImpl::GetId() { return source_.id(); } bool CefMediaSourceImpl::IsCastSource() { return !IsDialSource(); } bool CefMediaSourceImpl::IsDialSource() { return source_.IsDialSource(); }
224
10,225
package io.quarkus.extest.runtime.config; import org.eclipse.microprofile.config.spi.Converter; public class YesNoConverter implements Converter<Boolean> { public YesNoConverter() { } @Override public Boolean convert(String s) { if (s == null || s.isEmpty()) { return false; } switch (s) { case "YES": return true; case "NO": return false; } throw new IllegalArgumentException("Unsupported value " + s + " given"); } }
255
1,056
<gh_stars>1000+ /* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.netbeans.modules.spring.beans.model; import java.io.File; import java.util.logging.Logger; import org.netbeans.modules.spring.beans.model.ExclusiveAccess.AsyncTask; import org.netbeans.modules.spring.beans.model.impl.ConfigFileSpringBeanSource; import java.io.IOException; import java.util.concurrent.atomic.AtomicBoolean; import java.util.logging.Level; import javax.swing.text.Position.Bias; import org.netbeans.editor.BaseDocument; import org.openide.cookies.EditorCookie; import org.openide.filesystems.FileObject; import org.openide.filesystems.FileUtil; import org.openide.loaders.DataObject; import org.openide.text.CloneableEditorSupport; import org.openide.text.PositionRef; import org.openide.util.Exceptions; /** * Handles the lifecycle of a single config file. Can be notified of external changes * to the file through the {@link #change} method. Also provides access * to the beans for a single file through the {@link #getBeanSource} method. * * @author <NAME> */ public class SpringConfigFileModelController { private static final Logger LOGGER = Logger.getLogger(SpringConfigFileModelController.class.getName()); private static final int DELAY = 500; private final ConfigFileSpringBeanSource beanSource; private final File file; // @GuardedBy("this") private boolean parsedAtLeastOnce; // @GuardedBy("this") private AsyncTask currentUpdateTask; // @GuardedBy("this") private FileObject currentFile; public SpringConfigFileModelController(File file, ConfigFileSpringBeanSource beanSource) { this.file = file; this.beanSource = beanSource; } public SpringBeanSource getUpToDateBeanSource() throws IOException { assert ExclusiveAccess.getInstance().isCurrentThreadAccess(); FileObject fo = getFileToMakeUpToDate(); if (fo != null) { doParse(fo, false); } return beanSource; } public LockedDocument getLockedDocument() throws IOException { assert ExclusiveAccess.getInstance().isCurrentThreadAccess(); FileObject fo = getFileToMakeUpToDate(); if (fo == null) { fo = FileUtil.toFileObject(file); } if (fo != null) { return new LockedDocument(fo); } return null; } /** * Makes the beans up to date, that is, if there has previously been * an external change and the config file hasn't been parsed yet, * it is parsed now. This method needs to be called under exclusive * access. */ private FileObject getFileToMakeUpToDate() throws IOException { assert ExclusiveAccess.getInstance().isCurrentThreadAccess(); FileObject fileToParse = null; synchronized (this) { if (currentUpdateTask == null || currentUpdateTask.isFinished()) { // No update scheduled. if (!parsedAtLeastOnce) { // Moreover, not parsed yet, so will parse now. fileToParse = FileUtil.toFileObject(file); } } else { // An update is scheduled, so will perform it now. fileToParse = currentFile; } } return fileToParse; } private void doParse(FileObject fo, boolean updateTask) throws IOException { assert ExclusiveAccess.getInstance().isCurrentThreadAccess(); BaseDocument document = (BaseDocument)getEditorCookie(fo).openDocument(); document.readLock(); try { doParse(fo, document, updateTask); } finally { document.readUnlock(); } } private void doParse(FileObject fo, BaseDocument document, boolean updateTask) throws IOException { assert ExclusiveAccess.getInstance().isCurrentThreadAccess(); beanSource.parse(document); synchronized (this) { if (!parsedAtLeastOnce) { parsedAtLeastOnce = true; } if (!updateTask && fo.equals(currentFile)) { // We were not invoked from an update task. By parsing the file, // we have just processed the scheduled update, so // it can be cancelled now. LOGGER.log(Level.FINE, "Canceling update task for " + currentFile); currentUpdateTask.cancel(); } } } public void notifyChange(FileObject configFO) { assert configFO != null; LOGGER.log(Level.FINE, "Scheduling update for {0}", configFO); synchronized (this) { if (configFO != currentFile) { // We are going to parse another FileObject (for example, because the // original one has been renamed). if (currentUpdateTask != null) { currentUpdateTask.cancel(); } currentFile = configFO; currentUpdateTask = ExclusiveAccess.getInstance().createAsyncTask(new Updater(configFO)); } currentUpdateTask.schedule(DELAY); } } private static EditorCookie getEditorCookie(FileObject fo) throws IOException { DataObject dataObject = DataObject.find(fo); EditorCookie result = dataObject.getCookie(EditorCookie.class); if (result == null) { throw new IllegalStateException("File " + fo + " does not have an EditorCookie."); } return result; } public final class LockedDocument { private final FileObject fo; private final CloneableEditorSupport editor; final BaseDocument document; // Although this class is single-threaded, better to have these thread-safe, // since they are guarding the document locking, and that needs to be right // even if when the class is misused. private final AtomicBoolean locked = new AtomicBoolean(); private final AtomicBoolean unlocked = new AtomicBoolean(); public LockedDocument(FileObject fo) throws IOException { this.fo = fo; editor = (CloneableEditorSupport)getEditorCookie(fo); document = (BaseDocument)editor.openDocument(); } public void lock() throws IOException { if (!locked.getAndSet(true)) { document.atomicLock(); boolean success = false; try { doParse(fo, document, false); success = true; } finally { if (!success) { document.atomicUnlock(); } } } } public void unlock() throws IOException { assert locked.get(); if (!unlocked.getAndSet(true)) { document.atomicUnlock(); } } public BaseDocument getDocument() { assert locked.get(); return document; } public SpringBeanSource getBeanSource() throws IOException { assert locked.get(); return beanSource; } public PositionRef createPositionRef(int offset, Bias bias) { assert locked.get(); return editor.createPositionRef(offset, bias); } } private final class Updater implements Runnable { private final FileObject configFile; public Updater(FileObject configFile) { this.configFile = configFile; } public void run() { LOGGER.log(Level.FINE, "Running scheduled update for file {0}", configFile); assert ExclusiveAccess.getInstance().isCurrentThreadAccess(); try { doParse(configFile, true); } catch (IOException e) { Exceptions.printStackTrace(e); } } } }
3,481
3,287
# -------------------------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # -------------------------------------------------------------------------------------------- import unittest from azure.cli.core.auth.adal_authentication import _normalize_expires_on class TestUtil(unittest.TestCase): def test_normalize_expires_on(self): assert _normalize_expires_on("11/05/2021 15:18:31 +00:00") == 1636125511 assert _normalize_expires_on('1636125511') == 1636125511 if __name__ == '__main__': unittest.main()
197
2,637
<filename>vendors/nordic/nRF5_SDK_15.2.0/components/nfc/ndef/connection_handover/ble_pair_lib/nfc_ble_pair_lib.c /** * Copyright (c) 2016 - 2018, Nordic Semiconductor ASA * * All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form, except as embedded into a Nordic * Semiconductor ASA integrated circuit in a product or a software update for * such product, must reproduce the above copyright notice, this list of * conditions and the following disclaimer in the documentation and/or other * materials provided with the distribution. * * 3. Neither the name of Nordic Semiconductor ASA nor the names of its * contributors may be used to endorse or promote products derived from this * software without specific prior written permission. * * 4. This software, with or without modification, must only be used with a * Nordic Semiconductor ASA integrated circuit. * * 5. Any software provided in binary form under this license must not be reverse * engineered, decompiled, modified and/or disassembled. * * THIS SOFTWARE IS PROVIDED BY NORDIC SEMICONDUCTOR ASA "AS IS" AND ANY EXPRESS * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL NORDIC SEMICONDUCTOR ASA OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * */ #include "sdk_common.h" #if NRF_MODULE_ENABLED(NFC_BLE_PAIR_LIB) #include "nfc_ble_pair_lib.h" #include "sdk_macros.h" #include "app_error.h" #include "nrf_drv_rng.h" #include "nfc_t2t_lib.h" #include "nfc_ble_pair_msg.h" #include "nrf_sdh_ble.h" #include "nrf_ble_lesc.h" #define NRF_LOG_MODULE_NAME nfc_ble_pair #if NFC_BLE_PAIR_LIB_LOG_ENABLED #define NRF_LOG_LEVEL NFC_BLE_PAIR_LIB_LOG_LEVEL #define NRF_LOG_INFO_COLOR NFC_BLE_PAIR_LIB_INFO_COLOR #define NRF_LOG_DEBUG_COLOR NFC_BLE_PAIR_LIB_DEBUG_COLOR #else // NFC_BLE_PAIR_LIB_LOG_ENABLED #define NRF_LOG_LEVEL 0 #endif // NFC_BLE_PAIR_LIB_LOG_ENABLED #include "nrf_log.h" NRF_LOG_MODULE_REGISTER(); // Verify bonding and keys distribution settings. #if ((BLE_NFC_SEC_PARAM_BOND) && \ !(BLE_NFC_SEC_PARAM_KDIST_OWN_ENC) && \ !(BLE_NFC_SEC_PARAM_KDIST_OWN_ID) && \ !(BLE_NFC_SEC_PARAM_KDIST_PEER_ENC) && \ !(BLE_NFC_SEC_PARAM_KDIST_PEER_ID)) #error "At least one of the BLE_NFC_SEC_PARAM_KDIST flags must be set to 1 when bonding is enabled." #endif // Macro for verifying if the pairing mode argument is valid #define VERIFY_PAIRING_MODE(arg) \ if ((arg) >= NFC_PAIRING_MODE_CNT) \ { \ return NRF_ERROR_INVALID_PARAM; \ } #define BLE_GAP_LESC_P256_SK_LEN 32 /**< GAP LE Secure Connections Elliptic Curve Diffie-Hellman P-256 Secret Key Length. */ #define TK_MAX_NUM 1 /**< Maximal number of TK locations in NDEF message buffer. */ #define NDEF_MSG_BUFF_SIZE 256 /**< Size of buffer for the NDEF pairing message. */ #define BLE_NFC_SEC_PARAM_KEYPRESS 0 /**< Keypress notifications not enabled. */ #define BLE_NFC_SEC_PARAM_IO_CAPS BLE_GAP_IO_CAPS_NONE /**< No I/O capabilities. */ typedef struct { uint8_t sk[BLE_GAP_LESC_P256_SK_LEN]; /**< LE Secure Connections Elliptic Curve Diffie-Hellman P-256 Secret Key. */ } ble_gap_lesc_p256_sk_t; static ble_advertising_t * m_p_advertising = NULL; /**< Pointer to the advertising module instance. */ static uint8_t m_ndef_msg_buf[NDEF_MSG_BUFF_SIZE]; /**< NFC tag NDEF message buffer. */ static ble_advdata_tk_value_t m_oob_auth_key; /**< Temporary Key buffer used in OOB legacy pairing mode. */ static uint8_t * m_tk_group[TK_MAX_NUM]; /**< Locations of TK in NDEF message. */ static nfc_pairing_mode_t m_pairing_mode; /**< Current pairing mode. */ static ble_gap_lesc_oob_data_t m_ble_lesc_oob_data; /**< LESC OOB data used in LESC OOB pairing mode. */ static ble_gap_sec_params_t m_sec_param; /**< Current Peer Manager secure parameters configuration. */ static uint8_t m_connections = 0; /**< Number of active connections. */ static void ble_evt_handler(ble_evt_t const * p_ble_evt, void * p_context); NRF_SDH_BLE_OBSERVER(m_ble_evt_observer, NFC_BLE_PAIR_LIB_BLE_OBSERVER_PRIO, ble_evt_handler, NULL); /** * @brief Generates random values to a given buffer * * @param[out] p_buff Buffer for random values * @param[in] size Number of bytes to generate * * @returns Number of generated bytes */ static uint8_t random_vector_generate(uint8_t * p_buff, uint8_t size) { uint8_t available; ret_code_t err_code = NRF_SUCCESS; nrf_drv_rng_bytes_available(&available); uint8_t length = (size < available) ? size : available; err_code = nrf_drv_rng_rand(p_buff, length); APP_ERROR_CHECK(err_code); return length; } /** * @brief Prints generated key to the log console * * @param[in] lenght TK value length */ static void random_vector_log(uint8_t length) { NRF_LOG_INFO("TK Random Value:"); for (uint32_t i = 0; i < length; i++) { NRF_LOG_RAW_INFO(" %02X",(int)m_oob_auth_key.tk[i]); } NRF_LOG_RAW_INFO("\r\n"); } /** * @brief Function for handling NFC events. * * @details Starts advertising and generates new OOB keys on the NFC_T2T_EVENT_FIELD_ON event. * * @param[in] p_context Context for callback execution, not used in this callback implementation. * @param[in] event Event generated by hal NFC lib. * @param[in] p_data Received/transmitted data or NULL, not used in this callback implementation. * @param[in] data_length Size of the received/transmitted packet, not used in this callback implementation. */ static void nfc_callback(void * p_context, nfc_t2t_event_t event, uint8_t const * p_data, size_t data_length) { UNUSED_PARAMETER(p_context); UNUSED_PARAMETER(p_data); UNUSED_PARAMETER(data_length); ret_code_t err_code = NRF_SUCCESS; nfc_pairing_mode_t pairing_mode; switch (event) { case NFC_T2T_EVENT_FIELD_ON: NRF_LOG_DEBUG("NFC_EVENT_FIELD_ON"); pairing_mode = nfc_ble_pair_mode_get(); if ((pairing_mode == NFC_PAIRING_MODE_OOB) || (pairing_mode == NFC_PAIRING_MODE_GENERIC_OOB)) { // Generate Authentication OOB Key and update NDEF message content. uint8_t length = random_vector_generate(m_oob_auth_key.tk, BLE_GAP_SEC_KEY_LEN); random_vector_log(length); err_code = nfc_tk_group_modifier_update(&m_oob_auth_key); APP_ERROR_CHECK(err_code); } // Start advertising when NFC field is sensed and there is a place for another connection. if (m_connections < NRF_SDH_BLE_PERIPHERAL_LINK_COUNT) { err_code = ble_advertising_start(m_p_advertising, BLE_ADV_MODE_FAST); if (err_code != NRF_ERROR_INVALID_STATE) { APP_ERROR_CHECK(err_code); } } break; case NFC_T2T_EVENT_FIELD_OFF: NRF_LOG_DEBUG("NFC_EVENT_FIELD_OFF"); break; default: break; } } /** * @brief Function for setting the Peer Manager secure mode used in device pairing. * * @param[in] mode NFC pairing mode, this is the value of @ref nfc_pairing_mode_t enum * * @retval NRF_SUCCESS If new secure mode has been set correctly. * @retval NRF_ERROR_INVALID_PARAM If pairing mode is invalid. * @retval Other Other error codes might be returned depending on used modules. */ static ret_code_t pm_secure_mode_set(nfc_pairing_mode_t mode) { ret_code_t err_code = NRF_SUCCESS; // Check if pairing mode is valid. VERIFY_PAIRING_MODE(mode); memset(&m_sec_param, 0x00, sizeof(m_sec_param)); // Pairing mode specific security parameters. switch (mode) { case NFC_PAIRING_MODE_JUST_WORKS: // Disable pairing with OOB data. m_sec_param.mitm = 0; m_sec_param.oob = 0; m_sec_param.lesc = 0; break; case NFC_PAIRING_MODE_OOB: // Enable legacy pairing with OOB data - TK value. m_sec_param.mitm = 1; m_sec_param.oob = 1; m_sec_param.lesc = 0; break; case NFC_PAIRING_MODE_LESC_OOB: case NFC_PAIRING_MODE_LESC_JUST_WORKS: // Enable LESC pairing - OOB and MITM flags are cleared because it is the central device // who decides if the connection will be authorized with LESC OOB data. m_sec_param.mitm = 0; m_sec_param.oob = 0; m_sec_param.lesc = 1; break; case NFC_PAIRING_MODE_GENERIC_OOB: // MITM, OOB and LESC flags are changing dynamically depending on central device pairing flags. break; default: return NRF_ERROR_INVALID_PARAM; } // Common security parameters to be used for all security procedures. m_sec_param.min_key_size = BLE_NFC_SEC_PARAM_MIN_KEY_SIZE; m_sec_param.max_key_size = BLE_NFC_SEC_PARAM_MAX_KEY_SIZE; m_sec_param.keypress = BLE_NFC_SEC_PARAM_KEYPRESS; m_sec_param.io_caps = BLE_NFC_SEC_PARAM_IO_CAPS; m_sec_param.bond = BLE_NFC_SEC_PARAM_BOND; #if (BLE_NFC_SEC_PARAM_BOND) // If bonding is enabled, set key distribution flags. m_sec_param.kdist_own.enc = BLE_NFC_SEC_PARAM_KDIST_OWN_ENC; m_sec_param.kdist_own.id = BLE_NFC_SEC_PARAM_KDIST_OWN_ID; m_sec_param.kdist_peer.enc = BLE_NFC_SEC_PARAM_KDIST_PEER_ENC; m_sec_param.kdist_peer.id = BLE_NFC_SEC_PARAM_KDIST_PEER_ID; #else // If bonding is not enabled, no keys can be distributed. m_sec_param.kdist_own.enc = 0; m_sec_param.kdist_own.id = 0; m_sec_param.kdist_peer.enc = 0; m_sec_param.kdist_peer.id = 0; #endif // Update Peer Manager security parameter settings. err_code = pm_sec_params_set(&m_sec_param); return err_code; } /**@brief Function for preparing the BLE pairing data for the NFC tag. * * @details This function does not stop and start the NFC tag data emulation. * * @param[in] mode Pairing mode for which the tag data will be prepared. * * @retval NRF_SUCCESS If new tag pairing data has been set correctly. * @retval NRF_ERROR_INVALID_PARAM If pairing mode is invalid. * @retval Other Other error codes might be returned depending on used modules. */ ret_code_t nfc_ble_pair_data_set(nfc_pairing_mode_t mode) { ret_code_t err_code = NRF_SUCCESS; ble_gap_lesc_p256_pk_t const * p_pk_own; // Check if pairing mode is valid VERIFY_PAIRING_MODE(mode); // Provide information about available buffer size to encoding function. uint32_t ndef_msg_len = sizeof(m_ndef_msg_buf); switch (mode) { case NFC_PAIRING_MODE_OOB: // Encode NDEF message with Secure Simple Pairing OOB optional data - TK value. err_code = nfc_ble_pair_msg_updatable_tk_encode(NFC_BLE_PAIR_MSG_BLUETOOTH_LE_SHORT, &m_oob_auth_key, NULL, m_ndef_msg_buf, &ndef_msg_len, m_tk_group, TK_MAX_NUM); break; case NFC_PAIRING_MODE_JUST_WORKS: // Encode NDEF message with Secure Simple Pairing OOB data. err_code = nfc_ble_pair_default_msg_encode(NFC_BLE_PAIR_MSG_BLUETOOTH_LE_SHORT, NULL, NULL, m_ndef_msg_buf, &ndef_msg_len); break; case NFC_PAIRING_MODE_LESC_OOB: // Get the local LESC public key p_pk_own = nrf_ble_lesc_public_key_get(); VERIFY_PARAM_NOT_NULL(p_pk_own); // Generate LESC OOB data err_code = sd_ble_gap_lesc_oob_data_get(BLE_CONN_HANDLE_INVALID, p_pk_own, &m_ble_lesc_oob_data); VERIFY_SUCCESS(err_code); // Encode NDEF message with BLE LESC OOB pairing data - LESC random and confirmation values. err_code = nfc_ble_pair_default_msg_encode(NFC_BLE_PAIR_MSG_BLUETOOTH_LE_SHORT, NULL, &m_ble_lesc_oob_data, m_ndef_msg_buf, &ndef_msg_len); break; case NFC_PAIRING_MODE_LESC_JUST_WORKS: err_code = nfc_ble_pair_default_msg_encode(NFC_BLE_PAIR_MSG_BLUETOOTH_LE_SHORT, NULL, NULL, m_ndef_msg_buf, &ndef_msg_len); break; case NFC_PAIRING_MODE_GENERIC_OOB: // Get the local LESC public key p_pk_own = nrf_ble_lesc_public_key_get(); VERIFY_PARAM_NOT_NULL(p_pk_own); // Generate LESC OOB data err_code = sd_ble_gap_lesc_oob_data_get(BLE_CONN_HANDLE_INVALID, p_pk_own, &m_ble_lesc_oob_data); VERIFY_SUCCESS(err_code); // Encode NDEF message with Secure Simple Pairing OOB data - TK value and LESC Random and Confirmation Keys. err_code = nfc_ble_pair_msg_updatable_tk_encode(NFC_BLE_PAIR_MSG_BLUETOOTH_LE_SHORT, &m_oob_auth_key, &m_ble_lesc_oob_data, m_ndef_msg_buf, &ndef_msg_len, m_tk_group, TK_MAX_NUM); break; default: return NRF_ERROR_INVALID_PARAM; } VERIFY_SUCCESS(err_code); // Update NFC tag data err_code = nfc_t2t_payload_set(m_ndef_msg_buf, ndef_msg_len); return err_code; } ret_code_t nfc_ble_pair_init(ble_advertising_t * const p_advertising, nfc_pairing_mode_t mode) { ret_code_t err_code = NRF_SUCCESS; // Check if pairing mode is valid VERIFY_PAIRING_MODE(mode); // Check if pointer to the advertising module instance is not NULL VERIFY_PARAM_NOT_NULL(p_advertising); m_p_advertising = p_advertising; m_pairing_mode = mode; // Initialize LESC module. err_code = nrf_ble_lesc_init(); APP_ERROR_CHECK(err_code); // Initialize RNG peripheral for authentication OOB data generation err_code = nrf_drv_rng_init(NULL); if (err_code != NRF_ERROR_INVALID_STATE && err_code != NRF_ERROR_MODULE_ALREADY_INITIALIZED) { VERIFY_SUCCESS(err_code); } // Start NFC err_code = nfc_t2t_setup(nfc_callback, NULL); VERIFY_SUCCESS(err_code); // Set Peer Manager pairing mode err_code = pm_secure_mode_set(mode); VERIFY_SUCCESS(err_code); if ((mode == NFC_PAIRING_MODE_LESC_OOB) || (mode == NFC_PAIRING_MODE_LESC_JUST_WORKS) || (mode == NFC_PAIRING_MODE_GENERIC_OOB)) { err_code = nrf_ble_lesc_keypair_generate(); VERIFY_SUCCESS(err_code); } // Set proper NFC data according to the pairing mode err_code = nfc_ble_pair_data_set(mode); VERIFY_SUCCESS(err_code); // Turn on tag emulation err_code = nfc_t2t_emulation_start(); return err_code; } ret_code_t nfc_ble_pair_mode_set(nfc_pairing_mode_t mode) { ret_code_t err_code = NRF_SUCCESS; // Check if pairing mode is valid VERIFY_PAIRING_MODE(mode); if (mode != m_pairing_mode) { m_pairing_mode = mode; if ((mode == NFC_PAIRING_MODE_LESC_OOB) || (mode == NFC_PAIRING_MODE_LESC_JUST_WORKS) || (mode == NFC_PAIRING_MODE_GENERIC_OOB)) { err_code = nrf_ble_lesc_keypair_generate(); VERIFY_SUCCESS(err_code); } // Update Peer Manager settings according to the new pairing mode err_code = pm_secure_mode_set(mode); VERIFY_SUCCESS(err_code); // NFC tag emulation must be turned off during changes in payload err_code = nfc_t2t_emulation_stop(); VERIFY_SUCCESS(err_code); // Update NFC tag data err_code = nfc_ble_pair_data_set(mode); VERIFY_SUCCESS(err_code); // Turn on tag emulation after changes err_code = nfc_t2t_emulation_start(); VERIFY_SUCCESS(err_code); } return NRF_SUCCESS; } nfc_pairing_mode_t nfc_ble_pair_mode_get(void) { return m_pairing_mode; } /** * @brief Generates new key pair for LESC pairing. * * @details If device is in the @ref NFC_PAIRING_MODE_LESC_OOB mode or in * the @ref NFC_PAIRING_MODE_GENERIC_OOB mode, NFC Connection Handover * message is also updated with newly generated LESC OOB data. * * @retval NRF_SUCCESS If new tag pairing data has been set correctly. * @retval Other Other error codes might be returned depending on used modules. */ static ret_code_t generate_lesc_keys(void) { ret_code_t err_code = NRF_SUCCESS; ble_gap_lesc_p256_pk_t const * p_pk_own; // Generate new LESC keys err_code = nrf_ble_lesc_keypair_generate(); VERIFY_SUCCESS(err_code); if ((m_pairing_mode == NFC_PAIRING_MODE_LESC_OOB) || (m_pairing_mode == NFC_PAIRING_MODE_GENERIC_OOB)) { // Get the local LESC public key p_pk_own = nrf_ble_lesc_public_key_get(); VERIFY_PARAM_NOT_NULL(p_pk_own); // Generate LESC OOB data. err_code = sd_ble_gap_lesc_oob_data_get(BLE_CONN_HANDLE_INVALID, p_pk_own, &m_ble_lesc_oob_data); VERIFY_SUCCESS(err_code); // Update NDEF message with new LESC OOB data. err_code = nfc_lesc_data_update(&m_ble_lesc_oob_data); VERIFY_SUCCESS(err_code); } return NRF_SUCCESS; } /** * @brief Function for handling BLE events. * * @param[in] p_ble_evt Event received from the BLE stack. * @param[in] p_context Context. */ static void ble_evt_handler(ble_evt_t const * p_ble_evt, void * p_context) { ret_code_t err_code = NRF_SUCCESS; switch (p_ble_evt->header.evt_id) { // Upon authorization key request, reply with Temporary Key that was read from the NFC tag case BLE_GAP_EVT_AUTH_KEY_REQUEST: NRF_LOG_DEBUG("BLE_GAP_EVT_AUTH_KEY_REQUEST"); err_code = sd_ble_gap_auth_key_reply(p_ble_evt->evt.gap_evt.conn_handle, BLE_GAP_AUTH_KEY_TYPE_OOB, m_oob_auth_key.tk); APP_ERROR_CHECK(err_code); break; // Upon LESC Diffie_Hellman key request, reply with key computed from device secret key and peer public key case BLE_GAP_EVT_LESC_DHKEY_REQUEST: NRF_LOG_DEBUG("BLE_GAP_EVT_LESC_DHKEY_REQUEST"); // If LESC OOB pairing is on, perform authentication with OOB data if (p_ble_evt->evt.gap_evt.params.lesc_dhkey_request.oobd_req) { err_code = sd_ble_gap_lesc_oob_data_set(p_ble_evt->evt.gap_evt.conn_handle, &m_ble_lesc_oob_data, NULL); APP_ERROR_CHECK(err_code); } break; case BLE_GAP_EVT_CONNECTED: m_connections++; break; case BLE_GAP_EVT_DISCONNECTED: m_connections--; // Intentional fallthrough. case BLE_GAP_EVT_AUTH_STATUS: // Generate new LESC key pair and OOB data if ((m_pairing_mode == NFC_PAIRING_MODE_LESC_OOB) || (m_pairing_mode == NFC_PAIRING_MODE_LESC_JUST_WORKS) || (m_pairing_mode == NFC_PAIRING_MODE_GENERIC_OOB)) { err_code = generate_lesc_keys(); if (err_code != NRF_ERROR_BUSY) { APP_ERROR_CHECK(err_code); } } break; default: break; } } ret_code_t nfc_ble_pair_on_pm_params_req(pm_evt_t const * p_evt) { ret_code_t err_code = NRF_SUCCESS; NRF_LOG_DEBUG("PM_EVT_CONN_SEC_PARAMS_REQ"); // Dynamic security parameters changes are needed only // by NFC_PAIRING_MODE_GENERIC_OOB pairing mode. if (m_pairing_mode == NFC_PAIRING_MODE_GENERIC_OOB) { // Check if pointer to the Peer Manager event is not NULL. VERIFY_PARAM_NOT_NULL(p_evt); // Set up proper MITM, OOB and LESC flags depending on peer LESC flag // to support either Legacy OOB or LESC OOB pairing mode. if (p_evt->params.conn_sec_params_req.p_peer_params->lesc) { NRF_LOG_DEBUG("LESC OOB mode flags set."); m_sec_param.mitm = 0; m_sec_param.oob = 0; m_sec_param.lesc = 1; } else { NRF_LOG_DEBUG("Legacy OOB mode flags set."); m_sec_param.mitm = 1; m_sec_param.oob = 1; m_sec_param.lesc = 0; } // Reply with new security parameters to the Peer Manager. err_code = pm_conn_sec_params_reply(p_evt->conn_handle, &m_sec_param, p_evt->params.conn_sec_params_req.p_context); } return err_code; } #endif // NRF_MODULE_ENABLED(NFC_BLE_PAIR_LIB)
12,252
368
<reponame>irlanrobson/bounce<filename>include/bounce/common/graphics/color.h /* * Copyright (c) 2016-2019 <NAME> * * This software is provided 'as-is', without any express or implied * warranty. In no event will the authors be held liable for any damages * arising from the use of this software. * Permission is granted to anyone to use this software for any purpose, * including commercial applications, and to alter it and redistribute it * freely, subject to the following restrictions: * 1. The origin of this software must not be misrepresented; you must not * claim that you wrote the original software. If you use this software * in a product, an acknowledgment in the product documentation would be * appreciated but is not required. * 2. Altered source versions must be plainly marked as such, and must not be * misrepresented as being the original software. * 3. This notice may not be removed or altered from any source distribution. */ #ifndef B3_COLOR_H #define B3_COLOR_H #include <bounce/common/settings.h> // Color channels. struct b3Color { // Default constructor does nothing for performance. b3Color() { } // Construct this color from four components. b3Color(scalar R, scalar G, scalar B, scalar A = scalar(1)) : r(R), g(G), b(B), a(A) { } // Set this color from four components. void Set(scalar R, scalar G, scalar B, scalar A) { r = R; g = G; b = B; a = A; } scalar r, g, b, a; }; // Color pallete. extern const b3Color b3Color_black; extern const b3Color b3Color_white; extern const b3Color b3Color_red; extern const b3Color b3Color_green; extern const b3Color b3Color_blue; extern const b3Color b3Color_yellow; extern const b3Color b3Color_pink; extern const b3Color b3Color_gray; #endif
559
14,668
// Copyright 2021 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. package org.chromium.android_webview; import org.chromium.android_webview.common.SafeModeAction; import org.chromium.android_webview.variations.VariationsSeedSafeModeAction; /** * Exposes the SafeModeActions supported by the browser process. */ public final class BrowserSafeModeActionList { // Do not instantiate this class. private BrowserSafeModeActionList() {} /** * A list of SafeModeActions supported in the browser process. The set of actions to be * executed will be specified by the nonembedded SafeModeService, however each action (if * specified by the service) will be executed in the order listed below. */ public static final SafeModeAction[] sList = { new VariationsSeedSafeModeAction(), }; }
273
2,208
<filename>pybrain/optimization/populationbased/coevolution/__init__.py from .coevolution import Coevolution from .competitivecoevolution import CompetitiveCoevolution from .multipopulationcoevolution import MultiPopulationCoevolution
58
2,727
<reponame>jiangkang/ndk-samples<gh_stars>1000+ /** * Copyright 2020 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <fcntl.h> #include <jni.h> #include <iomanip> #include <sstream> #include <string> #include <android/asset_manager_jni.h> #include <android/log.h> #include <android/sharedmem.h> #include <sys/mman.h> #include "sequence_model.h" extern "C" JNIEXPORT jlong JNICALL Java_com_example_android_sequence_MainActivity_initModel( JNIEnv* env, jobject /* this */, jfloat ratio) { auto model = SimpleSequenceModel::Create(ratio); if (model == nullptr) { __android_log_print(ANDROID_LOG_ERROR, LOG_TAG, "Failed to create the model."); return 0; } return (jlong) (uintptr_t) model.release(); } extern "C" JNIEXPORT jfloat JNICALL Java_com_example_android_sequence_MainActivity_compute( JNIEnv* env, jobject /* this */, jfloat initialValue, jint steps, jlong _nnModel) { SimpleSequenceModel* nn_model = (SimpleSequenceModel*) _nnModel; float result = 0.0f; nn_model->Compute(initialValue, static_cast<uint32_t>(steps), &result); return result; } extern "C" JNIEXPORT void JNICALL Java_com_example_android_sequence_MainActivity_destroyModel( JNIEnv* env, jobject /* this */, jlong _nnModel) { SimpleSequenceModel* nn_model = (SimpleSequenceModel*) _nnModel; delete (nn_model); }
704
516
<reponame>XinXianChen/xpocket package com.perfma.xlab.xpocket.plugin.loader; import com.perfma.xlab.xpocket.plugin.base.NamedObject; import com.perfma.xlab.xpocket.plugin.context.FrameworkPluginContext; import java.lang.instrument.Instrumentation; import java.util.Set; /** * @author gongyu <<EMAIL>> */ public interface PluginLoader extends NamedObject { /** * Load plugins and build it into FrameworkPluginContexts * * @param resouceName 如:META-INF/xpocket.def * @return */ @Deprecated default boolean loadPlugins(String resouceName){ throw new UnsupportedOperationException("It`s default implementation " + "for loadPlugins(String resouceName),what means your provider " + "never implemented this interface. "); } /** * Load plugins and build it into FrameworkPluginContexts * * @param resouceName 如:META-INF/xpocket.def * @param isOnLoad is onLoad or onAttach when agent mode * @param inst Instrumentation instance of Java Agent * @return */ default boolean loadPlugins(String resouceName,boolean isOnLoad,Instrumentation inst){ throw new UnsupportedOperationException("It`s default implementation " + "for loadPlugins(String resouceName,boolean isOnLoad),what " + "means your provider never implemented this interface. "); } /** * Gets a list of available plug-ins * * @return */ Set<FrameworkPluginContext> getAvailablePlugins(); /** * Get all plugins * * @return */ Set<FrameworkPluginContext> getAllPlugins(); /** * Get the plug-in by its name and namespace * * @param name * @param namespace * @return */ FrameworkPluginContext getPlugin(String name, String namespace); /** * Add the plugin * * @param pluginContext */ void addPlugin(FrameworkPluginContext pluginContext); }
768
381
{ "name": "livewire-alert", "version": "2.1.9", "description": "This package provides a simple alert utilities for your livewire components", "main": "resources/js/index.js", "repository": "<EMAIL>:jantinnerezo/livewire-alert.git", "author": "<NAME> <<EMAIL>>", "license": "MIT", "scripts": { "dev": "npm run development", "development": "mix", "watch": "mix watch", "watch-poll": "mix watch -- --watch-options-poll=1000", "hot": "mix watch --hot", "prod": "npm run production", "production": "mix --production" }, "dependencies": { "laravel-mix": "^6.0.37" } }
238
634
<reponame>halotroop2288/consulo /* * Copyright 2000-2017 JetBrains s.r.o. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.intellij.util.io; import com.intellij.openapi.util.ThrowableComputable; import com.intellij.openapi.util.io.DataInputOutputUtilRt; import com.intellij.util.ThrowableConsumer; import javax.annotation.Nonnull; import javax.annotation.Nullable; import java.io.DataInput; import java.io.DataOutput; import java.io.IOException; import java.nio.ByteBuffer; /** * @author max */ @SuppressWarnings("MethodOverridesStaticMethodOfSuperclass") public class DataInputOutputUtil extends DataInputOutputUtilRt { public static final long timeBase = 33L * 365L * 24L * 3600L * 1000L; private DataInputOutputUtil() { } public static int readINT(@Nonnull DataInput record) throws IOException { return DataInputOutputUtilRt.readINT(record); } public static int readINT(@Nonnull ByteBuffer byteBuffer) { return DataInputOutputUtilRt.readINT(byteBuffer); } public static void writeINT(@Nonnull DataOutput record, int val) throws IOException { DataInputOutputUtilRt.writeINT(record, val); } public static void writeINT(@Nonnull ByteBuffer byteBuffer, int val) { DataInputOutputUtilRt.writeINT(byteBuffer, val); } public static long readLONG(@Nonnull DataInput record) throws IOException { final int val = record.readUnsignedByte(); if (val < 192) { return val; } long res = val - 192; for (int sh = 6; ; sh += 7) { int next = record.readUnsignedByte(); res |= (long)(next & 0x7F) << sh; if ((next & 0x80) == 0) { return res; } } } public static void writeLONG(@Nonnull DataOutput record, long val) throws IOException { if (0 > val || val >= 192) { record.writeByte(192 + (int)(val & 0x3F)); val >>>= 6; while (val >= 128) { record.writeByte((int)(val & 0x7F) | 0x80); val >>>= 7; } } record.writeByte((int)val); } public static int readSINT(@Nonnull DataInput record) throws IOException { return readINT(record) - 64; } public static void writeSINT(@Nonnull DataOutput record, int val) throws IOException { writeINT(record, val + 64); } public static void writeTIME(@Nonnull DataOutput record, long timestamp) throws IOException { long relStamp = timestamp - timeBase; if (relStamp < 0 || relStamp >= 0xFF00000000L) { record.writeByte(255); record.writeLong(timestamp); } else { record.writeByte((int)(relStamp >> 32)); record.writeByte((int)(relStamp >> 24)); record.writeByte((int)(relStamp >> 16)); record.writeByte((int)(relStamp >> 8)); record.writeByte((int)(relStamp)); } } public static long readTIME(@Nonnull DataInput record) throws IOException { final int first = record.readUnsignedByte(); if (first == 255) { return record.readLong(); } else { final int second = record.readUnsignedByte(); final int third = record.readUnsignedByte() << 16; final int fourth = record.readUnsignedByte() << 8; final int fifth = record.readUnsignedByte(); return ((((long)((first << 8) | second)) << 24) | (third | fourth | fifth)) + timeBase; } } /** * Writes the given (possibly null) element to the output using the given procedure to write the element if it's not null. * Should be coupled with {@link #readNullable} */ public static <T> void writeNullable(@Nonnull DataOutput out, @Nullable T value, @Nonnull ThrowableConsumer<T, IOException> writeValue) throws IOException { out.writeBoolean(value != null); if (value != null) writeValue.consume(value); } /** * Reads an element from the stream, using the given function to read it when a not-null element is expected, or returns null otherwise. * Should be coupled with {@link #writeNullable} */ @Nullable public static <T> T readNullable(@Nonnull DataInput in, @Nonnull ThrowableComputable<T, IOException> readValue) throws IOException { return in.readBoolean() ? readValue.compute() : null; } }
1,588
1,062
<reponame>larkov/MailTrackerBlocker // // Generated by class-dump 3.5b1 (64 bit) (Debug version compiled Dec 3 2019 19:59:57). // // Copyright (C) 1997-2019 <NAME>. // #import "_MTMStoredMetaMailbox.h" @interface _MTMDraftsMailbox : _MTMStoredMetaMailbox { } - (id)severalMessagesFormatString; // IMP=0x000000010023d802 - (id)oneMessageFormatString; // IMP=0x000000010023d799 - (id)defaultMessageColumnAttributesKey; // IMP=0x000000010023d77a - (id)messageColumnConfiguration; // IMP=0x000000010023d75b - (id)restoreMode; // IMP=0x000000010023d747 - (id)recoverDisplayName; // IMP=0x000000010023d6da - (id)displayName; // IMP=0x000000010023d66d @end
262
2,707
<reponame>ronaldocan/jetlinks-community package org.jetlinks.community.device.enums; import com.alibaba.fastjson.annotation.JSONField; import lombok.AllArgsConstructor; import lombok.Getter; import org.hswebframework.web.dict.EnumDict; import org.jetlinks.core.message.DeviceMessage; import org.jetlinks.core.message.MessageType; import java.util.EnumMap; import java.util.Map; import java.util.Optional; @AllArgsConstructor @Getter public enum DeviceLogType implements EnumDict<String> { event("事件上报"), readProperty("读取属性"), writeProperty("修改属性"), writePropertyReply("修改属性回复"), reportProperty("属性上报"), readPropertyReply("读取属性回复"), child("子设备消息"), childReply("子设备消息回复"), functionInvoke("调用功能"), functionReply("调用功能回复"), register("设备注册"), unregister("设备注销"), log("日志"), tag("标签更新"), offline("离线"), online("上线"), other("其它"); @JSONField(serialize = false) private final String text; @Override public String getValue() { return name(); } private final static Map<MessageType, DeviceLogType> typeMapping = new EnumMap<>(MessageType.class); static { typeMapping.put(MessageType.EVENT, event); typeMapping.put(MessageType.ONLINE, online); typeMapping.put(MessageType.OFFLINE, offline); typeMapping.put(MessageType.CHILD, child); typeMapping.put(MessageType.CHILD_REPLY, childReply); typeMapping.put(MessageType.LOG, log); typeMapping.put(MessageType.UPDATE_TAG, tag); typeMapping.put(MessageType.REPORT_PROPERTY, reportProperty); typeMapping.put(MessageType.READ_PROPERTY, readProperty); typeMapping.put(MessageType.READ_PROPERTY_REPLY, readPropertyReply); typeMapping.put(MessageType.INVOKE_FUNCTION, functionInvoke); typeMapping.put(MessageType.INVOKE_FUNCTION_REPLY, functionReply); typeMapping.put(MessageType.WRITE_PROPERTY, writeProperty); typeMapping.put(MessageType.WRITE_PROPERTY_REPLY, writePropertyReply); typeMapping.put(MessageType.REGISTER, register); typeMapping.put(MessageType.UN_REGISTER, unregister); } public static DeviceLogType of(DeviceMessage message) { return Optional.ofNullable(typeMapping.get(message.getMessageType())).orElse(DeviceLogType.other); } // @Override // public Object getWriteJSONObject() { // return getValue(); // } }
1,062
1,444
<filename>Mage.Sets/src/mage/cards/p/PredatorDragon.java<gh_stars>1000+ package mage.cards.p; import java.util.UUID; import mage.MageInt; import mage.abilities.effects.common.DevourEffect.DevourFactor; import mage.abilities.keyword.DevourAbility; import mage.abilities.keyword.FlyingAbility; import mage.abilities.keyword.HasteAbility; import mage.cards.CardImpl; import mage.cards.CardSetInfo; import mage.constants.CardType; import mage.constants.SubType; /** * * @author LevelX2 */ public final class PredatorDragon extends CardImpl { public PredatorDragon(UUID ownerId, CardSetInfo setInfo) { super(ownerId,setInfo,new CardType[]{CardType.CREATURE},"{3}{R}{R}{R}"); this.subtype.add(SubType.DRAGON); this.power = new MageInt(4); this.toughness = new MageInt(4); // Flying, haste this.addAbility(FlyingAbility.getInstance()); this.addAbility(HasteAbility.getInstance()); // Devour 2 (As this enters the battlefield, you may sacrifice any number of creatures. This creature enters the battlefield with twice that many +1/+1 counters on it.) this.addAbility(new DevourAbility(DevourFactor.Devour2)); } private PredatorDragon(final PredatorDragon card) { super(card); } @Override public PredatorDragon copy() { return new PredatorDragon(this); } }
488
2,151
/* * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved. * * Use of this source code is governed by a BSD-style license * that can be found in the LICENSE file in the root of the source * tree. An additional intellectual property rights grant can be found * in the file PATENTS. All contributing project authors may * be found in the AUTHORS file in the root of the source tree. */ #include "modules/audio_processing/aec3/clockdrift_detector.h" namespace webrtc { ClockdriftDetector::ClockdriftDetector() : level_(Level::kNone), stability_counter_(0) { delay_history_.fill(0); } ClockdriftDetector::~ClockdriftDetector() = default; void ClockdriftDetector::Update(int delay_estimate) { if (delay_estimate == delay_history_[0]) { // Reset clockdrift level if delay estimate is stable for 7500 blocks (30 // seconds). if (++stability_counter_ > 7500) level_ = Level::kNone; return; } stability_counter_ = 0; const int d1 = delay_history_[0] - delay_estimate; const int d2 = delay_history_[1] - delay_estimate; const int d3 = delay_history_[2] - delay_estimate; // Patterns recognized as positive clockdrift: // [x-3], x-2, x-1, x. // [x-3], x-1, x-2, x. const bool probable_drift_up = (d1 == -1 && d2 == -2) || (d1 == -2 && d2 == -1); const bool drift_up = probable_drift_up && d3 == -3; // Patterns recognized as negative clockdrift: // [x+3], x+2, x+1, x. // [x+3], x+1, x+2, x. const bool probable_drift_down = (d1 == 1 && d2 == 2) || (d1 == 2 && d2 == 1); const bool drift_down = probable_drift_down && d3 == 3; // Set clockdrift level. if (drift_up || drift_down) { level_ = Level::kVerified; } else if ((probable_drift_up || probable_drift_down) && level_ == Level::kNone) { level_ = Level::kProbable; } // Shift delay history one step. delay_history_[2] = delay_history_[1]; delay_history_[1] = delay_history_[0]; delay_history_[0] = delay_estimate; } } // namespace webrtc
749
1,192
//===-- RegisterPressure.cpp - Dynamic Register Pressure ------------------===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file implements the RegisterPressure class which can be used to track // MachineInstr level register pressure. // //===----------------------------------------------------------------------===// #include "llvm/CodeGen/RegisterPressure.h" #include "llvm/CodeGen/LiveInterval.h" #include "llvm/CodeGen/LiveIntervalAnalysis.h" #include "llvm/CodeGen/MachineRegisterInfo.h" #include "llvm/CodeGen/RegisterClassInfo.h" #include "llvm/Support/Debug.h" #include "llvm/Support/raw_ostream.h" using namespace llvm; /// Increase pressure for each pressure set provided by TargetRegisterInfo. static void increaseSetPressure(std::vector<unsigned> &CurrSetPressure, PSetIterator PSetI) { unsigned Weight = PSetI.getWeight(); for (; PSetI.isValid(); ++PSetI) CurrSetPressure[*PSetI] += Weight; } /// Decrease pressure for each pressure set provided by TargetRegisterInfo. static void decreaseSetPressure(std::vector<unsigned> &CurrSetPressure, PSetIterator PSetI) { unsigned Weight = PSetI.getWeight(); for (; PSetI.isValid(); ++PSetI) { assert(CurrSetPressure[*PSetI] >= Weight && "register pressure underflow"); CurrSetPressure[*PSetI] -= Weight; } } LLVM_DUMP_METHOD void llvm::dumpRegSetPressure(ArrayRef<unsigned> SetPressure, const TargetRegisterInfo *TRI) { bool Empty = true; for (unsigned i = 0, e = SetPressure.size(); i < e; ++i) { if (SetPressure[i] != 0) { dbgs() << TRI->getRegPressureSetName(i) << "=" << SetPressure[i] << '\n'; Empty = false; } } if (Empty) dbgs() << "\n"; } LLVM_DUMP_METHOD void RegisterPressure::dump(const TargetRegisterInfo *TRI) const { dbgs() << "Max Pressure: "; dumpRegSetPressure(MaxSetPressure, TRI); dbgs() << "Live In: "; for (unsigned i = 0, e = LiveInRegs.size(); i < e; ++i) dbgs() << PrintVRegOrUnit(LiveInRegs[i], TRI) << " "; dbgs() << '\n'; dbgs() << "Live Out: "; for (unsigned i = 0, e = LiveOutRegs.size(); i < e; ++i) dbgs() << PrintVRegOrUnit(LiveOutRegs[i], TRI) << " "; dbgs() << '\n'; } LLVM_DUMP_METHOD void RegPressureTracker::dump() const { if (!isTopClosed() || !isBottomClosed()) { dbgs() << "Curr Pressure: "; dumpRegSetPressure(CurrSetPressure, TRI); } P.dump(TRI); } void PressureDiff::dump(const TargetRegisterInfo &TRI) const { for (const PressureChange &Change : *this) { if (!Change.isValid() || Change.getUnitInc() == 0) continue; dbgs() << " " << TRI.getRegPressureSetName(Change.getPSet()) << " " << Change.getUnitInc(); } dbgs() << '\n'; } /// Increase the current pressure as impacted by these registers and bump /// the high water mark if needed. void RegPressureTracker::increaseRegPressure(ArrayRef<unsigned> RegUnits) { for (unsigned i = 0, e = RegUnits.size(); i != e; ++i) { PSetIterator PSetI = MRI->getPressureSets(RegUnits[i]); unsigned Weight = PSetI.getWeight(); for (; PSetI.isValid(); ++PSetI) { CurrSetPressure[*PSetI] += Weight; if (CurrSetPressure[*PSetI] > P.MaxSetPressure[*PSetI]) { P.MaxSetPressure[*PSetI] = CurrSetPressure[*PSetI]; } } } } /// Simply decrease the current pressure as impacted by these registers. void RegPressureTracker::decreaseRegPressure(ArrayRef<unsigned> RegUnits) { for (unsigned I = 0, E = RegUnits.size(); I != E; ++I) decreaseSetPressure(CurrSetPressure, MRI->getPressureSets(RegUnits[I])); } /// Clear the result so it can be used for another round of pressure tracking. void IntervalPressure::reset() { TopIdx = BottomIdx = SlotIndex(); MaxSetPressure.clear(); LiveInRegs.clear(); LiveOutRegs.clear(); } /// Clear the result so it can be used for another round of pressure tracking. void RegionPressure::reset() { TopPos = BottomPos = MachineBasicBlock::const_iterator(); MaxSetPressure.clear(); LiveInRegs.clear(); LiveOutRegs.clear(); } /// If the current top is not less than or equal to the next index, open it. /// We happen to need the SlotIndex for the next top for pressure update. void IntervalPressure::openTop(SlotIndex NextTop) { if (TopIdx <= NextTop) return; TopIdx = SlotIndex(); LiveInRegs.clear(); } /// If the current top is the previous instruction (before receding), open it. void RegionPressure::openTop(MachineBasicBlock::const_iterator PrevTop) { if (TopPos != PrevTop) return; TopPos = MachineBasicBlock::const_iterator(); LiveInRegs.clear(); } /// If the current bottom is not greater than the previous index, open it. void IntervalPressure::openBottom(SlotIndex PrevBottom) { if (BottomIdx > PrevBottom) return; BottomIdx = SlotIndex(); LiveInRegs.clear(); } /// If the current bottom is the previous instr (before advancing), open it. void RegionPressure::openBottom(MachineBasicBlock::const_iterator PrevBottom) { if (BottomPos != PrevBottom) return; BottomPos = MachineBasicBlock::const_iterator(); LiveInRegs.clear(); } const LiveRange *RegPressureTracker::getLiveRange(unsigned Reg) const { if (TargetRegisterInfo::isVirtualRegister(Reg)) return &LIS->getInterval(Reg); return LIS->getCachedRegUnit(Reg); } void RegPressureTracker::reset() { MBB = nullptr; LIS = nullptr; CurrSetPressure.clear(); LiveThruPressure.clear(); P.MaxSetPressure.clear(); if (RequireIntervals) static_cast<IntervalPressure&>(P).reset(); else static_cast<RegionPressure&>(P).reset(); LiveRegs.PhysRegs.clear(); LiveRegs.VirtRegs.clear(); UntiedDefs.clear(); } /// Setup the RegPressureTracker. /// /// TODO: Add support for pressure without LiveIntervals. void RegPressureTracker::init(const MachineFunction *mf, const RegisterClassInfo *rci, const LiveIntervals *lis, const MachineBasicBlock *mbb, MachineBasicBlock::const_iterator pos, bool ShouldTrackUntiedDefs) { reset(); MF = mf; TRI = MF->getSubtarget().getRegisterInfo(); RCI = rci; MRI = &MF->getRegInfo(); MBB = mbb; TrackUntiedDefs = ShouldTrackUntiedDefs; if (RequireIntervals) { assert(lis && "IntervalPressure requires LiveIntervals"); LIS = lis; } CurrPos = pos; CurrSetPressure.assign(TRI->getNumRegPressureSets(), 0); P.MaxSetPressure = CurrSetPressure; LiveRegs.PhysRegs.setUniverse(TRI->getNumRegs()); LiveRegs.VirtRegs.setUniverse(MRI->getNumVirtRegs()); if (TrackUntiedDefs) UntiedDefs.setUniverse(MRI->getNumVirtRegs()); } /// Does this pressure result have a valid top position and live ins. bool RegPressureTracker::isTopClosed() const { if (RequireIntervals) return static_cast<IntervalPressure&>(P).TopIdx.isValid(); return (static_cast<RegionPressure&>(P).TopPos == MachineBasicBlock::const_iterator()); } /// Does this pressure result have a valid bottom position and live outs. bool RegPressureTracker::isBottomClosed() const { if (RequireIntervals) return static_cast<IntervalPressure&>(P).BottomIdx.isValid(); return (static_cast<RegionPressure&>(P).BottomPos == MachineBasicBlock::const_iterator()); } SlotIndex RegPressureTracker::getCurrSlot() const { MachineBasicBlock::const_iterator IdxPos = CurrPos; while (IdxPos != MBB->end() && IdxPos->isDebugValue()) ++IdxPos; if (IdxPos == MBB->end()) return LIS->getMBBEndIdx(MBB); return LIS->getInstructionIndex(IdxPos).getRegSlot(); } /// Set the boundary for the top of the region and summarize live ins. void RegPressureTracker::closeTop() { if (RequireIntervals) static_cast<IntervalPressure&>(P).TopIdx = getCurrSlot(); else static_cast<RegionPressure&>(P).TopPos = CurrPos; assert(P.LiveInRegs.empty() && "inconsistent max pressure result"); P.LiveInRegs.reserve(LiveRegs.PhysRegs.size() + LiveRegs.VirtRegs.size()); P.LiveInRegs.append(LiveRegs.PhysRegs.begin(), LiveRegs.PhysRegs.end()); for (SparseSet<unsigned>::const_iterator I = LiveRegs.VirtRegs.begin(), E = LiveRegs.VirtRegs.end(); I != E; ++I) P.LiveInRegs.push_back(*I); std::sort(P.LiveInRegs.begin(), P.LiveInRegs.end()); P.LiveInRegs.erase(std::unique(P.LiveInRegs.begin(), P.LiveInRegs.end()), P.LiveInRegs.end()); } /// Set the boundary for the bottom of the region and summarize live outs. void RegPressureTracker::closeBottom() { if (RequireIntervals) static_cast<IntervalPressure&>(P).BottomIdx = getCurrSlot(); else static_cast<RegionPressure&>(P).BottomPos = CurrPos; assert(P.LiveOutRegs.empty() && "inconsistent max pressure result"); P.LiveOutRegs.reserve(LiveRegs.PhysRegs.size() + LiveRegs.VirtRegs.size()); P.LiveOutRegs.append(LiveRegs.PhysRegs.begin(), LiveRegs.PhysRegs.end()); for (SparseSet<unsigned>::const_iterator I = LiveRegs.VirtRegs.begin(), E = LiveRegs.VirtRegs.end(); I != E; ++I) P.LiveOutRegs.push_back(*I); std::sort(P.LiveOutRegs.begin(), P.LiveOutRegs.end()); P.LiveOutRegs.erase(std::unique(P.LiveOutRegs.begin(), P.LiveOutRegs.end()), P.LiveOutRegs.end()); } /// Finalize the region boundaries and record live ins and live outs. void RegPressureTracker::closeRegion() { if (!isTopClosed() && !isBottomClosed()) { assert(LiveRegs.PhysRegs.empty() && LiveRegs.VirtRegs.empty() && "no region boundary"); return; } if (!isBottomClosed()) closeBottom(); else if (!isTopClosed()) closeTop(); // If both top and bottom are closed, do nothing. } /// The register tracker is unaware of global liveness so ignores normal /// live-thru ranges. However, two-address or coalesced chains can also lead /// to live ranges with no holes. Count these to inform heuristics that we /// can never drop below this pressure. void RegPressureTracker::initLiveThru(const RegPressureTracker &RPTracker) { LiveThruPressure.assign(TRI->getNumRegPressureSets(), 0); assert(isBottomClosed() && "need bottom-up tracking to intialize."); for (unsigned i = 0, e = P.LiveOutRegs.size(); i < e; ++i) { unsigned Reg = P.LiveOutRegs[i]; if (TargetRegisterInfo::isVirtualRegister(Reg) && !RPTracker.hasUntiedDef(Reg)) { increaseSetPressure(LiveThruPressure, MRI->getPressureSets(Reg)); } } } /// \brief Convenient wrapper for checking membership in RegisterOperands. /// (std::count() doesn't have an early exit). static bool containsReg(ArrayRef<unsigned> RegUnits, unsigned RegUnit) { return std::find(RegUnits.begin(), RegUnits.end(), RegUnit) != RegUnits.end(); } namespace { /// Collect this instruction's unique uses and defs into SmallVectors for /// processing defs and uses in order. /// /// FIXME: always ignore tied opers class RegisterOperands { const TargetRegisterInfo *TRI; const MachineRegisterInfo *MRI; bool IgnoreDead; public: SmallVector<unsigned, 8> Uses; SmallVector<unsigned, 8> Defs; SmallVector<unsigned, 8> DeadDefs; RegisterOperands(const TargetRegisterInfo *tri, const MachineRegisterInfo *mri, bool ID = false): TRI(tri), MRI(mri), IgnoreDead(ID) {} /// Push this operand's register onto the correct vector. void collect(const MachineOperand &MO) { if (!MO.isReg() || !MO.getReg()) return; if (MO.readsReg()) pushRegUnits(MO.getReg(), Uses); if (MO.isDef()) { if (MO.isDead()) { if (!IgnoreDead) pushRegUnits(MO.getReg(), DeadDefs); } else pushRegUnits(MO.getReg(), Defs); } } protected: void pushRegUnits(unsigned Reg, SmallVectorImpl<unsigned> &RegUnits) { if (TargetRegisterInfo::isVirtualRegister(Reg)) { if (containsReg(RegUnits, Reg)) return; RegUnits.push_back(Reg); } else if (MRI->isAllocatable(Reg)) { for (MCRegUnitIterator Units(Reg, TRI); Units.isValid(); ++Units) { if (containsReg(RegUnits, *Units)) continue; RegUnits.push_back(*Units); } } } }; } // namespace /// Collect physical and virtual register operands. static void collectOperands(const MachineInstr *MI, RegisterOperands &RegOpers) { for (ConstMIBundleOperands OperI(MI); OperI.isValid(); ++OperI) RegOpers.collect(*OperI); // Remove redundant physreg dead defs. SmallVectorImpl<unsigned>::iterator I = std::remove_if(RegOpers.DeadDefs.begin(), RegOpers.DeadDefs.end(), std::bind1st(std::ptr_fun(containsReg), RegOpers.Defs)); RegOpers.DeadDefs.erase(I, RegOpers.DeadDefs.end()); } /// Initialize an array of N PressureDiffs. void PressureDiffs::init(unsigned N) { Size = N; if (N <= Max) { memset(PDiffArray, 0, N * sizeof(PressureDiff)); return; } Max = Size; // HLSL Change Begin: Use overridable operator new/delete delete[] PDiffArray; PDiffArray = new PressureDiff[N]; std::memset(PDiffArray, 0, N * sizeof(PressureDiff)); // HLSL Change End } /// Add a change in pressure to the pressure diff of a given instruction. void PressureDiff::addPressureChange(unsigned RegUnit, bool IsDec, const MachineRegisterInfo *MRI) { PSetIterator PSetI = MRI->getPressureSets(RegUnit); int Weight = IsDec ? -PSetI.getWeight() : PSetI.getWeight(); for (; PSetI.isValid(); ++PSetI) { // Find an existing entry in the pressure diff for this PSet. PressureDiff::iterator I = begin(), E = end(); for (; I != E && I->isValid(); ++I) { if (I->getPSet() >= *PSetI) break; } // If all pressure sets are more constrained, skip the remaining PSets. if (I == E) break; // Insert this PressureChange. if (!I->isValid() || I->getPSet() != *PSetI) { PressureChange PTmp = PressureChange(*PSetI); for (PressureDiff::iterator J = I; J != E && PTmp.isValid(); ++J) std::swap(*J,PTmp); } // Update the units for this pressure set. I->setUnitInc(I->getUnitInc() + Weight); } } /// Record the pressure difference induced by the given operand list. static void collectPDiff(PressureDiff &PDiff, RegisterOperands &RegOpers, const MachineRegisterInfo *MRI) { assert(!PDiff.begin()->isValid() && "stale PDiff"); for (unsigned i = 0, e = RegOpers.Defs.size(); i != e; ++i) PDiff.addPressureChange(RegOpers.Defs[i], true, MRI); for (unsigned i = 0, e = RegOpers.Uses.size(); i != e; ++i) PDiff.addPressureChange(RegOpers.Uses[i], false, MRI); } /// Force liveness of registers. void RegPressureTracker::addLiveRegs(ArrayRef<unsigned> Regs) { for (unsigned i = 0, e = Regs.size(); i != e; ++i) { if (LiveRegs.insert(Regs[i])) increaseRegPressure(Regs[i]); } } /// Add Reg to the live in set and increase max pressure. void RegPressureTracker::discoverLiveIn(unsigned Reg) { assert(!LiveRegs.contains(Reg) && "avoid bumping max pressure twice"); if (containsReg(P.LiveInRegs, Reg)) return; // At live in discovery, unconditionally increase the high water mark. P.LiveInRegs.push_back(Reg); increaseSetPressure(P.MaxSetPressure, MRI->getPressureSets(Reg)); } /// Add Reg to the live out set and increase max pressure. void RegPressureTracker::discoverLiveOut(unsigned Reg) { assert(!LiveRegs.contains(Reg) && "avoid bumping max pressure twice"); if (containsReg(P.LiveOutRegs, Reg)) return; // At live out discovery, unconditionally increase the high water mark. P.LiveOutRegs.push_back(Reg); increaseSetPressure(P.MaxSetPressure, MRI->getPressureSets(Reg)); } /// Recede across the previous instruction. If LiveUses is provided, record any /// RegUnits that are made live by the current instruction's uses. This includes /// registers that are both defined and used by the instruction. If a pressure /// difference pointer is provided record the changes is pressure caused by this /// instruction independent of liveness. bool RegPressureTracker::recede(SmallVectorImpl<unsigned> *LiveUses, PressureDiff *PDiff) { // Check for the top of the analyzable region. if (CurrPos == MBB->begin()) { closeRegion(); return false; } if (!isBottomClosed()) closeBottom(); // Open the top of the region using block iterators. if (!RequireIntervals && isTopClosed()) static_cast<RegionPressure&>(P).openTop(CurrPos); // Find the previous instruction. do --CurrPos; while (CurrPos != MBB->begin() && CurrPos->isDebugValue()); if (CurrPos->isDebugValue()) { closeRegion(); return false; } SlotIndex SlotIdx; if (RequireIntervals) SlotIdx = LIS->getInstructionIndex(CurrPos).getRegSlot(); // Open the top of the region using slot indexes. if (RequireIntervals && isTopClosed()) static_cast<IntervalPressure&>(P).openTop(SlotIdx); RegisterOperands RegOpers(TRI, MRI); collectOperands(CurrPos, RegOpers); if (PDiff) collectPDiff(*PDiff, RegOpers, MRI); // Boost pressure for all dead defs together. increaseRegPressure(RegOpers.DeadDefs); decreaseRegPressure(RegOpers.DeadDefs); // Kill liveness at live defs. // TODO: consider earlyclobbers? for (unsigned i = 0, e = RegOpers.Defs.size(); i < e; ++i) { unsigned Reg = RegOpers.Defs[i]; bool DeadDef = false; if (RequireIntervals) { const LiveRange *LR = getLiveRange(Reg); if (LR) { LiveQueryResult LRQ = LR->Query(SlotIdx); DeadDef = LRQ.isDeadDef(); } } if (DeadDef) { // LiveIntervals knows this is a dead even though it's MachineOperand is // not flagged as such. Since this register will not be recorded as // live-out, increase its PDiff value to avoid underflowing pressure. if (PDiff) PDiff->addPressureChange(Reg, false, MRI); } else { if (LiveRegs.erase(Reg)) decreaseRegPressure(Reg); else discoverLiveOut(Reg); } } // Generate liveness for uses. for (unsigned i = 0, e = RegOpers.Uses.size(); i < e; ++i) { unsigned Reg = RegOpers.Uses[i]; if (!LiveRegs.contains(Reg)) { // Adjust liveouts if LiveIntervals are available. if (RequireIntervals) { const LiveRange *LR = getLiveRange(Reg); if (LR) { LiveQueryResult LRQ = LR->Query(SlotIdx); if (!LRQ.isKill() && !LRQ.valueDefined()) discoverLiveOut(Reg); } } increaseRegPressure(Reg); LiveRegs.insert(Reg); if (LiveUses && !containsReg(*LiveUses, Reg)) LiveUses->push_back(Reg); } } if (TrackUntiedDefs) { for (unsigned i = 0, e = RegOpers.Defs.size(); i < e; ++i) { unsigned Reg = RegOpers.Defs[i]; if (TargetRegisterInfo::isVirtualRegister(Reg) && !LiveRegs.contains(Reg)) UntiedDefs.insert(Reg); } } return true; } /// Advance across the current instruction. bool RegPressureTracker::advance() { assert(!TrackUntiedDefs && "unsupported mode"); // Check for the bottom of the analyzable region. if (CurrPos == MBB->end()) { closeRegion(); return false; } if (!isTopClosed()) closeTop(); SlotIndex SlotIdx; if (RequireIntervals) SlotIdx = getCurrSlot(); // Open the bottom of the region using slot indexes. if (isBottomClosed()) { if (RequireIntervals) static_cast<IntervalPressure&>(P).openBottom(SlotIdx); else static_cast<RegionPressure&>(P).openBottom(CurrPos); } RegisterOperands RegOpers(TRI, MRI); collectOperands(CurrPos, RegOpers); for (unsigned i = 0, e = RegOpers.Uses.size(); i < e; ++i) { unsigned Reg = RegOpers.Uses[i]; // Discover live-ins. bool isLive = LiveRegs.contains(Reg); if (!isLive) discoverLiveIn(Reg); // Kill liveness at last uses. bool lastUse = false; if (RequireIntervals) { const LiveRange *LR = getLiveRange(Reg); lastUse = LR && LR->Query(SlotIdx).isKill(); } else { // Allocatable physregs are always single-use before register rewriting. lastUse = !TargetRegisterInfo::isVirtualRegister(Reg); } if (lastUse && isLive) { LiveRegs.erase(Reg); decreaseRegPressure(Reg); } else if (!lastUse && !isLive) increaseRegPressure(Reg); } // Generate liveness for defs. for (unsigned i = 0, e = RegOpers.Defs.size(); i < e; ++i) { unsigned Reg = RegOpers.Defs[i]; if (LiveRegs.insert(Reg)) increaseRegPressure(Reg); } // Boost pressure for all dead defs together. increaseRegPressure(RegOpers.DeadDefs); decreaseRegPressure(RegOpers.DeadDefs); // Find the next instruction. do ++CurrPos; while (CurrPos != MBB->end() && CurrPos->isDebugValue()); return true; } /// Find the max change in excess pressure across all sets. static void computeExcessPressureDelta(ArrayRef<unsigned> OldPressureVec, ArrayRef<unsigned> NewPressureVec, RegPressureDelta &Delta, const RegisterClassInfo *RCI, ArrayRef<unsigned> LiveThruPressureVec) { Delta.Excess = PressureChange(); for (unsigned i = 0, e = OldPressureVec.size(); i < e; ++i) { unsigned POld = OldPressureVec[i]; unsigned PNew = NewPressureVec[i]; int PDiff = (int)PNew - (int)POld; if (!PDiff) // No change in this set in the common case. continue; // Only consider change beyond the limit. unsigned Limit = RCI->getRegPressureSetLimit(i); if (!LiveThruPressureVec.empty()) Limit += LiveThruPressureVec[i]; if (Limit > POld) { if (Limit > PNew) PDiff = 0; // Under the limit else PDiff = PNew - Limit; // Just exceeded limit. } else if (Limit > PNew) PDiff = Limit - POld; // Just obeyed limit. if (PDiff) { Delta.Excess = PressureChange(i); Delta.Excess.setUnitInc(PDiff); break; } } } /// Find the max change in max pressure that either surpasses a critical PSet /// limit or exceeds the current MaxPressureLimit. /// /// FIXME: comparing each element of the old and new MaxPressure vectors here is /// silly. It's done now to demonstrate the concept but will go away with a /// RegPressureTracker API change to work with pressure differences. static void computeMaxPressureDelta(ArrayRef<unsigned> OldMaxPressureVec, ArrayRef<unsigned> NewMaxPressureVec, ArrayRef<PressureChange> CriticalPSets, ArrayRef<unsigned> MaxPressureLimit, RegPressureDelta &Delta) { Delta.CriticalMax = PressureChange(); Delta.CurrentMax = PressureChange(); unsigned CritIdx = 0, CritEnd = CriticalPSets.size(); for (unsigned i = 0, e = OldMaxPressureVec.size(); i < e; ++i) { unsigned POld = OldMaxPressureVec[i]; unsigned PNew = NewMaxPressureVec[i]; if (PNew == POld) // No change in this set in the common case. continue; if (!Delta.CriticalMax.isValid()) { while (CritIdx != CritEnd && CriticalPSets[CritIdx].getPSet() < i) ++CritIdx; if (CritIdx != CritEnd && CriticalPSets[CritIdx].getPSet() == i) { int PDiff = (int)PNew - (int)CriticalPSets[CritIdx].getUnitInc(); if (PDiff > 0) { Delta.CriticalMax = PressureChange(i); Delta.CriticalMax.setUnitInc(PDiff); } } } // Find the first increase above MaxPressureLimit. // (Ignores negative MDiff). if (!Delta.CurrentMax.isValid() && PNew > MaxPressureLimit[i]) { Delta.CurrentMax = PressureChange(i); Delta.CurrentMax.setUnitInc(PNew - POld); if (CritIdx == CritEnd || Delta.CriticalMax.isValid()) break; } } } /// Record the upward impact of a single instruction on current register /// pressure. Unlike the advance/recede pressure tracking interface, this does /// not discover live in/outs. /// /// This is intended for speculative queries. It leaves pressure inconsistent /// with the current position, so must be restored by the caller. void RegPressureTracker::bumpUpwardPressure(const MachineInstr *MI) { assert(!MI->isDebugValue() && "Expect a nondebug instruction."); // Account for register pressure similar to RegPressureTracker::recede(). RegisterOperands RegOpers(TRI, MRI, /*IgnoreDead=*/true); collectOperands(MI, RegOpers); // Boost max pressure for all dead defs together. // Since CurrSetPressure and MaxSetPressure increaseRegPressure(RegOpers.DeadDefs); decreaseRegPressure(RegOpers.DeadDefs); // Kill liveness at live defs. for (unsigned i = 0, e = RegOpers.Defs.size(); i < e; ++i) { unsigned Reg = RegOpers.Defs[i]; bool DeadDef = false; if (RequireIntervals) { const LiveRange *LR = getLiveRange(Reg); if (LR) { SlotIndex SlotIdx = LIS->getInstructionIndex(MI); LiveQueryResult LRQ = LR->Query(SlotIdx); DeadDef = LRQ.isDeadDef(); } } if (!DeadDef) { if (!containsReg(RegOpers.Uses, Reg)) decreaseRegPressure(Reg); } } // Generate liveness for uses. for (unsigned i = 0, e = RegOpers.Uses.size(); i < e; ++i) { unsigned Reg = RegOpers.Uses[i]; if (!LiveRegs.contains(Reg)) increaseRegPressure(Reg); } } /// Consider the pressure increase caused by traversing this instruction /// bottom-up. Find the pressure set with the most change beyond its pressure /// limit based on the tracker's current pressure, and return the change in /// number of register units of that pressure set introduced by this /// instruction. /// /// This assumes that the current LiveOut set is sufficient. /// /// This is expensive for an on-the-fly query because it calls /// bumpUpwardPressure to recompute the pressure sets based on current /// liveness. This mainly exists to verify correctness, e.g. with /// -verify-misched. getUpwardPressureDelta is the fast version of this query /// that uses the per-SUnit cache of the PressureDiff. void RegPressureTracker:: getMaxUpwardPressureDelta(const MachineInstr *MI, PressureDiff *PDiff, RegPressureDelta &Delta, ArrayRef<PressureChange> CriticalPSets, ArrayRef<unsigned> MaxPressureLimit) { // Snapshot Pressure. // FIXME: The snapshot heap space should persist. But I'm planning to // summarize the pressure effect so we don't need to snapshot at all. std::vector<unsigned> SavedPressure = CurrSetPressure; std::vector<unsigned> SavedMaxPressure = P.MaxSetPressure; bumpUpwardPressure(MI); computeExcessPressureDelta(SavedPressure, CurrSetPressure, Delta, RCI, LiveThruPressure); computeMaxPressureDelta(SavedMaxPressure, P.MaxSetPressure, CriticalPSets, MaxPressureLimit, Delta); assert(Delta.CriticalMax.getUnitInc() >= 0 && Delta.CurrentMax.getUnitInc() >= 0 && "cannot decrease max pressure"); // Restore the tracker's state. P.MaxSetPressure.swap(SavedMaxPressure); CurrSetPressure.swap(SavedPressure); #ifndef NDEBUG if (!PDiff) return; // Check if the alternate algorithm yields the same result. RegPressureDelta Delta2; getUpwardPressureDelta(MI, *PDiff, Delta2, CriticalPSets, MaxPressureLimit); if (Delta != Delta2) { dbgs() << "PDiff: "; PDiff->dump(*TRI); dbgs() << "DELTA: " << *MI; if (Delta.Excess.isValid()) dbgs() << "Excess1 " << TRI->getRegPressureSetName(Delta.Excess.getPSet()) << " " << Delta.Excess.getUnitInc() << "\n"; if (Delta.CriticalMax.isValid()) dbgs() << "Critic1 " << TRI->getRegPressureSetName(Delta.CriticalMax.getPSet()) << " " << Delta.CriticalMax.getUnitInc() << "\n"; if (Delta.CurrentMax.isValid()) dbgs() << "CurrMx1 " << TRI->getRegPressureSetName(Delta.CurrentMax.getPSet()) << " " << Delta.CurrentMax.getUnitInc() << "\n"; if (Delta2.Excess.isValid()) dbgs() << "Excess2 " << TRI->getRegPressureSetName(Delta2.Excess.getPSet()) << " " << Delta2.Excess.getUnitInc() << "\n"; if (Delta2.CriticalMax.isValid()) dbgs() << "Critic2 " << TRI->getRegPressureSetName(Delta2.CriticalMax.getPSet()) << " " << Delta2.CriticalMax.getUnitInc() << "\n"; if (Delta2.CurrentMax.isValid()) dbgs() << "CurrMx2 " << TRI->getRegPressureSetName(Delta2.CurrentMax.getPSet()) << " " << Delta2.CurrentMax.getUnitInc() << "\n"; llvm_unreachable("RegP Delta Mismatch"); } #endif } /// This is the fast version of querying register pressure that does not /// directly depend on current liveness. /// /// @param Delta captures information needed for heuristics. /// /// @param CriticalPSets Are the pressure sets that are known to exceed some /// limit within the region, not necessarily at the current position. /// /// @param MaxPressureLimit Is the max pressure within the region, not /// necessarily at the current position. void RegPressureTracker:: getUpwardPressureDelta(const MachineInstr *MI, /*const*/ PressureDiff &PDiff, RegPressureDelta &Delta, ArrayRef<PressureChange> CriticalPSets, ArrayRef<unsigned> MaxPressureLimit) const { unsigned CritIdx = 0, CritEnd = CriticalPSets.size(); for (PressureDiff::const_iterator PDiffI = PDiff.begin(), PDiffE = PDiff.end(); PDiffI != PDiffE && PDiffI->isValid(); ++PDiffI) { unsigned PSetID = PDiffI->getPSet(); unsigned Limit = RCI->getRegPressureSetLimit(PSetID); if (!LiveThruPressure.empty()) Limit += LiveThruPressure[PSetID]; unsigned POld = CurrSetPressure[PSetID]; unsigned MOld = P.MaxSetPressure[PSetID]; unsigned MNew = MOld; // Ignore DeadDefs here because they aren't captured by PressureChange. unsigned PNew = POld + PDiffI->getUnitInc(); assert((PDiffI->getUnitInc() >= 0) == (PNew >= POld) && "PSet overflow"); if (PNew > MOld) MNew = PNew; // Check if current pressure has exceeded the limit. if (!Delta.Excess.isValid()) { unsigned ExcessInc = 0; if (PNew > Limit) ExcessInc = POld > Limit ? PNew - POld : PNew - Limit; else if (POld > Limit) ExcessInc = Limit - POld; if (ExcessInc) { Delta.Excess = PressureChange(PSetID); Delta.Excess.setUnitInc(ExcessInc); } } // Check if max pressure has exceeded a critical pressure set max. if (MNew == MOld) continue; if (!Delta.CriticalMax.isValid()) { while (CritIdx != CritEnd && CriticalPSets[CritIdx].getPSet() < PSetID) ++CritIdx; if (CritIdx != CritEnd && CriticalPSets[CritIdx].getPSet() == PSetID) { int CritInc = (int)MNew - (int)CriticalPSets[CritIdx].getUnitInc(); if (CritInc > 0 && CritInc <= INT16_MAX) { Delta.CriticalMax = PressureChange(PSetID); Delta.CriticalMax.setUnitInc(CritInc); } } } // Check if max pressure has exceeded the current max. if (!Delta.CurrentMax.isValid() && MNew > MaxPressureLimit[PSetID]) { Delta.CurrentMax = PressureChange(PSetID); Delta.CurrentMax.setUnitInc(MNew - MOld); } } } /// Helper to find a vreg use between two indices [PriorUseIdx, NextUseIdx). static bool findUseBetween(unsigned Reg, SlotIndex PriorUseIdx, SlotIndex NextUseIdx, const MachineRegisterInfo *MRI, const LiveIntervals *LIS) { for (MachineRegisterInfo::use_instr_nodbg_iterator UI = MRI->use_instr_nodbg_begin(Reg), UE = MRI->use_instr_nodbg_end(); UI != UE; ++UI) { const MachineInstr* MI = &*UI; if (MI->isDebugValue()) continue; SlotIndex InstSlot = LIS->getInstructionIndex(MI).getRegSlot(); if (InstSlot >= PriorUseIdx && InstSlot < NextUseIdx) return true; } return false; } /// Record the downward impact of a single instruction on current register /// pressure. Unlike the advance/recede pressure tracking interface, this does /// not discover live in/outs. /// /// This is intended for speculative queries. It leaves pressure inconsistent /// with the current position, so must be restored by the caller. void RegPressureTracker::bumpDownwardPressure(const MachineInstr *MI) { assert(!MI->isDebugValue() && "Expect a nondebug instruction."); // Account for register pressure similar to RegPressureTracker::recede(). RegisterOperands RegOpers(TRI, MRI); collectOperands(MI, RegOpers); // Kill liveness at last uses. Assume allocatable physregs are single-use // rather than checking LiveIntervals. SlotIndex SlotIdx; if (RequireIntervals) SlotIdx = LIS->getInstructionIndex(MI).getRegSlot(); for (unsigned i = 0, e = RegOpers.Uses.size(); i < e; ++i) { unsigned Reg = RegOpers.Uses[i]; if (RequireIntervals) { // FIXME: allow the caller to pass in the list of vreg uses that remain // to be bottom-scheduled to avoid searching uses at each query. SlotIndex CurrIdx = getCurrSlot(); const LiveRange *LR = getLiveRange(Reg); if (LR) { LiveQueryResult LRQ = LR->Query(SlotIdx); if (LRQ.isKill() && !findUseBetween(Reg, CurrIdx, SlotIdx, MRI, LIS)) { decreaseRegPressure(Reg); } } } else if (!TargetRegisterInfo::isVirtualRegister(Reg)) { // Allocatable physregs are always single-use before register rewriting. decreaseRegPressure(Reg); } } // Generate liveness for defs. increaseRegPressure(RegOpers.Defs); // Boost pressure for all dead defs together. increaseRegPressure(RegOpers.DeadDefs); decreaseRegPressure(RegOpers.DeadDefs); } /// Consider the pressure increase caused by traversing this instruction /// top-down. Find the register class with the most change in its pressure limit /// based on the tracker's current pressure, and return the number of excess /// register units of that pressure set introduced by this instruction. /// /// This assumes that the current LiveIn set is sufficient. /// /// This is expensive for an on-the-fly query because it calls /// bumpDownwardPressure to recompute the pressure sets based on current /// liveness. We don't yet have a fast version of downward pressure tracking /// analagous to getUpwardPressureDelta. void RegPressureTracker:: getMaxDownwardPressureDelta(const MachineInstr *MI, RegPressureDelta &Delta, ArrayRef<PressureChange> CriticalPSets, ArrayRef<unsigned> MaxPressureLimit) { // Snapshot Pressure. std::vector<unsigned> SavedPressure = CurrSetPressure; std::vector<unsigned> SavedMaxPressure = P.MaxSetPressure; bumpDownwardPressure(MI); computeExcessPressureDelta(SavedPressure, CurrSetPressure, Delta, RCI, LiveThruPressure); computeMaxPressureDelta(SavedMaxPressure, P.MaxSetPressure, CriticalPSets, MaxPressureLimit, Delta); assert(Delta.CriticalMax.getUnitInc() >= 0 && Delta.CurrentMax.getUnitInc() >= 0 && "cannot decrease max pressure"); // Restore the tracker's state. P.MaxSetPressure.swap(SavedMaxPressure); CurrSetPressure.swap(SavedPressure); } /// Get the pressure of each PSet after traversing this instruction bottom-up. void RegPressureTracker:: getUpwardPressure(const MachineInstr *MI, std::vector<unsigned> &PressureResult, std::vector<unsigned> &MaxPressureResult) { // Snapshot pressure. PressureResult = CurrSetPressure; MaxPressureResult = P.MaxSetPressure; bumpUpwardPressure(MI); // Current pressure becomes the result. Restore current pressure. P.MaxSetPressure.swap(MaxPressureResult); CurrSetPressure.swap(PressureResult); } /// Get the pressure of each PSet after traversing this instruction top-down. void RegPressureTracker:: getDownwardPressure(const MachineInstr *MI, std::vector<unsigned> &PressureResult, std::vector<unsigned> &MaxPressureResult) { // Snapshot pressure. PressureResult = CurrSetPressure; MaxPressureResult = P.MaxSetPressure; bumpDownwardPressure(MI); // Current pressure becomes the result. Restore current pressure. P.MaxSetPressure.swap(MaxPressureResult); CurrSetPressure.swap(PressureResult); }
13,962
362
// Copyright (c) 2016, Baidu.com, Inc. All Rights Reserved // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "tmpfs_volum.h" #include "protocol/galaxy.pb.h" #include "mounter.h" #include "util/error_code.h" #include "collector/collector_engine.h" #include "glog/logging.h" #include <boost/filesystem/path.hpp> #include <boost/filesystem/operations.hpp> #include <assert.h> #include <sys/mount.h> #include <sstream> namespace baidu { namespace galaxy { namespace volum { TmpfsVolum::TmpfsVolum() { } TmpfsVolum::~TmpfsVolum() {} baidu::galaxy::util::ErrorCode TmpfsVolum::Construct() { baidu::galaxy::util::ErrorCode err = Construct_(); if (err.Code() == 0) { vc_.reset(new VolumCollector(this->TargetPath())); vc_->Enable(true); baidu::galaxy::collector::CollectorEngine::GetInstance()->Register(vc_); } return err; } baidu::galaxy::util::ErrorCode TmpfsVolum::Construct_() { const boost::shared_ptr<baidu::galaxy::proto::VolumRequired> vr = Description(); assert(baidu::galaxy::proto::kTmpfs == vr->medium()); // create target dir boost::system::error_code ec; boost::filesystem::path target_path(this->TargetPath()); std::map<std::string, boost::shared_ptr<Mounter> > mounters; ListMounters(mounters); std::map<std::string, boost::shared_ptr<Mounter> >::iterator iter = mounters.find(target_path.string()); if (iter != mounters.end() && iter->second->source == "tmpfs") { return ERRORCODE_OK; } if (iter != mounters.end()) { return ERRORCODE(-1, "mount another path"); } if (!boost::filesystem::exists(target_path, ec) && !baidu::galaxy::file::create_directories(target_path, ec)) { return ERRORCODE(-1, "failed in creating dirs(%s): %s", target_path.string().c_str(), ec.message().c_str()); } baidu::galaxy::util::ErrorCode errc = MountTmpfs(target_path.string(), vr->size(), vr->readonly()); if (0 != errc.Code()) { return ERRORCODE(-1, "mount failed: %s", errc.Message().c_str()); } return ERRORCODE_OK; } baidu::galaxy::util::ErrorCode TmpfsVolum::Destroy() { // do nothing if (vc_.get() != NULL) { vc_->Enable(false); } return Umount(this->TargetPath()); } int64_t TmpfsVolum::Used() { int64_t ret = 0L; if (NULL != vc_.get()) { ret = vc_->Size(); } return ret; } std::string TmpfsVolum::ToString() { return ""; } } } }
1,071
1,080
#pragma once /* name type description */ #define PROCESS_SMAP_METRIC(ACTION) \ ACTION( smap_create, METRIC_COUNTER, "# smap create requests" )\ ACTION( smap_create_exist, METRIC_COUNTER, "# smap already exist" )\ ACTION( smap_create_ok, METRIC_COUNTER, "# smap stored" )\ ACTION( smap_create_ex, METRIC_COUNTER, "# smap create exceptions" )\ ACTION( smap_delete, METRIC_COUNTER, "# smap delete requests" )\ ACTION( smap_delete_ok, METRIC_COUNTER, "# smap delete success" )\ ACTION( smap_delete_notfound, METRIC_COUNTER, "# smap delete miss" )\ ACTION( smap_delete_ex, METRIC_COUNTER, "# smap delete exceptions" )\ ACTION( smap_len, METRIC_COUNTER, "# smap length requests" )\ ACTION( smap_len_ok, METRIC_COUNTER, "# smap length success" )\ ACTION( smap_len_notfound, METRIC_COUNTER, "# smap length miss" )\ ACTION( smap_len_ex, METRIC_COUNTER, "# smap len exceptions" )\ ACTION( smap_find, METRIC_COUNTER, "# smap find requests" )\ ACTION( smap_find_ok, METRIC_COUNTER, "# smap find success" )\ ACTION( smap_find_notfound, METRIC_COUNTER, "# smap find miss" )\ ACTION( smap_find_notamember, METRIC_COUNTER, "# smap find not present" )\ ACTION( smap_find_ex, METRIC_COUNTER, "# smap find exceptions" )\ ACTION( smap_get, METRIC_COUNTER, "# smap get requests" )\ ACTION( smap_get_ok, METRIC_COUNTER, "# smap get success" )\ ACTION( smap_get_notfound, METRIC_COUNTER, "# smap get miss" )\ ACTION( smap_get_oob, METRIC_COUNTER, "# smap get out of bound" )\ ACTION( smap_get_ex, METRIC_COUNTER, "# smap get exceptions" )\ ACTION( smap_insert, METRIC_COUNTER, "# smap insert requests" )\ ACTION( smap_insert_ok, METRIC_COUNTER, "# smap insert success" )\ ACTION( smap_insert_notfound, METRIC_COUNTER, "# smap insert miss" )\ ACTION( smap_insert_noop, METRIC_COUNTER, "# smap insert no action" )\ ACTION( smap_insert_trim, METRIC_COUNTER, "# smap insert lead to trim" )\ ACTION( smap_insert_ex, METRIC_COUNTER, "# smap insert exceptions" )\ ACTION( smap_remove, METRIC_COUNTER, "# smap remove requests" )\ ACTION( smap_remove_ok, METRIC_COUNTER, "# smap remove success" )\ ACTION( smap_remove_notfound, METRIC_COUNTER, "# smap remove miss" )\ ACTION( smap_remove_noop, METRIC_COUNTER, "# smap remove no-op" )\ ACTION( smap_remove_ex, METRIC_COUNTER, "# smap remove exceptions" )\ ACTION( smap_truncate, METRIC_COUNTER, "# smap truncate requests" )\ ACTION( smap_truncate_ok, METRIC_COUNTER, "# smap truncate success" )\ ACTION( smap_truncate_notfound, METRIC_COUNTER, "# smap truncate miss" )\ ACTION( smap_truncate_ex, METRIC_COUNTER, "# smap truncate exceptions" ) struct request; struct response; struct command; /* cmd_* functions must be command_fn (process.c) compatible */ void cmd_smap_create(struct response *rsp, const struct request *req, const struct command *cmd); void cmd_smap_delete(struct response *rsp, const struct request *req, const struct command *cmd); void cmd_smap_truncate(struct response *rsp, const struct request *req, const struct command *cmd); void cmd_smap_len(struct response *rsp, const struct request *req, const struct command *cmd); void cmd_smap_find(struct response *rsp, const struct request *req, const struct command *cmd); void cmd_smap_get(struct response *rsp, const struct request *req, const struct command *cmd); void cmd_smap_insert(struct response *rsp, const struct request *req, const struct command *cmd); void cmd_smap_remove(struct response *rsp, const struct request *req, const struct command *cmd);
1,968
36,552
// Copyright 2021 gRPC authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #ifndef GRPC_CORE_LIB_RESOURCE_QUOTA_RESOURCE_QUOTA_H #define GRPC_CORE_LIB_RESOURCE_QUOTA_RESOURCE_QUOTA_H #include <grpc/support/port_platform.h> #include "src/core/lib/resource_quota/memory_quota.h" #include "src/core/lib/resource_quota/thread_quota.h" namespace grpc_core { class ResourceQuota : public RefCounted<ResourceQuota> { public: ResourceQuota(); ~ResourceQuota() override; ResourceQuota(const ResourceQuota&) = delete; ResourceQuota& operator=(const ResourceQuota&) = delete; std::shared_ptr<MemoryQuota> memory_quota() { return memory_quota_; } const RefCountedPtr<ThreadQuota>& thread_quota() { return thread_quota_; } private: std::shared_ptr<MemoryQuota> memory_quota_; RefCountedPtr<ThreadQuota> thread_quota_; }; } // namespace grpc_core #endif // GRPC_CORE_LIB_RESOURCE_QUOTA_RESOURCE_QUOTA_H
469
864
/********************************************************************************************************************** This file is part of the Control Toolbox (https://github.com/ethz-adrl/control-toolbox), copyright by ETH Zurich. Licensed under the BSD-2 license (see LICENSE file in main directory) **********************************************************************************************************************/ #pragma once #include "JointAcceleration.h" #include "RBDState.h" #include "RigidBodyAcceleration.h" namespace ct { namespace rbd { /** * @class RBDAcceleration * * \ingroup State * * @brief joint acceleration and base acceleration */ template <size_t NJOINTS, typename SCALAR = double> class RBDAcceleration { public: EIGEN_MAKE_ALIGNED_OPERATOR_NEW enum DIMS { NDOF = NJOINTS + 6, NSTATE = 2 * NDOF ///< position/velocity of (joints + base) }; typedef Eigen::Matrix<SCALAR, NDOF, 1> coordinate_vector_t; typedef tpl::RigidBodyAcceleration<SCALAR> RigidBodyAcceleration_t; RBDAcceleration() { setZero(); } RBDAcceleration(const RigidBodyAcceleration_t& baseStateDerivative, const JointAcceleration<NJOINTS, SCALAR>& jointStateDerivative) : baseStateDerivative_(baseStateDerivative), jointStateDerivative_(jointStateDerivative) { } /// @brief get base acceleration RigidBodyAcceleration_t& base() { return baseStateDerivative_; } /// @brief get constant base acceleration const RigidBodyAcceleration_t& base() const { return baseStateDerivative_; } /// @brief get joint acceleration JointAcceleration<NJOINTS, SCALAR>& joints() { return jointStateDerivative_; } /// @brief get constant joint acceleration const JointAcceleration<NJOINTS, SCALAR>& joints() const { return jointStateDerivative_; } typename RBDState<NJOINTS, SCALAR>::state_vector_quat_t toStateUpdateVectorQuaternion( const RBDState<NJOINTS, SCALAR>& state) const { typename RBDState<NJOINTS, SCALAR>::state_vector_quat_t stateDerivative; kindr::RotationQuaternionDiff<SCALAR> rotationQuaternionDiff( state.basePose().getRotationQuaternion(), state.baseLocalAngularVelocity()); stateDerivative << rotationQuaternionDiff.w(), rotationQuaternionDiff.x(), rotationQuaternionDiff.y(), rotationQuaternionDiff.z(), state.base().computeTranslationalVelocityW().toImplementation(), state.joints().getVelocities(), base().getAngularAcceleration().toImplementation(), base().getTranslationalAcceleration().toImplementation(), joints().getAcceleration(); return stateDerivative; } typename RBDState<NJOINTS, SCALAR>::state_vector_euler_t toStateUpdateVectorEulerXyz( const RBDState<NJOINTS, SCALAR>& state) const { typename RBDState<NJOINTS, SCALAR>::state_vector_euler_t stateDerivative; kindr::EulerAnglesXyzDiff<SCALAR> eulerAnglesXyzDiff( state.basePose().getEulerAnglesXyz(), state.baseLocalAngularVelocity()); stateDerivative << eulerAnglesXyzDiff.toImplementation(), state.base().computeTranslationalVelocityW().toImplementation(), state.joints().getVelocities(), base().getAngularAcceleration().toImplementation(), base().getTranslationalAcceleration().toImplementation(), joints().getAcceleration(); return stateDerivative; } coordinate_vector_t toCoordinateAcceleration() const { coordinate_vector_t ddq; ddq << base().getAngularAcceleration().toImplementation(), base().getTranslationalAcceleration().toImplementation(), joints().getAcceleration(); return ddq; } void setZero() { baseStateDerivative_.setZero(); jointStateDerivative_.setZero(); } static RBDAcceleration<NJOINTS, SCALAR> Zero() { return RBDAcceleration<NJOINTS, SCALAR>( RigidBodyAcceleration_t::Zero(), JointAcceleration<NJOINTS, SCALAR>::Zero()); } protected: RigidBodyAcceleration_t baseStateDerivative_; JointAcceleration<NJOINTS, SCALAR> jointStateDerivative_; }; } // namespace rbd } // namespace ct
1,578
1,191
<filename>android/modules/android/src/java/ti/modules/titanium/android/EnvironmentModule.java<gh_stars>1000+ package ti.modules.titanium.android; import org.appcelerator.kroll.KrollModule; import org.appcelerator.kroll.annotations.Kroll; import android.os.Environment; @Kroll.module(parentModule = AndroidModule.class) public class EnvironmentModule extends KrollModule { @Kroll.constant public static final String MEDIA_BAD_REMOVAL = Environment.MEDIA_BAD_REMOVAL; @Kroll.constant public static final String MEDIA_CHECKING = Environment.MEDIA_CHECKING; @Kroll.constant public static final String MEDIA_MOUNTED = Environment.MEDIA_MOUNTED; @Kroll.constant public static final String MEDIA_MOUNTED_READ_ONLY = Environment.MEDIA_MOUNTED_READ_ONLY; @Kroll.constant public static final String MEDIA_NOFS = Environment.MEDIA_NOFS; @Kroll.constant public static final String MEDIA_REMOVED = Environment.MEDIA_REMOVED; @Kroll.constant public static final String MEDIA_SHARED = Environment.MEDIA_SHARED; @Kroll.constant public static final String MEDIA_UNMOUNTABLE = Environment.MEDIA_UNMOUNTABLE; @Kroll.constant public static final String MEDIA_UNMOUNTED = Environment.MEDIA_UNMOUNTED; @Kroll.getProperty public String getDataDirectory() { return Environment.getDataDirectory().getAbsolutePath(); } @Kroll.getProperty public String getDownloadCacheDirectory() { return Environment.getDownloadCacheDirectory().getAbsolutePath(); } @Kroll.getProperty public String getExternalStorageDirectory() { return Environment.getExternalStorageDirectory().getAbsolutePath(); } @Kroll.getProperty public String getExternalStorageState() { return Environment.getExternalStorageState(); } @Kroll.getProperty public String getRootDirectory() { return Environment.getRootDirectory().getAbsolutePath(); } }
590
310
{ "name": "Metal", "description": "An API for working with 3D graphics.", "url": "https://en.wikipedia.org/wiki/Metal_%28API%29" }
51
1,444
package mage.abilities.effects.common.continuous; import mage.abilities.Ability; import mage.abilities.effects.ContinuousEffectImpl; import mage.abilities.effects.common.ChooseCreatureTypeEffect; import mage.constants.*; import mage.game.Game; import mage.game.permanent.Permanent; public class AddChosenSubtypeEffect extends ContinuousEffectImpl { public AddChosenSubtypeEffect() { super(Duration.WhileOnBattlefield, Layer.TypeChangingEffects_4, SubLayer.NA, Outcome.Benefit); staticText = "{this} is the chosen type in addition to its other types"; } public AddChosenSubtypeEffect(final AddChosenSubtypeEffect effect) { super(effect); } @Override public boolean apply(Game game, Ability source) { Permanent permanent = game.getPermanent(source.getSourceId()); if (permanent != null) { SubType subType = ChooseCreatureTypeEffect.getChosenCreatureType(permanent.getId(), game); if (subType != null) { permanent.addSubType(game, subType); } } return true; } @Override public AddChosenSubtypeEffect copy() { return new AddChosenSubtypeEffect(this); } }
441
399
<reponame>Mu-L/Addax { "name": "rediswriter", "class": "com.wgzhao.addax.plugin.writer.rediswriter.RedisWriter", "description": "Use jedis connect to redis, execute restore command", "developer": "alibaba" }
81
22,481
"""Support for XBee Zigbee lights.""" import voluptuous as vol from homeassistant.components.light import LightEntity from . import PLATFORM_SCHEMA, XBeeDigitalOut, XBeeDigitalOutConfig from .const import CONF_ON_STATE, DEFAULT_ON_STATE, DOMAIN, STATES PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend( {vol.Optional(CONF_ON_STATE, default=DEFAULT_ON_STATE): vol.In(STATES)} ) def setup_platform(hass, config, add_entities, discovery_info=None): """Create and add an entity based on the configuration.""" zigbee_device = hass.data[DOMAIN] add_entities([XBeeLight(XBeeDigitalOutConfig(config), zigbee_device)]) class XBeeLight(XBeeDigitalOut, LightEntity): """Use XBeeDigitalOut as light."""
247
327
package com.honvay.cloud.framework.criteria.parser; import com.baomidou.mybatisplus.mapper.EntityWrapper; import com.honvay.cloud.framework.criteria.CriteriaContext; import com.honvay.cloud.framework.criteria.annotation.In; import com.honvay.cola.cloud.framework.util.ConvertUtils; import com.honvay.cola.cloud.framework.util.ObjectUtils; import org.apache.commons.lang3.ArrayUtils; import org.apache.commons.lang3.StringUtils; import java.util.Collection; import java.util.Date; /** * @author LIQIU * @date 2018-4-19 **/ public class InParser extends AbstractParser<In> { private void doParse(CriteriaContext<In> criteriaContext, EntityWrapper entityWrapper, String[] columns, Object[] values) { boolean reverse = criteriaContext.getType().reverse(); for (String column : columns) { if (reverse) { entityWrapper.notIn(column, values); } else { entityWrapper.in(column, values); } } } private void doParse(CriteriaContext<In> criteriaContext, EntityWrapper entityWrapper, String[] columns, Collection<?> values) { boolean reverse = criteriaContext.getType().reverse(); for (String column : columns) { if (reverse) { entityWrapper.notIn(column, values); } else { entityWrapper.in(column, values); } } } /** * 获取值 * * @param criteriaContext * @return */ public Object getValue(CriteriaContext<In> criteriaContext) { Object value = criteriaContext.getValue(); String defaultValue = criteriaContext.getType().defaultValue(); if (ObjectUtils.isNull(value) && StringUtils.isNotEmpty(defaultValue)) { value = defaultValue; } return value; } @Override public void parse(CriteriaContext<In> criteriaContext, EntityWrapper entityWrapper) { Object value = this.getValue(criteriaContext); In in = criteriaContext.getType(); if (criteriaContext.getValue() == null && !this.allowEmpty(criteriaContext.getType())) { throw new IllegalArgumentException("The condition is not allowed to be empty but the value is empty"); } String[] columns = in.columns(); if (ArrayUtils.isEmpty(columns)) { columns = new String[]{this.getDefaultColumn(criteriaContext)}; } //判断是否为数组 if (value.getClass().isArray()) { this.doParse(criteriaContext, entityWrapper, columns, (Object[]) value); } else if (value instanceof Collection) { //是否为集合 this.doParse(criteriaContext, entityWrapper, columns, (Collection) value); } else { //将字符串进行转换 String[] stringValues = String.valueOf(value).split(in.delimiter()); if (in.dataType().equals(String.class)) { this.doParse(criteriaContext, entityWrapper, columns, stringValues); } else { Object[] values = (Object[]) ConvertUtils.convert(stringValues, in.dataType()); this.doParse(criteriaContext, entityWrapper, columns, values); } } } @Override public String getGroup(In in) { return in.group(); } @Override public boolean allowEmpty(In in) { return in.allowEmpty() || StringUtils.isNotEmpty(in.defaultValue()); } @Override public boolean isEnable(In in) { return in.enable(); } public static void main(String[] args) { String dates = "2017-12-10,2018-19-10,2020-10-07"; Object[] values = (Object[]) ConvertUtils.convert(dates, Date.class); System.out.println(values); } }
1,594
665
<reponame>kstepanmpmg/mldb // This file is part of MLDB. Copyright 2015 mldb.ai inc. All rights reserved. // // centroid_feature_generator_test.cc // <NAME> - 19 Jun 2013 // Copyright (c) 2013 mldb.ai inc. All rights reserved. // #define BOOST_TEST_MAIN #define BOOST_TEST_DYN_LINK #include <boost/test/unit_test.hpp> #include "mldb/vfs/filter_streams.h" #include "mldb/plugins/jml/kmeans.h" #include "mldb/utils/testing/fixtures.h" #include <iostream> #include <stdlib.h> using namespace MLDB; using namespace ML; using namespace std; MLDB_FIXTURE( kmeans_test ); BOOST_FIXTURE_TEST_CASE( test_kmeans, kmeans_test ) // BOOST_AUTO_TEST_CASE( test_kmeans) { // Lets' add three "kind of" clusters vector<distribution<float>> centroids; distribution<float> c1(2); c1[0] = 0.; c1[1] = 5.; distribution<float> c2(2); c2[0] = -20.; c2[1] = 0.; distribution<float> c3(2); c3[0] = 10.; c3[1] = -20.; distribution<float> c4(2); c4[0] = -20.; c4[1] = -20.; centroids = {c1,c2,c3,c4}; vector<distribution<float>> data; int nbPerClass = 10; for (int k=0; k < centroids.size(); ++k) for (int i=0; i < nbPerClass; i++) { distribution<float> point = centroids[k]; distribution<float> noise(2); noise[0] = ((rand() % 100) - 50) / 50.; noise[1] = ((rand() % 100) - 50) / 50.; data.push_back(point + noise); } // add trivial points // it causes problems for cosine distance distribution<float> zero(2); zero[0] = 0.; zero[1] = 0.; for (int i=0; i < nbPerClass; ++i) data.push_back(zero); KMeans kmeans; vector<int> in_cluster; // kmeans.train(data, in_cluster, centroids.size()); auto test = [&] () { for (int i=0; i < centroids.size(); ++i) { int cluster = in_cluster[nbPerClass * i]; for (int j=0; j < nbPerClass; ++j) { BOOST_CHECK(in_cluster[i*nbPerClass + j] == cluster); cerr << in_cluster[i*nbPerClass + j] << " "; } cerr << endl; } for (int i=0; i < centroids.size()-1; ++i) BOOST_CHECK(in_cluster[i*nbPerClass] != in_cluster[(i+1)*nbPerClass]); }; // test(); KMeans kmeans2(new KMeansCosineMetric()); kmeans2.train(data, in_cluster, centroids.size()+1, 100); test(); // FIXME finish the test kmeans2.save("test_kmeans.bin.gz"); KMeans kmeans3(new KMeansCosineMetric()); kmeans3.load("test_kmeans.bin.gz"); for (int i=0; i<data.size(); ++i) in_cluster[i] = kmeans3.assign(data[i]); test(); }
1,360
815
// -*- c++ -*- /*========================================================================= Program: Visualization Toolkit Module: vtkMomentGlyphs.h Copyright (c) <NAME>, <NAME>, <NAME> All rights reserved. See Copyright.txt or http://www.kitware.com/Copyright.htm for details. This software is distributed WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the above copyright notice for more information. =========================================================================*/ /*---------------------------------------------------------------------------- Copyright (c) Sandia Corporation See Copyright.txt or http://www.paraview.org/HTML/Copyright.html for details. ----------------------------------------------------------------------------*/ // .NAME vtkMomentGlyphs - Create arrow glyphs representing flux or circulation. // // .SECTION Description // // Circulation is a vector field on 1D cells that represents flow along the path // of the cell. Flux is a vector field on 2D cells that represents flow through // the area of the cell. This filter creates arrow glyphs in the direction of // the flow. // #ifndef vtkMomentGlyphs_h #define vtkMomentGlyphs_h #include "vtkMomentFiltersModule.h" // for export macro #include "vtkPolyDataAlgorithm.h" #include "vtkSmartPointer.h" // For internal methods. class VTKMOMENTFILTERS_EXPORT vtkMomentGlyphs : public vtkPolyDataAlgorithm { public: vtkTypeMacro(vtkMomentGlyphs, vtkPolyDataAlgorithm); static vtkMomentGlyphs* New(); void PrintSelf(ostream& os, vtkIndent indent) override; // Description: // These are basically a convenience method that calls SetInputArrayToProcess // to set the array used as the input flux or circulation. The // fieldAttributeType comes from the vtkDataSetAttributes::AttributeTypes // enum. virtual void SetInputMoment(const char* name); virtual void SetInputMoment(int fieldAttributeType); // Description: // If off (the default), then the input array is taken to be the total flux // through or circulation along each element. If on, then the input array is // taken to be the density of the flux or circulation. vtkGetMacro(InputMomentIsDensity, int); vtkSetMacro(InputMomentIsDensity, int); vtkBooleanMacro(InputMomentIsDensity, int); // Description: // If off (the default), then the glyphs are scaled by the total flux through // or circulation along each element. If on, then the glyphs are scaled by // the flux or circulation density. vtkGetMacro(ScaleByDensity, int); vtkSetMacro(ScaleByDensity, int); vtkBooleanMacro(ScaleByDensity, int); protected: vtkMomentGlyphs(); ~vtkMomentGlyphs() override; int InputMomentIsDensity; int ScaleByDensity; int FillInputPortInformation(int port, vtkInformation* info) override; int RequestData(vtkInformation* request, vtkInformationVector** inputVector, vtkInformationVector* outputVector) override; virtual void MakeMomentVectors( vtkSmartPointer<vtkDataSet>& input, vtkSmartPointer<vtkDataArray>& inputArray); virtual vtkSmartPointer<vtkDataArray> MakeGlyphScaleFactors( vtkDataSet* input, vtkDataArray* inputArray); virtual vtkSmartPointer<vtkPolyData> MakeGlyphs(vtkDataSet* input, vtkDataArray* inputArray); private: vtkMomentGlyphs(const vtkMomentGlyphs&) = delete; void operator=(const vtkMomentGlyphs&) = delete; }; #endif // vtkMomentGlyphs_h
1,039
399
<reponame>adoug/3D-Graphics-Rendering-Cookbook #pragma once #include "shared/vkFramework/CompositeRenderer.h" #include "shared/vkFramework/VulkanShaderProcessor.h" #include "shared/vkFramework/Barriers.h" const int SSAOWidth = 0; // smaller SSAO buffer can be used 512 const int SSAOHeight = 0; // 512; struct SSAOProcessor: public CompositeRenderer { SSAOProcessor(VulkanRenderContext&ctx, VulkanTexture colorTex, VulkanTexture depthTex, VulkanTexture outputTex): CompositeRenderer(ctx), rotateTex(ctx.resources.loadTexture2D("data/rot_texture.bmp")), SSAOTex(ctx.resources.addColorTexture(SSAOWidth, SSAOHeight)), SSAOBlurXTex(ctx.resources.addColorTexture(SSAOWidth, SSAOHeight)), SSAOBlurYTex(ctx.resources.addColorTexture(SSAOWidth, SSAOHeight)), SSAOParamBuffer(mappedUniformBufferAttachment(ctx.resources, &params, VK_SHADER_STAGE_FRAGMENT_BIT)), SSAO(ctx, { .buffers = { SSAOParamBuffer }, .textures = { fsTextureAttachment(depthTex), fsTextureAttachment(rotateTex) } }, { SSAOTex }, "data/shaders/chapter08/VK02_SSAO.frag"), BlurX(ctx, { .textures = { fsTextureAttachment(SSAOTex) } }, { SSAOBlurXTex }, "data/shaders/chapter08/VK02_SSAOBlurX.frag"), BlurY(ctx, { .textures = { fsTextureAttachment(SSAOBlurXTex) } }, { SSAOBlurYTex }, "data/shaders/chapter08/VK02_SSAOBlurY.frag"), SSAOFinal(ctx, { .buffers = { SSAOParamBuffer }, .textures = { fsTextureAttachment(colorTex), fsTextureAttachment(SSAOBlurYTex) } }, { outputTex }, "data/shaders/chapter08/VK02_SSAOFinal.frag"), ssaoColorToShader(ctx_, SSAOTex), ssaoShaderToColor(ctx_, SSAOTex), blurXColorToShader(ctx_, SSAOBlurXTex), blurXShaderToColor(ctx_, SSAOBlurXTex), blurYColorToShader(ctx_, SSAOBlurYTex), blurYShaderToColor(ctx_, SSAOBlurYTex), finalColorToShader(ctx_, outputTex), finalShaderToColor(ctx_, outputTex) { setVkImageName(ctx_.vkDev, rotateTex.image.image, "rotateTex"); setVkImageName(ctx_.vkDev, SSAOTex.image.image, "SSAO"); setVkImageName(ctx_.vkDev, SSAOBlurXTex.image.image, "SSAOBlurX"); setVkImageName(ctx_.vkDev, SSAOBlurYTex.image.image, "SSAOBlurY"); renderers_.emplace_back(ssaoShaderToColor, false); renderers_.emplace_back(blurXShaderToColor, false); renderers_.emplace_back(blurYShaderToColor, false); renderers_.emplace_back(finalShaderToColor, false); renderers_.emplace_back(SSAO, false); renderers_.emplace_back(ssaoColorToShader, false); renderers_.emplace_back(BlurX, false); renderers_.emplace_back(blurXColorToShader, false); renderers_.emplace_back(BlurY, false); renderers_.emplace_back(blurYColorToShader, false); renderers_.emplace_back(SSAOFinal, false); renderers_.emplace_back(finalColorToShader, false); } inline VulkanTexture getSSAO() const { return SSAOTex; } inline VulkanTexture getBlurX() const { return SSAOBlurXTex; } inline VulkanTexture getBlurY() const { return SSAOBlurYTex; } struct Params { float scale_ = 1.0f; float bias_ = 0.2f; float zNear = 0.1f; float zFar = 1000.0f; float radius = 0.2f; float attScale = 1.0f; float distScale = 0.5f; } *params; private: VulkanTexture rotateTex; VulkanTexture SSAOTex, SSAOBlurXTex, SSAOBlurYTex; BufferAttachment SSAOParamBuffer; QuadProcessor SSAO, BlurX, BlurY, SSAOFinal; ColorToShaderOptimalBarrier ssaoColorToShader; ShaderOptimalToColorBarrier ssaoShaderToColor; ColorToShaderOptimalBarrier blurXColorToShader; ShaderOptimalToColorBarrier blurXShaderToColor; ColorToShaderOptimalBarrier blurYColorToShader; ShaderOptimalToColorBarrier blurYShaderToColor; ColorToShaderOptimalBarrier finalColorToShader; ShaderOptimalToColorBarrier finalShaderToColor; };
1,493
497
<reponame>LiuStart/SwipeMenu package com.tubb.smrv.demo; import android.app.Activity; import java.util.ArrayList; import java.util.List; import java.util.Random; /** * Created by tubingbing on 16/4/11. */ public class BaseActivity extends Activity{ Random random = new Random(); protected List<User> getUsers() { List<User> userList = new ArrayList<>(); for (int i=0; i<100; i++){ User user = new User(); user.setUserId(i+1000); user.setUserName("Pobi "+(i+1)); int num = random.nextInt(4); if(num == 0){ user.setPhotoRes(R.drawable.one); }else if(num == 1){ user.setPhotoRes(R.drawable.two); }else if(num == 2){ user.setPhotoRes(R.drawable.three); }else if(num == 3){ user.setPhotoRes(R.drawable.four); } userList.add(user); } return userList; } }
496
602
<reponame>kaxap/PySyncObj import socket from .poller import POLL_EVENT_TYPE from .tcp_connection import TcpConnection, _getAddrType class SERVER_STATE: UNBINDED = 0, BINDED = 1 class TcpServer(object): def __init__(self, poller, host, port, onNewConnection, sendBufferSize = 2 ** 13, recvBufferSize = 2 ** 13, connectionTimeout = 3.5,): self.__poller = poller self.__host = host self.__port = int(port) self.__hostAddrType = _getAddrType(host) self.__sendBufferSize = sendBufferSize self.__recvBufferSize = recvBufferSize self.__socket = None self.__fileno = None self.__state = SERVER_STATE.UNBINDED self.__onNewConnectionCallback = onNewConnection self.__connectionTimeout = connectionTimeout def bind(self): self.__socket = socket.socket(self.__hostAddrType, socket.SOCK_STREAM) self.__socket.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, self.__sendBufferSize) self.__socket.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, self.__recvBufferSize) self.__socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1) self.__socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) self.__socket.setblocking(0) self.__socket.bind((self.__host, self.__port)) self.__socket.listen(5) self.__fileno = self.__socket.fileno() self.__poller.subscribe(self.__fileno, self.__onNewConnection, POLL_EVENT_TYPE.READ | POLL_EVENT_TYPE.ERROR) self.__state = SERVER_STATE.BINDED def unbind(self): self.__state = SERVER_STATE.UNBINDED if self.__fileno is not None: self.__poller.unsubscribe(self.__fileno) self.__fileno = None if self.__socket is not None: self.__socket.close() def __onNewConnection(self, descr, event): if event & POLL_EVENT_TYPE.READ: try: sock, addr = self.__socket.accept() sock.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, self.__sendBufferSize) sock.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, self.__recvBufferSize) sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1) sock.setblocking(0) conn = TcpConnection(poller=self.__poller, socket=sock, timeout=self.__connectionTimeout, sendBufferSize=self.__sendBufferSize, recvBufferSize=self.__recvBufferSize) self.__onNewConnectionCallback(conn) except socket.error as e: if e.errno not in (socket.errno.EAGAIN, socket.errno.EWOULDBLOCK): self.unbind() return if event & POLL_EVENT_TYPE.ERROR: self.unbind() return
1,571
1,600
<filename>tests/transforms_tests/image_tests/test_pca_lighting.py<gh_stars>1000+ import unittest import numpy as np from chainer import testing from chainercv.transforms import pca_lighting class TestPCALighting(unittest.TestCase): def test_pca_lighting(self): img = np.random.uniform(size=(3, 48, 32)) out = pca_lighting(img, 0.1) self.assertEqual(img.shape, out.shape) self.assertEqual(img.dtype, out.dtype) out = pca_lighting(img, 0) self.assertEqual(img.shape, out.shape) self.assertEqual(img.dtype, out.dtype) np.testing.assert_equal(out, img) testing.run_module(__name__, __file__)
284
438
<filename>models/ensemble.py import torch import torch.nn as nn class Ensemble(nn.Module): def __init__(self, models, name=None): super(Ensemble, self).__init__() if name is not None: self.name = name else: self.name = "%s_ensemble" % models[0].name self.models = nn.ModuleList(models) def forward(self, x): xs = torch.stack([model(x) for model in self.models]) xs = xs - torch.logsumexp(xs, dim=-1, keepdim=True) x = torch.logsumexp(xs, dim=0) return x
256