max_stars_count
int64
301
224k
text
stringlengths
6
1.05M
token_count
int64
3
727k
348
<reponame>chamberone/Leaflet.PixiOverlay<filename>docs/data/leg-t2/077/07703119.json {"nom":"Clos-Fontaine","circ":"3ème circonscription","dpt":"Seine-et-Marne","inscrits":221,"abs":127,"votants":94,"blancs":7,"nuls":0,"exp":87,"res":[{"nuance":"UDI","nom":"<NAME>","voix":45},{"nuance":"FN","nom":"<NAME>","voix":42}]}
136
416
// // NSUserActivity+IntentsAdditions.h // Intents // // Copyright (c) 2016-2017 Apple Inc. All rights reserved. // #import <Foundation/Foundation.h> @class INInteraction; NS_ASSUME_NONNULL_BEGIN @interface NSUserActivity (IntentsAdditions) @property (readonly, nullable, NS_NONATOMIC_IOSONLY) INInteraction *interaction API_AVAILABLE(macosx(10.12), ios(10.0), watchos(3.2)); // A human-understandable string that can be used to suggest a voice shortcut phrase to the user @property (readwrite, copy, nullable, NS_NONATOMIC_IOSONLY) NSString *suggestedInvocationPhrase API_AVAILABLE(ios(12.0), watchos(5.0)) API_UNAVAILABLE(macosx, tvos); @end NS_ASSUME_NONNULL_END
255
2,151
/* * Copyright (C) 2015 The Android Open Source Project * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * http://www.apache.org/licenses/LICENSE-2.0 * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package android.databinding; import android.test.AndroidTestCase; import android.view.View; public class DataBindingMapperTest extends AndroidTestCase { public void testNotDataBindingId() { View view = new View(getContext()); view.setTag("layout/unexpected"); android.databinding.DataBinderMapper mapper = new android.databinding.DataBinderMapper(); ViewDataBinding binding = mapper.getDataBinder(null, view, 1); assertNull(binding); } public void testInvalidView() { View view = new View(getContext()); view.setTag("layout/unexpected"); android.databinding.DataBinderMapper mapper = new android.databinding.DataBinderMapper(); Throwable error = null; try { mapper.getDataBinder(null, view, android.databinding.testapp.R.layout.multi_res_layout); } catch (Throwable t) { error = t; } assertNotNull(error); assertEquals("The tag for multi_res_layout is invalid. Received: layout/unexpected", error.getMessage()); } }
587
381
{ "task_params": { "max_nodes_per_batch": 5000 }, "model_params": { "gnn_num_layers": 4, "gnn_hidden_dim": 256, "gnn_use_target_state_as_input": true, "gnn_normalize_by_num_incoming": false, "gnn_num_edge_MLP_hidden_layers": 0, "gnn_layer_input_dropout_rate": 0.1, "gnn_dense_every_num_layers": 1, "gnn_residual_every_num_layers": 2, "gnn_global_exchange_every_num_layers": 10000, "gnn_use_inter_layer_layernorm": false, "gnn_initial_node_representation_activation": "tanh", "gnn_dense_intermediate_layer_activation": "tanh", "gnn_message_activation_function": "ReLU", "gnn_aggregation_function": "sum" } }
380
892
<filename>advisories/unreviewed/2022/04/GHSA-h8w6-2w9h-h7rw/GHSA-h8w6-2w9h-h7rw.json { "schema_version": "1.2.0", "id": "GHSA-h8w6-2w9h-h7rw", "modified": "2022-04-29T02:57:14Z", "published": "2022-04-29T02:57:14Z", "aliases": [ "CVE-2004-0152" ], "details": "Multiple stack-based buffer overflows in (1) the encode_mime function, (2) the encode_uuencode function, (3) or the decode_uuencode function for emil 2.1.0 and earlier allow remote attackers to execute arbitrary code via e-mail messages containing attachments with filenames.", "severity": [ ], "affected": [ ], "references": [ { "type": "ADVISORY", "url": "https://nvd.nist.gov/vuln/detail/CVE-2004-0152" }, { "type": "WEB", "url": "https://exchange.xforce.ibmcloud.com/vulnerabilities/15601" }, { "type": "WEB", "url": "http://marc.info/?l=bugtraq&m=108024939827236&w=2" }, { "type": "WEB", "url": "http://www.debian.org/security/2004/dsa-468" } ], "database_specific": { "cwe_ids": [ ], "severity": "HIGH", "github_reviewed": false } }
526
5,169
<reponame>Gantios/Specs { "name": "rapidjson", "version": "1.1.0", "summary": "A fast JSON parser/generator for C++ with both SAX/DOM style API", "description": "RapidJSON is a JSON parser and generator for C++. It was inspired by RapidXml.\n\nRapidJSON is small but complete. It supports both SAX and DOM style API. The SAX parser is only a half thousand lines of code.\nRapidJSON is fast. Its performance can be comparable to strlen(). It also optionally supports SSE2/SSE4.2 for acceleration.\nRapidJSON is self-contained and header-only. It does not depend on external libraries such as BOOST. It even does not depend on STL.\nRapidJSON is memory-friendly. Each JSON value occupies exactly 16 bytes for most 32/64-bit machines (excluding text string). By default it uses a fast memory allocator, and the parser allocates memory compactly during parsing.\nRapidJSON is Unicode-friendly. It supports UTF-8, UTF-16, UTF-32 (LE & BE), and their detection, validation and transcoding internally. For example, you can read a UTF-8 file and let RapidJSON transcode the JSON strings into UTF-16 in the DOM. It also supports surrogates and \"\u0000\" (null character).", "homepage": "http://rapidjson.org/", "license": { "type": "MIT", "file": "license.txt" }, "authors": "<NAME>", "platforms": { "ios": "8.0" }, "source": { "git": "https://github.com/miloyip/rapidjson.git", "tag": "version1.1.0" }, "source_files": "include/rapidjson/**/*.h", "header_mappings_dir": "include/rapidjson", "requires_arc": false }
495
2,142
<filename>demo/src/main/java/com/liaoinstan/demospring/demo12/Demo12Activity.java package com.liaoinstan.demospring.demo12; import android.os.Bundle; import android.view.View; import android.widget.Toast; import androidx.appcompat.app.AlertDialog; import androidx.appcompat.app.AppCompatActivity; import androidx.recyclerview.widget.RecyclerView; import com.liaoinstan.demospring.R; import com.liaoinstan.demospring.utils.StatusBarUtil; import com.liaoinstan.springview.weixinheader.Program; import com.liaoinstan.springview.weixinheaderv2.WeixinHeaderV2; import com.liaoinstan.springview.weixinheaderv2.WeixinTitleBar; import com.liaoinstan.springview.widget.SpringView; import java.util.ArrayList; import java.util.List; import java.util.Random; public class Demo12Activity extends AppCompatActivity implements WeixinHeaderV2.OnMoreClickListener, WeixinHeaderV2.OnProgramClickListener, WeixinHeaderV2.OnProgramDropListener, WeixinHeaderV2.OnSearchClickListener { private View bottomView; private WeixinTitleBar weixinTitleBar; private SpringView springView; private WeixinHeaderV2 weixinHeaderV2; //测试数据 //注意这里本应该给每个对象一个网络图片url链接,再利用图片加载框架加载网络图片, //但是在本demo中不想引入无关框架,所以这里把本地资源图片id转成String类型保存在url字段中,加载图片的时候再转回int类型设置图片,这样做仅为演示 private List<Program> dataRecent = new ArrayList<Program>() {{ add(new Program("ofo小黄车", String.valueOf(R.drawable.wx_program1))); add(new Program("哈图", String.valueOf(R.drawable.wx_program2))); add(new Program("好货", String.valueOf(R.drawable.wx_program3))); add(new Program("快闪", String.valueOf(R.drawable.wx_program4))); add(new Program("蘑菇街", String.valueOf(R.drawable.wx_program5))); }}; @Override protected void onCreate(Bundle savedInstanceState) { super.onCreate(savedInstanceState); setContentView(R.layout.activity_demo12); bottomView = findViewById(R.id.bottom_view); weixinTitleBar = findViewById(R.id.weixin_title_bar); springView = findViewById(R.id.springview); //设置状态栏为透明重叠(沉浸式) StatusBarUtil.setTranslucent(this); springView.setListener(new SpringView.OnFreshListener() { @Override public void onRefresh() { } @Override public void onLoadmore() { } }); /////////////////////////////////// //// 初始化微信小程序headerV2 //// /////////////////////////////////// //微信headerV2 的构造函数需要bottomView,weixinTitleBar两个参数 //bottomView 是页面底部导航条,weixinHeaderV2在拖拽过程中会控制它收起或弹出,可以是任意View,这里为了方便使用了material包下的BottomNavigationView //weixinTitleBar 是页面顶部的标题栏,weixinHeaderV2在拖拽过程中会控制它进行动画交互 //这两个参数均可为 null,为null则 weixinHeaderV2不会进行任何处理 weixinHeaderV2 = new WeixinHeaderV2(bottomView, weixinTitleBar); //设置加载图片回调方法 weixinHeaderV2.setOnLoadImgCallback((imageView, imgUrl, position) -> { //在这个回调中自行使用项目中的图片加载框架加载网络图片,这里因为不想在demo中导入无关框架,所以直接加载本地图片演示 imageView.setImageResource(Integer.parseInt(imgUrl)); }); //item 点击事件 weixinHeaderV2.setOnProgramClickListener(this); //item drop(拖拽删除)事件 weixinHeaderV2.setOnProgramDropListener(this); //“更多”按钮点击事件 weixinHeaderV2.setOnMoreClickListener(this); //“搜索框”点击事件 weixinHeaderV2.setOnSearchClickListener(this); //设置小程序数据('最近使用') //如果要添加数据到'我的小程序',调用addItemMine(...)方法 weixinHeaderV2.addItemRecent(dataRecent); //设置header到SpringView springView.setHeader(weixinHeaderV2); } @Override public void onClick(Program program, RecyclerView.ViewHolder holder, int position) { Toast.makeText(Demo12Activity.this, program.getName() + " click", Toast.LENGTH_SHORT).show(); } @Override public boolean onDrop(Program program, RecyclerView.ViewHolder holder, int position) { //返回ture会执行删除操作,false不删除,默认会删除 return true; } @Override public void onSearchClick() { Toast.makeText(this, "onSearchClick", Toast.LENGTH_SHORT).show(); } @Override public void onMoreClick() { //弹出个对话框,点确定就随机添加一个小程序 new AlertDialog.Builder(this) .setMessage("确定要添加新的小程序?(仅测试)") .setPositiveButton("确定", (dialog, which) -> { if (weixinHeaderV2.getRecentProgramCount() >= 7) { //'最近使用'最多只能有7条数据 Toast.makeText(Demo12Activity.this, "已经放不下了", Toast.LENGTH_SHORT).show(); return; } switch (new Random().nextInt(5)) { case 0: weixinHeaderV2.addItemRecent(new Program("ofo小黄车", String.valueOf(R.drawable.wx_program1))); break; case 1: weixinHeaderV2.addItemRecent(new Program("哈图", String.valueOf(R.drawable.wx_program2))); break; case 2: weixinHeaderV2.addItemRecent(new Program("好货", String.valueOf(R.drawable.wx_program3))); break; case 3: weixinHeaderV2.addItemRecent(new Program("快闪", String.valueOf(R.drawable.wx_program4))); break; case 4: weixinHeaderV2.addItemRecent(new Program("蘑菇街", String.valueOf(R.drawable.wx_program5))); break; } }) .create() .show(); } }
3,506
1,553
<gh_stars>1000+ #!/usr/bin/env python # -*- coding: utf-8 -*- """ Options """ import json import os import pkgutil import shlex from argparse import ArgumentParser import six def build_argument_parser(): """ Builds the argument parser :return: the argument parser :rtype: ArgumentParser """ opts = ArgumentParser() opts.add_argument(dest='filename', help='Filename or release name to guess', nargs='*') naming_opts = opts.add_argument_group("Naming") naming_opts.add_argument('-t', '--type', dest='type', default=None, help='The suggested file type: movie, episode. If undefined, type will be guessed.') naming_opts.add_argument('-n', '--name-only', dest='name_only', action='store_true', default=None, help='Parse files as name only, considering "/" and "\\" like other separators.') naming_opts.add_argument('-Y', '--date-year-first', action='store_true', dest='date_year_first', default=None, help='If short date is found, consider the first digits as the year.') naming_opts.add_argument('-D', '--date-day-first', action='store_true', dest='date_day_first', default=None, help='If short date is found, consider the second digits as the day.') naming_opts.add_argument('-L', '--allowed-languages', action='append', dest='allowed_languages', default=None, help='Allowed language (can be used multiple times)') naming_opts.add_argument('-C', '--allowed-countries', action='append', dest='allowed_countries', default=None, help='Allowed country (can be used multiple times)') naming_opts.add_argument('-E', '--episode-prefer-number', action='store_true', dest='episode_prefer_number', default=None, help='Guess "serie.213.avi" as the episode 213. Without this option, ' 'it will be guessed as season 2, episode 13') naming_opts.add_argument('-T', '--expected-title', action='append', dest='expected_title', default=None, help='Expected title to parse (can be used multiple times)') naming_opts.add_argument('-G', '--expected-group', action='append', dest='expected_group', default=None, help='Expected release group (can be used multiple times)') input_opts = opts.add_argument_group("Input") input_opts.add_argument('-f', '--input-file', dest='input_file', default=None, help='Read filenames from an input text file. File should use UTF-8 charset.') output_opts = opts.add_argument_group("Output") output_opts.add_argument('-v', '--verbose', action='store_true', dest='verbose', default=None, help='Display debug output') output_opts.add_argument('-P', '--show-property', dest='show_property', default=None, help='Display the value of a single property (title, series, video_codec, year, ...)') output_opts.add_argument('-a', '--advanced', dest='advanced', action='store_true', default=None, help='Display advanced information for filename guesses, as json output') output_opts.add_argument('-s', '--single-value', dest='single_value', action='store_true', default=None, help='Keep only first value found for each property') output_opts.add_argument('-l', '--enforce-list', dest='enforce_list', action='store_true', default=None, help='Wrap each found value in a list even when property has a single value') output_opts.add_argument('-j', '--json', dest='json', action='store_true', default=None, help='Display information for filename guesses as json output') output_opts.add_argument('-y', '--yaml', dest='yaml', action='store_true', default=None, help='Display information for filename guesses as yaml output') conf_opts = opts.add_argument_group("Configuration") conf_opts.add_argument('-c', '--config', dest='config', action='append', default=None, help='Filepath to the configuration file. Configuration contains the same options as ' 'those command line options, but option names have "-" characters replaced with "_". ' 'If not defined, guessit tries to read a configuration default configuration file at ' '~/.guessit/options.(json|yml|yaml) and ~/.config/guessit/options.(json|yml|yaml). ' 'Set to "false" to disable default configuration file loading.') conf_opts.add_argument('--no-embedded-config', dest='no_embedded_config', action='store_true', default=None, help='Disable default configuration.') information_opts = opts.add_argument_group("Information") information_opts.add_argument('-p', '--properties', dest='properties', action='store_true', default=None, help='Display properties that can be guessed.') information_opts.add_argument('-V', '--values', dest='values', action='store_true', default=None, help='Display property values that can be guessed.') information_opts.add_argument('--version', dest='version', action='store_true', default=None, help='Display the guessit version.') return opts def parse_options(options=None, api=False): """ Parse given option string :param options: :type options: :param api :type boolean :return: :rtype: """ if isinstance(options, six.string_types): args = shlex.split(options) options = vars(argument_parser.parse_args(args)) elif options is None: if api: options = {} else: options = vars(argument_parser.parse_args()) elif not isinstance(options, dict): options = vars(argument_parser.parse_args(options)) return options argument_parser = build_argument_parser() class ConfigurationException(Exception): """ Exception related to configuration file. """ pass def load_config(options): """ Load configuration from configuration file, if defined. :param options: :type options: :return: :rtype: """ config_files_enabled = True custom_config_files = None if options.get('config') is not None: custom_config_files = options.get('config') if not custom_config_files \ or not custom_config_files[0] \ or custom_config_files[0].lower() in ['0', 'no', 'false', 'disabled']: config_files_enabled = False configurations = [] if config_files_enabled: home_directory = os.path.expanduser("~") cwd = os.getcwd() yaml_supported = False try: import yaml # pylint: disable=unused-variable yaml_supported = True except ImportError: pass config_file_locations = get_config_file_locations(home_directory, cwd, yaml_supported) config_files = [f for f in config_file_locations if os.path.exists(f)] if custom_config_files: config_files = config_files + custom_config_files for config_file in config_files: config_file_options = load_config_file(config_file) if config_file_options: configurations.append(config_file_options) if not options.get('no_embedded_config'): embedded_options_data = pkgutil.get_data('guessit', 'config/options.json').decode("utf-8") embedded_options = json.loads(embedded_options_data) configurations.append(embedded_options) if configurations: configurations.append(options) return merge_configurations(*configurations) return options def merge_configurations(*configurations): """ Merge configurations into a single options dict. :param configurations: :type configurations: :return: :rtype: """ merged = {} for options in configurations: pristine = options.get('pristine') if pristine: if pristine is True: merged = {} else: for to_reset in pristine: if to_reset in merged: del merged[to_reset] for (option, value) in options.items(): if value is not None and option != 'pristine': if option in merged.keys() and isinstance(merged[option], list): merged[option].extend(value) elif isinstance(value, list): merged[option] = list(value) else: merged[option] = value return merged def load_config_file(filepath): """ Load a configuration as an options dict. Format of the file is given with filepath extension. :param filepath: :type filepath: :return: :rtype: """ if filepath.endswith('.json'): with open(filepath) as config_file_data: return json.load(config_file_data) if filepath.endswith('.yaml') or filepath.endswith('.yml'): try: import yaml with open(filepath) as config_file_data: return yaml.load(config_file_data) except ImportError: # pragma: no cover raise ConfigurationException('Configuration file extension is not supported. ' 'PyYAML should be installed to support "%s" file' % ( filepath,)) raise ConfigurationException('Configuration file extension is not supported for "%s" file.' % (filepath,)) def get_config_file_locations(homedir, cwd, yaml_supported=False): """ Get all possible locations for configuration file. :param homedir: user home directory :type homedir: basestring :param cwd: current working directory :type homedir: basestring :return: :rtype: list """ locations = [] configdirs = [(os.path.join(homedir, '.guessit'), 'options'), (os.path.join(homedir, '.config', 'guessit'), 'options'), (cwd, 'guessit.options')] configexts = ['json'] if yaml_supported: configexts.append('yaml') configexts.append('yml') for configdir in configdirs: for configext in configexts: locations.append(os.path.join(configdir[0], configdir[1] + '.' + configext)) return locations
4,570
1,049
package me.devilsen.czxing.code; /** * desc: code result model * date: 2019/08/17 * * @author : dongsen */ public class CodeResult { private BarcodeFormat format; private String text; private float[] points; private int scanType; CodeResult(BarcodeFormat format, String text) { this.format = format; this.text = text; } public CodeResult(String content, int formatIndex, float[] points, int scanType) { this.text = content; if (formatIndex < 0) { this.format = BarcodeFormat.QR_CODE; } else { this.format = BarcodeFormat.values()[formatIndex]; } this.points = points; this.scanType = scanType; } public void setPoint(float[] lists) { points = lists; } public BarcodeFormat getFormat() { return format; } public String getText() { return text; } public float[] getPoints() { return points; } public int getScanType() { return scanType; } @Override public String toString() { return "text: " + text + "\nformat: " + getFormat() + "\nscanType: " + getScanType() + "\npoints: " + getPointsString(); } private String getPointsString() { StringBuilder stringBuilder = new StringBuilder(); int i = 0; for (float list : points) { i++; stringBuilder.append(list).append(" "); if (i % 2 == 0) { stringBuilder.append("\n"); } } return stringBuilder.toString(); } }
739
348
<gh_stars>100-1000 {"nom":"Monclar","circ":"3ème circonscription","dpt":"Lot-et-Garonne","inscrits":590,"abs":311,"votants":279,"blancs":37,"nuls":8,"exp":234,"res":[{"nuance":"REM","nom":"M. <NAME>","voix":145},{"nuance":"FN","nom":"<NAME>","voix":89}]}
104
1,968
<reponame>agramonte/corona ////////////////////////////////////////////////////////////////////////////// // // This file is part of the Corona game engine. // For overview and more information on licensing please refer to README.md // Home page: https://github.com/coronalabs/corona // Contact: <EMAIL> // ////////////////////////////////////////////////////////////////////////////// #ifndef _Rtt_DisplayDefaults_H__ #define _Rtt_DisplayDefaults_H__ #include "Core/Rtt_Types.h" #include "Display/Rtt_Paint.h" #include "Renderer/Rtt_RenderTypes.h" // ---------------------------------------------------------------------------- namespace Rtt { // ---------------------------------------------------------------------------- class DisplayDefaults { public: DisplayDefaults(); public: Color GetClearColor() const { return fClearColor; } Color GetFillColor() const { return fFillColor; } Color GetStrokeColor() const { return fStrokeColor; } Color GetTextColor() const { return fTextColor; } Color GetLineColor() const { return fLineColor; } float GetAnchorX() const { return fAnchorX; } float GetAnchorY() const { return fAnchorY; } bool IsAnchorClamped() const { return fIsAnchorClamped; } void SetClearColor( Color newValue ) { fClearColor = newValue; } void SetFillColor( Color newValue ) { fFillColor = newValue; } void SetStrokeColor( Color newValue ) { fStrokeColor = newValue; } void SetTextColor( Color newValue ) { fTextColor = newValue; } void SetLineColor( Color newValue ) { fLineColor = newValue; } void SetAnchorX( float newValue ) { fAnchorX = newValue; } void SetAnchorY( float newValue ) { fAnchorY = newValue; } void SetAnchorClamped( bool newValue ) { fIsAnchorClamped = newValue; } public: RenderTypes::TextureFilter GetMagTextureFilter() const { return (RenderTypes::TextureFilter)fMagTextureFilter; } void SetMagTextureFilter( RenderTypes::TextureFilter newValue ) { fMagTextureFilter = newValue; } RenderTypes::TextureFilter GetMinTextureFilter() const { return (RenderTypes::TextureFilter)fMinTextureFilter; } void SetMinTextureFilter( RenderTypes::TextureFilter newValue ) { fMinTextureFilter = newValue; } RenderTypes::TextureWrap GetTextureWrapX() const { return (RenderTypes::TextureWrap)fWrapX; } void SetTextureWrapX( RenderTypes::TextureWrap newValue ) { fWrapX = newValue; } RenderTypes::TextureWrap GetTextureWrapY() const { return (RenderTypes::TextureWrap)fWrapY; } void SetTextureWrapY( RenderTypes::TextureWrap newValue ) { fWrapY = newValue; } bool IsImageSheetSampledInsideFrame() const { return fIsImageSheetSampledInsideFrame;} void SetImageSheetSampledInsideFrame( bool newValue ) { fIsImageSheetSampledInsideFrame = newValue; } public: bool IsV1Compatibility() const { return fV1Compatibility; } void SetV1Compatibility( bool newValue ) { fV1Compatibility = newValue; } bool IsByteColorRange() const { return IsV1Compatibility(); } bool ShouldPreloadTextures() const { return fPreloadTextures; } void SetPreloadTextures( bool newValue ) { fPreloadTextures = newValue; } bool IsNativeTextFieldFontSizeScaled() const { return fIsNativeTextFieldFontSizeScaled; } void SetIsNativeTextFieldFontSizeScaled( bool value ) { fIsNativeTextFieldFontSizeScaled = value; } bool IsNativeTextBoxFontSizeScaled() const { return fIsNativeTextBoxFontSizeScaled; } void SetIsNativeTextBoxFontSizeScaled( bool value ) { fIsNativeTextBoxFontSizeScaled = value; } bool IsShaderCompilerVerbose() const { return fShaderCompilerVerbose; } void SetShaderCompilerVerbose( bool newValue ) { fShaderCompilerVerbose = newValue; } private: Color fClearColor; Color fFillColor; Color fStrokeColor; Color fTextColor; Color fLineColor; float fAnchorX; float fAnchorY; U8 fMagTextureFilter; U8 fMinTextureFilter; U8 fWrapX; U8 fWrapY; bool fV1Compatibility; bool fPreloadTextures; bool fIsNativeTextFieldFontSizeScaled; bool fIsNativeTextBoxFontSizeScaled; bool fShaderCompilerVerbose; bool fIsAnchorClamped; bool fIsImageSheetSampledInsideFrame; }; // ---------------------------------------------------------------------------- } // namespace Rtt // ---------------------------------------------------------------------------- #endif // _Rtt_DisplayDefaults_H__
1,328
14,668
// Copyright 2015 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. package org.chromium.content_shell.browsertests; import android.content.Context; import android.net.Uri; import android.view.Window; import android.view.WindowManager; import androidx.core.content.FileProvider; import org.chromium.base.ContentUriUtils; import org.chromium.base.ContextUtils; import org.chromium.base.StrictModeContext; import org.chromium.base.library_loader.LibraryLoader; import org.chromium.base.library_loader.LibraryProcessType; import org.chromium.content_public.browser.BrowserStartupController; import org.chromium.content_public.browser.BrowserStartupController.StartupCallback; import org.chromium.content_shell.ShellManager; import org.chromium.native_test.NativeBrowserTest; import org.chromium.native_test.NativeBrowserTestActivity; import org.chromium.ui.base.ActivityWindowAndroid; import org.chromium.ui.base.IntentRequestTracker; import org.chromium.ui.base.WindowAndroid; import java.io.File; /** An Activity base class for running browser tests against ContentShell. */ public abstract class ContentShellBrowserTestActivity extends NativeBrowserTestActivity { private static final String TAG = "native_test"; private ShellManager mShellManager; private WindowAndroid mWindowAndroid; private static class FileProviderHelper implements ContentUriUtils.FileProviderUtil { // Keep this variable in sync with the value defined in file_paths.xml. private static final String API_AUTHORITY_SUFFIX = ".FileProvider"; @Override public Uri getContentUriFromFile(File file) { Context appContext = ContextUtils.getApplicationContext(); return FileProvider.getUriForFile( appContext, appContext.getPackageName() + API_AUTHORITY_SUFFIX, file); } } /** * Initializes the browser process. * * This generally includes loading native libraries and switching to the native command line, * among other things. */ @Override protected void initializeBrowserProcess() { try (StrictModeContext ignored = StrictModeContext.allowDiskReads()) { LibraryLoader.getInstance().ensureInitialized(); } ContentUriUtils.setFileProviderUtil(new FileProviderHelper()); setContentView(getTestActivityViewId()); mShellManager = (ShellManager) findViewById(getShellManagerViewId()); IntentRequestTracker intentRequestTracker = IntentRequestTracker.createFromActivity(this); mWindowAndroid = new ActivityWindowAndroid( this, /* listenToActivityState= */ true, intentRequestTracker); mShellManager.setWindow(mWindowAndroid); Window wind = this.getWindow(); wind.addFlags(WindowManager.LayoutParams.FLAG_DISMISS_KEYGUARD); wind.addFlags(WindowManager.LayoutParams.FLAG_SHOW_WHEN_LOCKED); wind.addFlags(WindowManager.LayoutParams.FLAG_TURN_SCREEN_ON); BrowserStartupController.getInstance().setContentMainCallbackForTests(() -> { // This jumps into C++ to set up and run the test harness. The test harness runs // ContentMain()-equivalent code, and then waits for javaStartupTasksComplete() // to be called. runTests(); }); BrowserStartupController.getInstance().startBrowserProcessesAsync( LibraryProcessType.PROCESS_BROWSER, false, false, new StartupCallback() { @Override public void onSuccess() { // The C++ test harness is running thanks to runTests() above, but it // waits for Java initialization to complete. This tells C++ that it may // continue now to finish running the tests. NativeBrowserTest.javaStartupTasksComplete(); } @Override public void onFailure() { throw new RuntimeException("Failed to startBrowserProcessesAsync()"); } }); } @Override /** * Ensure that the user data directory gets overridden to getPrivateDataDirectory() (which is * cleared at the start of every run); the directory that ANDROID_APP_DATA_DIR is set to in the * context of Java browsertests is not cleared as it also holds persistent state, which * causes test failures due to state bleedthrough. See crbug.com/617734 for details. */ protected String getUserDataDirectoryCommandLineSwitch() { return "data-path"; } protected abstract int getTestActivityViewId(); protected abstract int getShellManagerViewId(); }
1,726
2,808
#!/usr/bin/env python import os os.system("clear") f = open("tmp","r") # Setup a read connection to file filedata = f.read() # Read the file f.close() # Close the connection filedata = filedata.split("\n") # Turn into a list ############################## out = [] # Create an empty array for i in filedata: if "@" in i: # grep '@' if not "apples" in i: # grep -v 'apples' out.append(i.lower()) # Append to array and change to lower case out = list(set(out)) # Make list unique out.sort() # Sort for j in out: print j
513
575
<gh_stars>100-1000 class Preparer(object): """ A plain preparation object which just passes through data. It also is relevant as the protocol subclasses should implement to work with Restless. """ def __init__(self): super(Preparer, self).__init__() def prepare(self, data): """ Handles actually transforming the data. By default, this does nothing & simply returns the data passed to it. """ return data class FieldsPreparer(Preparer): """ A more complex preparation object, this will return a given set of fields. This takes a ``fields`` parameter, which should be a dictionary of keys (fieldnames to expose to the user) & values (a dotted lookup path to the desired attribute/key on the object). Example:: preparer = FieldsPreparer(fields={ # ``user`` is the key the client will see. # ``author.pk`` is the dotted path lookup ``FieldsPreparer`` # will traverse on the data to return a value. 'user': 'author.pk', }) """ def __init__(self, fields): super(FieldsPreparer, self).__init__() self.fields = fields def prepare(self, data): """ Handles transforming the provided data into the fielded data that should be exposed to the end user. Uses the ``lookup_data`` method to traverse dotted paths. Returns a dictionary of data as the response. """ result = {} if not self.fields: # No fields specified. Serialize everything. return data for fieldname, lookup in self.fields.items(): if isinstance(lookup, SubPreparer): result[fieldname] = lookup.prepare(data) else: result[fieldname] = self.lookup_data(lookup, data) return result def lookup_data(self, lookup, data): """ Given a lookup string, attempts to descend through nested data looking for the value. Can work with either dictionary-alikes or objects (or any combination of those). Lookups should be a string. If it is a dotted path, it will be split on ``.`` & it will traverse through to find the final value. If not, it will simply attempt to find either a key or attribute of that name & return it. Example:: >>> data = { ... 'type': 'message', ... 'greeting': { ... 'en': 'hello', ... 'fr': 'bonjour', ... 'es': 'hola', ... }, ... 'person': Person( ... name='daniel' ... ) ... } >>> lookup_data('type', data) 'message' >>> lookup_data('greeting.en', data) 'hello' >>> lookup_data('person.name', data) 'daniel' """ value = data parts = lookup.split('.') if not parts or not parts[0]: return value part = parts[0] remaining_lookup = '.'.join(parts[1:]) if callable(getattr(data, 'keys', None)) and hasattr(data, '__getitem__'): # Dictionary enough for us. value = data[part] elif data is not None: # Assume it's an object. value = getattr(data, part) # Call if it's callable except if it's a Django DB manager instance # We check if is a manager by checking the db_manager (duck typing) if callable(value) and not hasattr(value, 'db_manager'): value = value() if not remaining_lookup: return value # There's more to lookup, so dive in recursively. return self.lookup_data(remaining_lookup, value) class SubPreparer(FieldsPreparer): """ A preparation class designed to be used within other preparers. This is primary to enable deeply-nested structures, allowing you to compose/share definitions as well. Typical usage consists of creating a configured instance of a FieldsPreparer, then use a `SubPreparer` to pull it in. Example:: # First, define the nested fields you'd like to expose. author_preparer = FieldsPreparer(fields={ 'id': 'pk', 'username': 'username', 'name': 'get_full_name', }) # Then, in the main preparer, pull them in using `SubPreparer`. preparer = FieldsPreparer(fields={ 'author': SubPreparer('user', author_preparer), # Other fields can come before/follow as normal. 'content': 'post', 'created': 'created_at', }) """ def __init__(self, lookup, preparer): self.lookup = lookup self.preparer = preparer def get_inner_data(self, data): """ Used internally so that the correct data is extracted out of the broader dataset, allowing the preparer being called to deal with just the expected subset. """ return self.lookup_data(self.lookup, data) def prepare(self, data): """ Handles passing the data to the configured preparer. Uses the ``get_inner_data`` method to provide the correct subset of the data. Returns a dictionary of data as the response. """ return self.preparer.prepare(self.get_inner_data(data)) class CollectionSubPreparer(SubPreparer): """ A preparation class designed to handle collections of data. This is useful in the case where you have a 1-to-many or many-to-many relationship of data to expose as part of the parent data. Example:: # First, set up a preparer that handles the data for each thing in # the broader collection. comment_preparer = FieldsPreparer(fields={ 'comment': 'comment_text', 'created': 'created', }) # Then use it with the ``CollectionSubPreparer`` to create a list # of prepared sub items. preparer = FieldsPreparer(fields={ # A normal blog post field. 'post': 'post_text', # All the comments on the post. 'comments': CollectionSubPreparer('comments.all', comment_preparer), }) """ def prepare(self, data): """ Handles passing each item in the collection data to the configured subpreparer. Uses a loop and the ``get_inner_data`` method to provide the correct item of the data. Returns a list of data as the response. """ result = [] for item in self.get_inner_data(data): result.append(self.preparer.prepare(item)) return result
2,829
1,946
<reponame>OpnTec/open-event-scraper import os import PIL from PIL import Image import requests def save_img(img_url, filename, filter_image=True, dirname='speakers'): try: img_content = requests.get(img_url).content filename = os.path.join(dirname, filename) f = open((filename), 'wb') f.write(img_content) f.close() if filter_image: basewidth = 100 img = Image.open(filename) wpercent = (basewidth / float(img.size[0])) hsize = int((float(img.size[1]) * float(wpercent))) img = img.resize((basewidth, hsize), PIL.Image.ANTIALIAS) img.save(filename) return filename except: return ""
249
443
#!/usr/bin/python3 # # Copyright 2018 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Converts pickled imagenet_32x32 files to .npy files. The default imagenet_32x32 data files are stored in Python 3 pickly encoding. The rest of our code is in Python 2, so we have a separate script that just deals with this issue separately. You can execute it as: python3 convert_imagenet.py after which you should no longer have to manually deal with imagenet. """ import os import numpy as np import pickle _DATA_DIR = "data/imagenet_32/" def unpickle(filename): with open(filename, "rb") as fo: dict = pickle.load(fo) return dict train_file_names = ["train_data_batch_" + str(idx) for idx in range(1, 11)] val_file_names = ["val_data"] for file_name in train_file_names + val_file_names: data = unpickle(os.path.join(_DATA_DIR, file_name)) image_file_name = file_name + "_image" label_file_name = file_name + "_label" np.save(os.path.join(_DATA_DIR, image_file_name), data["data"]) np.save(os.path.join(_DATA_DIR, label_file_name), np.array(data["labels"]))
526
310
<filename>gear/software/m/motionx-gps-ios.json { "name": "MotionX GPS (iOS)", "description": "A position tracking app.", "url": "https://itunes.apple.com/us/app/motionx-gps/id299949744" }
76
2,338
<gh_stars>1000+ //===-- PThreadEvent.cpp ----------------------------------------*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // Created by <NAME> on 6/16/07. // //===----------------------------------------------------------------------===// #include "PThreadEvent.h" #include "DNBLog.h" #include <cerrno> PThreadEvent::PThreadEvent(uint32_t bits, uint32_t validBits) : m_mutex(), m_set_condition(), m_reset_condition(), m_bits(bits), m_validBits(validBits), m_reset_ack_mask(0) { // DNBLogThreadedIf(LOG_EVENTS, "%p PThreadEvent::%s (0x%8.8x, 0x%8.8x)", // this, __FUNCTION__, bits, validBits); } PThreadEvent::~PThreadEvent() { // DNBLogThreadedIf(LOG_EVENTS, "%p %s", this, LLVM_PRETTY_FUNCTION); } uint32_t PThreadEvent::NewEventBit() { // DNBLogThreadedIf(LOG_EVENTS, "%p %s", this, LLVM_PRETTY_FUNCTION); PTHREAD_MUTEX_LOCKER(locker, m_mutex); uint32_t mask = 1; while (mask & m_validBits) mask <<= 1; m_validBits |= mask; return mask; } void PThreadEvent::FreeEventBits(const uint32_t mask) { // DNBLogThreadedIf(LOG_EVENTS, "%p PThreadEvent::%s (0x%8.8x)", this, // __FUNCTION__, mask); if (mask) { PTHREAD_MUTEX_LOCKER(locker, m_mutex); m_bits &= ~mask; m_validBits &= ~mask; } } uint32_t PThreadEvent::GetEventBits() const { // DNBLogThreadedIf(LOG_EVENTS, "%p %s", this, LLVM_PRETTY_FUNCTION); PTHREAD_MUTEX_LOCKER(locker, m_mutex); uint32_t bits = m_bits; return bits; } // Replace the event bits with a new bitmask value void PThreadEvent::ReplaceEventBits(const uint32_t bits) { // DNBLogThreadedIf(LOG_EVENTS, "%p PThreadEvent::%s (0x%8.8x)", this, // __FUNCTION__, bits); PTHREAD_MUTEX_LOCKER(locker, m_mutex); // Make sure we have some bits and that they aren't already set... if (m_bits != bits) { // Figure out which bits are changing uint32_t changed_bits = m_bits ^ bits; // Set the new bit values m_bits = bits; // If any new bits are set, then broadcast if (changed_bits & m_bits) m_set_condition.Broadcast(); } } // Set one or more event bits and broadcast if any new event bits get set // that weren't already set. void PThreadEvent::SetEvents(const uint32_t mask) { // DNBLogThreadedIf(LOG_EVENTS, "%p PThreadEvent::%s (0x%8.8x)", this, // __FUNCTION__, mask); // Make sure we have some bits to set if (mask) { PTHREAD_MUTEX_LOCKER(locker, m_mutex); // Save the old event bit state so we can tell if things change uint32_t old = m_bits; // Set the all event bits that are set in 'mask' m_bits |= mask; // Broadcast only if any extra bits got set. if (old != m_bits) m_set_condition.Broadcast(); } } // Reset one or more event bits void PThreadEvent::ResetEvents(const uint32_t mask) { // DNBLogThreadedIf(LOG_EVENTS, "%p PThreadEvent::%s (0x%8.8x)", this, // __FUNCTION__, mask); if (mask) { PTHREAD_MUTEX_LOCKER(locker, m_mutex); // Save the old event bit state so we can tell if things change uint32_t old = m_bits; // Clear the all event bits that are set in 'mask' m_bits &= ~mask; // Broadcast only if any extra bits got reset. if (old != m_bits) m_reset_condition.Broadcast(); } } // Wait until 'timeout_abstime' for any events that are set in // 'mask'. If 'timeout_abstime' is NULL, then wait forever. uint32_t PThreadEvent::WaitForSetEvents(const uint32_t mask, const struct timespec *timeout_abstime) const { // DNBLogThreadedIf(LOG_EVENTS, "%p PThreadEvent::%s (0x%8.8x, %p)", this, // __FUNCTION__, mask, timeout_abstime); int err = 0; // pthread_cond_timedwait() or pthread_cond_wait() will atomically // unlock the mutex and wait for the condition to be set. When either // function returns, they will re-lock the mutex. We use an auto lock/unlock // class (PThreadMutex::Locker) to allow us to return at any point in this // function and not have to worry about unlocking the mutex. PTHREAD_MUTEX_LOCKER(locker, m_mutex); do { // Check our predicate (event bits) in case any are already set if (mask & m_bits) { uint32_t bits_set = mask & m_bits; // Our PThreadMutex::Locker will automatically unlock our mutex return bits_set; } if (timeout_abstime) { // Wait for condition to get broadcast, or for a timeout. If we get // a timeout we will drop out of the do loop and return false which // is what we want. err = ::pthread_cond_timedwait(m_set_condition.Condition(), m_mutex.Mutex(), timeout_abstime); // Retest our predicate in case of a race condition right at the end // of the timeout. if (err == ETIMEDOUT) { uint32_t bits_set = mask & m_bits; return bits_set; } } else { // Wait for condition to get broadcast. The only error this function // should return is if err = ::pthread_cond_wait(m_set_condition.Condition(), m_mutex.Mutex()); } } while (err == 0); return 0; } // Wait until 'timeout_abstime' for any events in 'mask' to reset. // If 'timeout_abstime' is NULL, then wait forever. uint32_t PThreadEvent::WaitForEventsToReset( const uint32_t mask, const struct timespec *timeout_abstime) const { // DNBLogThreadedIf(LOG_EVENTS, "%p PThreadEvent::%s (0x%8.8x, %p)", this, // __FUNCTION__, mask, timeout_abstime); int err = 0; // pthread_cond_timedwait() or pthread_cond_wait() will atomically // unlock the mutex and wait for the condition to be set. When either // function returns, they will re-lock the mutex. We use an auto lock/unlock // class (PThreadMutex::Locker) to allow us to return at any point in this // function and not have to worry about unlocking the mutex. PTHREAD_MUTEX_LOCKER(locker, m_mutex); do { // Check our predicate (event bits) each time through this do loop if ((mask & m_bits) == 0) { // All the bits requested have been reset, return zero indicating // which bits from the mask were still set (none of them) return 0; } if (timeout_abstime) { // Wait for condition to get broadcast, or for a timeout. If we get // a timeout we will drop out of the do loop and return false which // is what we want. err = ::pthread_cond_timedwait(m_reset_condition.Condition(), m_mutex.Mutex(), timeout_abstime); } else { // Wait for condition to get broadcast. The only error this function // should return is if err = ::pthread_cond_wait(m_reset_condition.Condition(), m_mutex.Mutex()); } } while (err == 0); // Return a mask indicating which bits (if any) were still set return mask & m_bits; } uint32_t PThreadEvent::WaitForResetAck(const uint32_t mask, const struct timespec *timeout_abstime) const { if (mask & m_reset_ack_mask) { // DNBLogThreadedIf(LOG_EVENTS, "%p PThreadEvent::%s (0x%8.8x, %p)", this, // __FUNCTION__, mask, timeout_abstime); return WaitForEventsToReset(mask & m_reset_ack_mask, timeout_abstime); } return 0; }
2,838
2,381
<filename>docker-java-api/src/main/java/com/github/dockerjava/api/command/LogContainerCmd.java<gh_stars>1000+ package com.github.dockerjava.api.command; import java.io.InputStream; import javax.annotation.CheckForNull; import javax.annotation.Nonnull; import com.github.dockerjava.api.DockerClient; import com.github.dockerjava.api.async.ResultCallback; import com.github.dockerjava.api.model.Frame; /** * Get container logs * * @param followStream * - true or false, return stream. Defaults to false. * @param stdout * - true or false, includes stdout log. Defaults to false. * @param stderr * - true or false, includes stderr log. Defaults to false. * @param timestamps * - true or false, if true, print timestamps for every log line. Defaults to false. * @param tail * - `all` or `<number>`, Output specified number of lines at the end of logs * @param since * - UNIX timestamp (integer) to filter logs. Specifying a timestamp will only output log-entries since that timestamp. Default: * 0 (unfiltered) */ public interface LogContainerCmd extends AsyncDockerCmd<LogContainerCmd, Frame> { @CheckForNull String getContainerId(); @CheckForNull Integer getTail(); @CheckForNull Boolean hasFollowStreamEnabled(); @CheckForNull Boolean hasTimestampsEnabled(); @CheckForNull Boolean hasStdoutEnabled(); @CheckForNull Boolean hasStderrEnabled(); @CheckForNull Integer getSince(); LogContainerCmd withContainerId(@Nonnull String containerId); /** * Following the stream means the resulting {@link InputStream} returned by {@link #exec()} reads infinitely. So a * {@link InputStream#read()} MAY BLOCK FOREVER as long as no data is streamed from the docker host to {@link DockerClient}! */ LogContainerCmd withFollowStream(Boolean followStream); LogContainerCmd withTimestamps(Boolean timestamps); LogContainerCmd withStdOut(Boolean stdout); LogContainerCmd withStdErr(Boolean stderr); LogContainerCmd withTailAll(); LogContainerCmd withTail(Integer tail); LogContainerCmd withSince(Integer since); /** * @throws com.github.dockerjava.api.NotFoundException * No such container */ @Override <T extends ResultCallback<Frame>> T exec(T resultCallback); interface Exec extends DockerCmdAsyncExec<LogContainerCmd, Frame> { } }
855
609
package org.consenlabs.tokencore.wallet.model; import java.util.Objects; public class KeyPair { String privateKey; String publicKey; public KeyPair() { } public KeyPair(String privateKey, String publicKey) { this.privateKey = privateKey; this.publicKey = publicKey; } public String getPrivateKey() { return privateKey; } public void setPrivateKey(String privateKey) { this.privateKey = privateKey; } public String getPublicKey() { return publicKey; } public void setPublicKey(String publicKey) { this.publicKey = publicKey; } @Override public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; KeyPair keyPair = (KeyPair) o; return Objects.equals(privateKey, keyPair.privateKey) && Objects.equals(publicKey, keyPair.publicKey); } @Override public int hashCode() { return Objects.hash(privateKey, publicKey); } }
342
2,151
<filename>packages/StatementService/src/com/android/statementservice/retriever/AbstractAsset.java<gh_stars>1000+ /* * Copyright (C) 2015 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.android.statementservice.retriever; import android.util.JsonReader; import org.json.JSONException; import java.io.IOException; import java.io.StringReader; /** * A handle representing the identity and address of some digital asset. An asset is an online * entity that typically provides some service or content. Examples of assets are websites, Android * apps, Twitter feeds, and Plus Pages. * * <p> Asset can be represented by a JSON string. For example, the web site https://www.google.com * can be represented by * <pre> * {"namespace": "web", "site": "https://www.google.com"} * </pre> * * <p> The Android app with package name com.google.test that is signed by a certificate with sha256 * fingerprint 11:22:33 can be represented by * <pre> * {"namespace": "android_app", * "package_name": "com.google.test", * "sha256_cert_fingerprints": ["11:22:33"]} * </pre> * * <p>Given a signed APK, Java 7's commandline keytool can compute the fingerprint using: * {@code keytool -list -printcert -jarfile signed_app.apk} */ public abstract class AbstractAsset { /** * Returns a JSON string representation of this asset. The strings returned by this function are * normalized -- they can be used for equality testing. */ public abstract String toJson(); /** * Returns a key that can be used by {@link AbstractAssetMatcher} to lookup the asset. * * <p> An asset will match an {@code AssetMatcher} only if the value of this method is equal to * {@code AssetMatcher.getMatchedLookupKey()}. */ public abstract int lookupKey(); /** * Creates a new Asset from its JSON string representation. * * @throws AssociationServiceException if the assetJson is not well formatted. */ public static AbstractAsset create(String assetJson) throws AssociationServiceException { JsonReader reader = new JsonReader(new StringReader(assetJson)); reader.setLenient(false); try { return AssetFactory.create(JsonParser.parse(reader)); } catch (JSONException | IOException e) { throw new AssociationServiceException( "Input is not a well formatted asset descriptor.", e); } } /** * If this is the source asset of a statement file, should the retriever follow * any insecure (non-HTTPS) include statements made by the asset. */ public abstract boolean followInsecureInclude(); }
1,005
348
<filename>docs/data/t2/014/14183.json {"nom":"Cossesseville","dpt":"Calvados","inscrits":85,"abs":13,"votants":72,"blancs":7,"nuls":4,"exp":61,"res":[{"panneau":"1","voix":37},{"panneau":"2","voix":24}]}
86
1,190
<reponame>freearhey/TableTool // // Constants.h // Table Tool // // Created by <NAME> on 12.06.17. // Copyright (c) 2017 Egger Apps. All rights reserved. // #include <Cocoa/Cocoa.h> #pragma mark Pasteboard Types extern NSString *TTRowInternalPboardType;
102
496
<filename>test/types-tests.cpp #include "gtest/gtest.h" #include <vector> #include "types.h" using namespace std; using namespace simit::ir; using namespace simit; TEST(IndexSet, constructor) { ASSERT_EQ(IndexSet(4), 4); ASSERT_EQ(IndexSet(42), 42); } TEST(IndexSet, eq) { auto r0 = IndexSet(4); ASSERT_EQ(r0, r0); ASSERT_EQ(r0, IndexSet(4)); ASSERT_NE(r0, IndexSet(8)); } TEST(IndexDomain, getSize) { vector<IndexSet> indices; indices.push_back(IndexSet(1)); ASSERT_EQ(IndexDomain(indices).getSize(), 1); indices.clear(); indices.push_back(IndexSet(3)); ASSERT_EQ(IndexDomain(indices).getSize(), 3); indices.push_back(IndexSet(4)); ASSERT_EQ(IndexDomain(indices).getSize(), 12); } TEST(IndexDomain, eq) { vector<IndexSet> indices0; vector<IndexSet> indices1; vector<IndexSet> indices2; indices0.push_back(IndexSet(1)); indices1.push_back(IndexSet(1)); indices2.push_back(IndexSet(2)); ASSERT_EQ(IndexDomain(indices0), IndexDomain(indices1)); ASSERT_NE(IndexDomain(indices0), IndexDomain(indices2)); indices0.push_back(IndexSet(2)); indices1.push_back(IndexSet(2)); ASSERT_EQ(IndexDomain(indices0), IndexDomain(indices1)); indices1.pop_back(); indices1.push_back(IndexSet(3)); ASSERT_NE(IndexDomain(indices0), IndexDomain(indices1)); } TEST(Type, getSize) { vector<IndexSet> indices0; vector<IndexSet> indices1; vector<IndexDomain> dimensions; indices0.push_back(IndexSet(2)); indices0.push_back(IndexSet(3)); dimensions.push_back(IndexDomain(indices0)); indices1.push_back(IndexSet(5)); indices1.push_back(IndexSet(7)); dimensions.push_back(IndexDomain(indices1)); Type type = TensorType::make(ScalarType(ScalarType::Float), dimensions); ASSERT_EQ(type.toTensor()->size(), 210u); } TEST(Type, eq) { vector<IndexSet> idxs0; vector<IndexSet> idxs1; vector<IndexDomain> dims0; vector<IndexDomain> dims1; idxs0.push_back(IndexSet(2)); idxs0.push_back(IndexSet(3)); dims0.push_back(IndexDomain(idxs0)); dims1.push_back(IndexDomain(idxs0)); ASSERT_EQ(TensorType::make(ScalarType(ScalarType::Float), dims0), TensorType::make(ScalarType(ScalarType::Float), dims1)); ASSERT_NE(TensorType::make(ScalarType(ScalarType::Float), dims0), TensorType::make(ScalarType(ScalarType::Int), dims1)); idxs1.push_back(IndexSet(3)); idxs1.push_back(IndexSet(2)); dims1.push_back(IndexDomain(idxs1)); ASSERT_NE(TensorType::make(ScalarType(ScalarType::Float), dims0), TensorType::make(ScalarType(ScalarType::Float), dims1)); } TEST(Type, blocking) { }
1,063
348
{"nom":"Les Baux-de-Provence","circ":"15ème circonscription","dpt":"Bouches-du-Rhône","inscrits":358,"abs":154,"votants":204,"blancs":12,"nuls":4,"exp":188,"res":[{"nuance":"LR","nom":"<NAME>","voix":110},{"nuance":"REM","nom":"<NAME>","voix":78}]}
101
5,169
<reponame>Gantios/Specs<gh_stars>1000+ { "name": "TapeTextView", "version": "1.0.0", "summary": "Create a marker pen effect to your textview's text", "description": "This view wants to create a marker pen effect to your text and you can control the slope and the height of the marker as well as the line spacing between multiple lines", "homepage": "https://github.com/ndPPPhz/TapeTextView", "license": "MIT", "authors": { "ndPPPhz": "<EMAIL>" }, "social_media_url": "https://twitter.com/ndPPPhz", "platforms": { "ios": "12.0" }, "swift_versions": "4", "source": { "git": "https://github.com/ndPPPhz/TapeTextView.git", "tag": "1.0.0" }, "source_files": "Classes/", "exclude_files": "Classes/Exclude", "swift_version": "4" }
299
6,958
<gh_stars>1000+ // // PackTf.cpp // MNNConverter // // Created by MNN on 2019/01/31. // Copyright © 2018, Alibaba Group Holding Limited // #include "TfUtils.hpp" #include "tfOpConverter.hpp" #include "graph.pb.h" DECLARE_OP_CONVERTER(PackTf); MNN::OpType PackTf::opType() { return MNN::OpType_Pack; } MNN::OpParameter PackTf::type() { return MNN::OpParameter_PackParam; } void PackTf::run(MNN::OpT *dstOp, TmpNode *srcNode) { auto pack = new MNN::PackParamT; tensorflow::AttrValue value; find_attr_value(srcNode->tfNode, "T", value); MNN::DataType dataType = (MNN::DataType)value.type(); pack->dataType = dataType; find_attr_value(srcNode->tfNode, "axis", value); pack->axis = value.i(); dstOp->main.value = pack; } REGISTER_CONVERTER(PackTf, Pack);
349
892
<filename>advisories/unreviewed/2022/05/GHSA-483m-372f-vxc5/GHSA-483m-372f-vxc5.json { "schema_version": "1.2.0", "id": "GHSA-483m-372f-vxc5", "modified": "2022-05-13T01:19:41Z", "published": "2022-05-13T01:19:41Z", "aliases": [ "CVE-2018-18850" ], "details": "In Octopus Deploy 2018.8.0 through 2018.9.x before 2018.9.1, an authenticated user with permission to modify deployment processes could upload a maliciously crafted YAML configuration, potentially allowing for remote execution of arbitrary code, running in the same context as the Octopus Server (for self-hosted installations by default, SYSTEM).", "severity": [ { "type": "CVSS_V3", "score": "CVSS:3.0/AV:N/AC:L/PR:L/UI:N/S:U/C:H/I:H/A:H" } ], "affected": [ ], "references": [ { "type": "ADVISORY", "url": "https://nvd.nist.gov/vuln/detail/CVE-2018-18850" }, { "type": "WEB", "url": "https://github.com/OctopusDeploy/Issues/issues/5042" } ], "database_specific": { "cwe_ids": [ ], "severity": "HIGH", "github_reviewed": false } }
483
301
/* * Copyright (c) 2018-2019 "Neo4j, Inc." [https://neo4j.com] * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.opencypher.gremlin.queries; import static org.assertj.core.api.Assertions.assertThat; import java.util.List; import java.util.Map; import org.junit.ClassRule; import org.junit.Test; import org.opencypher.gremlin.rules.GremlinServerExternalResource; public class ExplainTest { @ClassRule public static final GremlinServerExternalResource gremlinServer = new GremlinServerExternalResource(); private List<Map<String, Object>> submitAndGet(String cypher) { return gremlinServer.cypherGremlinClient().submit(cypher).all(); } @Test public void explainIntegration() throws Exception { String cypher = "EXPLAIN\n" + "MATCH (s)-[:MEMBER_OF]->(ss)\n" + "RETURN ss.name AS system, collect(s.name) AS stars"; List<Map<String, Object>> result = submitAndGet(cypher); assertThat(result).hasSize(1); Map<String, Object> explain = result.get(0); assertThat(explain.keySet()) .containsExactly( "translation", "options" ); assertThat(explain) .extracting( "options" ) .containsExactly( "[EXPLAIN]" ); } }
724
3,200
#!/usr/bin/python # # Copyright 2018-2021 Polyaxon, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import pytest from marshmallow import ValidationError from polyaxon import types from polyaxon.polyflow import V1Component, V1RunKind, ops_params from polyaxon.utils.tz_utils import local_datetime, now from tests.utils import BaseTestCase @pytest.mark.components_mark class TestComponentsConfigs(BaseTestCase): def test_passing_params_declarations_raises(self): config_dict = { "params": {"foo": {"value": "bar"}}, "declarations": {"foo": "bar"}, } with self.assertRaises(ValidationError): V1Component.from_dict(config_dict) def test_passing_wrong_params(self): config_dict = {"params": {"foo": "bar"}} with self.assertRaises(ValidationError): V1Component.from_dict(config_dict) def test_passing_params_raises(self): config_dict = {"params": {"foo": "bar"}} with self.assertRaises(ValidationError): V1Component.from_dict(config_dict) def test_param_validation_with_inputs(self): config_dict = { "inputs": [ {"name": "param1", "type": types.STR}, {"name": "param2", "type": types.INT}, {"name": "param3", "type": types.FLOAT}, {"name": "param4", "type": types.BOOL}, {"name": "param5", "type": types.DICT}, {"name": "param6", "type": types.LIST}, {"name": "param7", "type": types.GCS}, {"name": "param8", "type": types.S3}, {"name": "param9", "type": types.WASB}, {"name": "param10", "type": types.PATH}, ], "run": {"kind": V1RunKind.JOB, "container": {"image": "test"}}, } component = V1Component.from_dict(config_dict) params = { "param1": {"value": "text"}, "param2": {"value": 12}, "param3": {"value": 13.3}, "param4": {"value": False}, "param5": {"value": {"foo": "bar"}}, "param6": {"value": [1, 3, 45, 5]}, "param7": {"value": "gs://bucket/path/to/blob/"}, "param8": {"value": "s3://test/this/is/bad/key.txt"}, "param9": {"value": "wasbs://[email protected]/"}, "param10": {"value": "/foo/bar"}, } validated_params = ops_params.validate_params( params=params, inputs=component.inputs, outputs=None, is_template=False ) assert params == {p.name: {"value": p.param.value} for p in validated_params} # Passing missing params params.pop("param1") params.pop("param2") with self.assertRaises(ValidationError): ops_params.validate_params( params=params, inputs=component.inputs, outputs=None, is_template=False ) def test_param_validation_with_outputs(self): config_dict = { "outputs": [ {"name": "param1", "type": types.STR}, {"name": "param2", "type": types.INT}, {"name": "param3", "type": types.FLOAT}, {"name": "param4", "type": types.BOOL}, {"name": "param5", "type": types.DICT}, {"name": "param6", "type": types.LIST}, {"name": "param7", "type": types.GCS}, {"name": "param8", "type": types.S3}, {"name": "param9", "type": types.WASB}, {"name": "param10", "type": types.PATH}, {"name": "param11", "type": types.METRIC}, {"name": "param12", "type": types.METADATA}, {"name": "param13", "type": types.METADATA}, {"name": "param14", "type": types.METADATA}, ], "run": {"kind": V1RunKind.JOB, "container": {"image": "test"}}, } component = V1Component.from_dict(config_dict) params = { "param1": {"value": "text"}, "param2": {"value": 12}, "param3": {"value": 13.3}, "param4": {"value": False}, "param5": {"value": {"foo": "bar"}}, "param6": {"value": [1, 3, 45, 5]}, "param7": {"value": "gs://bucket/path/to/blob/"}, "param8": {"value": "s3://test/this/is/bad/key.txt"}, "param9": {"value": "wasbs://[email protected]/"}, "param10": {"value": "/foo/bar"}, "param11": {"value": 124.4}, "param12": {"value": {"foo": 124.4}}, "param13": {"value": {"foo": "bar"}}, "param14": {"value": {"foo": ["foo", 124.4]}}, } validated_params = ops_params.validate_params( params=params, inputs=None, outputs=component.outputs, is_template=False ) assert params == {p.name: {"value": p.param.value} for p in validated_params} # Passing missing params params.pop("param1") params.pop("param2") validated_params = ops_params.validate_params( params=params, inputs=None, outputs=component.outputs, is_template=False ) params["param1"] = {"value": None} params["param2"] = {"value": None} assert params == {p.name: {"value": p.param.value} for p in validated_params} def test_required_input_no_param_only_validated_on_run(self): # Inputs config_dict = { "inputs": [ {"name": "param1", "type": types.STR}, {"name": "param10", "type": types.PATH}, ], "run": {"kind": V1RunKind.JOB, "container": {"image": "test"}}, } config = V1Component.from_dict(config_dict) with self.assertRaises(ValidationError): ops_params.validate_params( params={"param1": {"value": "text"}}, inputs=config.inputs, outputs=config.outputs, is_template=False, ) # Outputs config_dict = { "outputs": [ {"name": "param1", "type": types.STR}, {"name": "param10", "type": types.PATH}, ], "run": {"kind": V1RunKind.JOB, "container": {"image": "test"}}, } config = V1Component.from_dict(config_dict) ops_params.validate_params( params={"param1": {"value": "text"}}, inputs=config.inputs, outputs=config.outputs, is_template=False, ) # IO config_dict = { "inputs": [{"name": "param1", "type": types.STR}], "outputs": [{"name": "param10", "type": types.PATH}], "run": {"kind": V1RunKind.JOB, "container": {"image": "test"}}, } config = V1Component.from_dict(config_dict) ops_params.validate_params( params={"param1": {"value": "text"}}, inputs=config.inputs, outputs=config.outputs, is_template=False, ) def test_incomplete_params(self): config_dict = { "inputs": [ {"name": "param1", "type": types.INT}, {"name": "param2", "type": types.INT}, ], "run": {"kind": V1RunKind.JOB, "container": {"image": "test"}}, } config = V1Component.from_dict(config_dict) with self.assertRaises(ValidationError): ops_params.validate_params( params={"param1": {"value": 1}}, inputs=config.inputs, outputs=config.outputs, is_template=False, ) config_dict = { "outputs": [ {"name": "param1", "type": types.INT, "value": 12, "isOptional": True}, {"name": "param2", "type": types.INT}, ], "run": {"kind": V1RunKind.JOB, "container": {"image": "test"}}, } config = V1Component.from_dict(config_dict) ops_params.validate_params( params={"param1": {"value": 1}}, inputs=config.inputs, outputs=config.outputs, is_template=False, ) def test_extra_params(self): # inputs config_dict = { "inputs": [{"name": "param1", "type": types.INT}], "run": {"kind": V1RunKind.JOB, "container": {"image": "test"}}, } config = V1Component.from_dict(config_dict) with self.assertRaises(ValidationError): ops_params.validate_params( params={"param1": {"value": 1}, "param2": {"value": 2}}, inputs=config.inputs, outputs=config.outputs, is_template=False, ) # outputs config_dict = { "outputs": [{"name": "param1", "type": types.INT}], "run": {"kind": V1RunKind.JOB, "container": {"image": "test"}}, } config = V1Component.from_dict(config_dict) with self.assertRaises(ValidationError): ops_params.validate_params( params={"param1": {"value": 1}, "param2": {"value": 2}}, inputs=config.inputs, outputs=config.outputs, is_template=False, ) def test_param_validation_with_mismatched_inputs(self): config_dict = { "inputs": [{"name": "param1", "type": types.INT}], "run": {"kind": V1RunKind.JOB, "container": {"image": "test"}}, } config = V1Component.from_dict(config_dict) # Passing correct param ops_params.validate_params( params={"param1": {"value": 1}}, inputs=config.inputs, outputs=config.outputs, is_template=False, ) ops_params.validate_params( params={"param1": {"value": "-1"}}, inputs=config.inputs, outputs=config.outputs, is_template=False, ) ops_params.validate_params( params={"param1": {"value": 12.0}}, inputs=config.inputs, outputs=config.outputs, is_template=False, ) ops_params.validate_params( params={"param1": {"value": "12."}}, inputs=config.inputs, outputs=config.outputs, is_template=False, ) ops_params.validate_params( params={"param1": {"value": 12.0}}, inputs=config.inputs, outputs=config.outputs, is_template=False, ) ops_params.validate_params( params={"param1": {"value": "12.0"}}, inputs=config.inputs, outputs=config.outputs, is_template=False, ) # Passing wrong type with self.assertRaises(ValidationError): ops_params.validate_params( params={"param1": {"value": "text"}}, inputs=config.inputs, outputs=config.outputs, is_template=False, ) with self.assertRaises(ValidationError): ops_params.validate_params( params={"param1": {"value": 12.1}}, inputs=config.inputs, outputs=config.outputs, is_template=False, ) with self.assertRaises(ValidationError): ops_params.validate_params( params={"param1": {"value": "12.1"}}, inputs=config.inputs, outputs=config.outputs, is_template=False, ) with self.assertRaises(ValidationError): ops_params.validate_params( params={"param1": {"value": {"foo": "bar"}}}, inputs=config.inputs, outputs=config.outputs, is_template=False, ) with self.assertRaises(ValidationError): ops_params.validate_params( params={"param1": {"value": "gs://bucket/path/to/blob/"}}, inputs=config.inputs, outputs=config.outputs, is_template=False, ) config_dict = { "inputs": [{"name": "param2", "type": types.FLOAT}], "run": {"kind": V1RunKind.JOB, "container": {"image": "test"}}, } config = V1Component.from_dict(config_dict) # Passing correct param ops_params.validate_params( params={"param2": {"value": 1}}, inputs=config.inputs, outputs=config.outputs, is_template=False, ) ops_params.validate_params( params={"param2": {"value": False}}, # auto-conversion (int to 0 to 0.0) inputs=config.inputs, outputs=config.outputs, is_template=False, ) # Passing wrong type with self.assertRaises(ValidationError): ops_params.validate_params( params={"param2": {"value": "test"}}, inputs=config.inputs, outputs=config.outputs, is_template=False, ) with self.assertRaises(ValidationError): ops_params.validate_params( params={"param2": {"foo": "bar"}}, inputs=config.inputs, outputs=config.outputs, is_template=False, ) with self.assertRaises(ValidationError): ops_params.validate_params( params={"param2": {"value": ["gs://bucket/path/to/blob/"]}}, inputs=config.inputs, outputs=config.outputs, is_template=False, ) config_dict = { "inputs": [{"name": "param7", "type": types.WASB}], "run": {"kind": V1RunKind.JOB, "container": {"image": "test"}}, } config = V1Component.from_dict(config_dict) # Passing correct param ops_params.validate_params( params={ "param7": {"value": "wasbs://[email protected]/"} }, inputs=config.inputs, outputs=config.outputs, is_template=False, ) # Passing wrong param with self.assertRaises(ValidationError): ops_params.validate_params( params={"param7": {"value": "gs://bucket/path/to/blob/"}}, inputs=config.inputs, outputs=config.outputs, is_template=False, ) with self.assertRaises(ValidationError): ops_params.validate_params( params={"param7": {"value": "s3://test/this/is/bad/key.txt"}}, inputs=config.inputs, outputs=config.outputs, is_template=False, ) with self.assertRaises(ValidationError): ops_params.validate_params( params={"param7": {"value": 1}}, inputs=config.inputs, outputs=config.outputs, is_template=False, ) def test_param_validation_with_mismatched_outputs(self): config_dict = { "outputs": [{"name": "param1", "type": types.INT}], "run": {"kind": V1RunKind.JOB, "container": {"image": "test"}}, } config = V1Component.from_dict(config_dict) # Passing correct param ops_params.validate_params( params={"param1": {"value": 1}}, inputs=config.inputs, outputs=config.outputs, is_template=False, ) ops_params.validate_params( params={"param1": {"value": 12.0}}, inputs=config.inputs, outputs=config.outputs, is_template=False, ) # Passing wrong type with self.assertRaises(ValidationError): ops_params.validate_params( params={"param1": {"value": "text"}}, inputs=config.inputs, outputs=config.outputs, is_template=False, ) with self.assertRaises(ValidationError): ops_params.validate_params( params={"param1": {"value": 12.1}}, inputs=config.inputs, outputs=config.outputs, is_template=False, ) with self.assertRaises(ValidationError): ops_params.validate_params( params={"param1": {"value": {"foo": "bar"}}}, inputs=config.inputs, outputs=config.outputs, is_template=False, ) with self.assertRaises(ValidationError): ops_params.validate_params( params={"param1": {"value": "gs://bucket/path/to/blob/"}}, inputs=config.inputs, outputs=config.outputs, is_template=False, ) config_dict = { "outputs": [{"name": "param2", "type": types.FLOAT}], "run": {"kind": V1RunKind.JOB, "container": {"image": "test"}}, } config = V1Component.from_dict(config_dict) # Passing correct param ops_params.validate_params( params={"param2": {"value": "1.1"}}, inputs=config.inputs, outputs=config.outputs, is_template=False, ) ops_params.validate_params( params={"param2": {"value": False}}, # auto-conversion (int to 0 to 0.0) inputs=config.inputs, outputs=config.outputs, is_template=False, ) # Passing wrong type with self.assertRaises(ValidationError): ops_params.validate_params( params={"param2": {"value": "test"}}, inputs=config.inputs, outputs=config.outputs, is_template=False, ) with self.assertRaises(ValidationError): ops_params.validate_params( params={"param2": {"value": {"foo": "bar"}}}, inputs=config.inputs, outputs=config.outputs, is_template=False, ) with self.assertRaises(ValidationError): ops_params.validate_params( params={"param2": {"value": ["gs://bucket/path/to/blob/"]}}, inputs=config.inputs, outputs=config.outputs, is_template=False, ) config_dict = { "outputs": [{"name": "param7", "type": types.WASB}], "run": {"kind": V1RunKind.JOB, "container": {"image": "test"}}, } config = V1Component.from_dict(config_dict) # Passing correct param ops_params.validate_params( params={ "param7": {"value": "wasbs://[email protected]/"} }, inputs=config.inputs, outputs=config.outputs, is_template=False, ) # Passing wrong param with self.assertRaises(ValidationError): ops_params.validate_params( params={"param7": {"value": "gs://bucket/path/to/blob/"}}, inputs=config.inputs, outputs=config.outputs, is_template=False, ) with self.assertRaises(ValidationError): ops_params.validate_params( params={"param7": {"value": "s3://test/this/is/bad/key.txt"}}, inputs=config.inputs, outputs=config.outputs, is_template=False, ) with self.assertRaises(ValidationError): ops_params.validate_params( params={"param7": {"value": 1}}, inputs=config.inputs, outputs=config.outputs, is_template=False, ) def test_experiment_and_job_refs_params(self): config_dict = { "inputs": [ {"name": "param1", "type": types.INT}, {"name": "param2", "type": types.FLOAT}, {"name": "param9", "type": types.WASB}, {"name": "param11", "type": types.METRIC}, ], "run": {"kind": V1RunKind.JOB, "container": {"image": "test"}}, } op = V1Component.from_dict(config_dict) params = { "param1": { "ref": "runs.64332180bfce46eba80a65caf73c5396", "value": "outputs.foo", }, "param2": { "ref": "runs.0de53b5bf8b04a219d12a39c6b92bcce", "value": "outputs.foo", }, "param9": {"value": "wasbs://[email protected]/"}, "param11": { "ref": "runs.fcc462d764104eb698d3cca509f34154", "value": "outputs.accuracy", }, } validated_params = ops_params.validate_params( params=params, inputs=op.inputs, outputs=None, is_template=False ) assert {p.name: p.param.to_dict() for p in validated_params} == { "param1": { "ref": "runs.64332180bfce46eba80a65caf73c5396", "value": "outputs.foo", }, "param2": { "ref": "runs.0de53b5bf8b04a219d12a39c6b92bcce", "value": "outputs.foo", }, "param9": {"value": "wasbs://[email protected]/"}, "param11": { "ref": "runs.fcc462d764104eb698d3cca509f34154", "value": "outputs.accuracy", }, } def test_job_refs_params(self): config_dict = { "inputs": [ {"name": "param1", "type": types.INT}, {"name": "param9", "type": types.FLOAT}, ], "run": {"kind": V1RunKind.JOB, "container": {"image": "test"}}, } params = { "param1": {"ref": "job.A", "value": "outputs.foo"}, "param9": {"value": 13.1}, } config = V1Component.from_dict(config_dict) # Validation outside the context of a pipeline with self.assertRaises(ValidationError): ops_params.validate_params( params=params, inputs=config.inputs, outputs=None, is_template=False ) def test_component_base_attrs(self): config_dict = { "concurrency": "foo", "run": {"kind": V1RunKind.JOB, "container": {"image": "test"}}, } with self.assertRaises(ValidationError): V1Component.from_dict(config_dict) config_dict = { "concurrency": 2, "run": {"kind": V1RunKind.JOB, "container": {"image": "test"}}, } with self.assertRaises(ValidationError): V1Component.from_dict(config_dict) config_dict = { "kind": "component", "matrix": { "concurrency": 2, "kind": "mapping", "values": [{"a": 1}, {"a": 1}], }, "run": {"kind": V1RunKind.JOB, "container": {"image": "test"}}, } with self.assertRaises(ValidationError): V1Component.from_dict(config_dict) config_dict = { "kind": "component", "matrix": { "concurrency": 2, "kind": "mapping", "values": [{"a": 1}, {"a": 1}], }, "schedule": { "kind": "datetime", "startAt": local_datetime(now()).isoformat(), }, "termination": {"timeout": 1000}, "run": {"kind": V1RunKind.JOB, "container": {"image": "test"}}, } with self.assertRaises(ValidationError): V1Component.from_dict(config_dict) config_dict = { "kind": "component", "termination": {"timeout": 1000}, "run": {"kind": V1RunKind.JOB, "container": {"image": "test"}}, } config = V1Component.from_dict(config_dict) assert config.to_dict() == config_dict def test_component_and_hooks(self): config_dict = { "kind": "component", "hooks": [ {"trigger": "succeeded", "connection": "connection1", "hubRef": "ref1"}, {"connection": "connection1", "hubRef": "ref2"}, ], "run": {"kind": V1RunKind.JOB, "container": {"image": "test"}}, } config = V1Component.from_dict(config_dict) assert config.to_dict() == config_dict def test_component_template(self): config_dict = { "kind": "component", "hooks": [ {"trigger": "succeeded", "connection": "connection1", "hubRef": "ref2"}, {"connection": "connection1", "hubRef": "ref2"}, ], "run": {"kind": V1RunKind.JOB, "container": {"image": "test"}}, "template": { "description": "This is a template, check the fields", "fields": ["actions[1].hubRef", "hooks[0].trigger"], }, } config = V1Component.from_dict(config_dict) assert config.to_dict() == config_dict
13,485
3,576
#pragma once #include "Winheaders.h" namespace blackbone { #pragma warning(push) #pragma warning(disable : 4201) typedef struct _IMAGE_DELAYLOAD_DESCRIPTOR { union { DWORD AllAttributes; struct { DWORD RvaBased : 1; // Delay load version 2 DWORD ReservedAttributes : 31; }; } Attributes; DWORD DllNameRVA; // RVA to the name of the target library (NULL-terminate ASCII string) DWORD ModuleHandleRVA; // RVA to the HMODULE caching location (PHMODULE) DWORD ImportAddressTableRVA; // RVA to the start of the IAT (PIMAGE_THUNK_DATA) DWORD ImportNameTableRVA; // RVA to the start of the name table (PIMAGE_THUNK_DATA::AddressOfData) DWORD BoundImportAddressTableRVA; // RVA to an optional bound IAT DWORD UnloadInformationTableRVA; // RVA to an optional unload info table DWORD TimeDateStamp; // 0 if not bound, Otherwise, date/time of the target DLL } IMAGE_DELAYLOAD_DESCRIPTOR, *PIMAGE_DELAYLOAD_DESCRIPTOR; #pragma warning(pop) typedef struct _EXCEPTION_REGISTRATION_RECORD { _EXCEPTION_REGISTRATION_RECORD *Next; PEXCEPTION_ROUTINE Handler; } EXCEPTION_REGISTRATION_RECORD, *PEXCEPTION_REGISTRATION_RECORD; }
587
529
<filename>setup/CustomActions/DetectVSRunning/DevEnvNotRunning.cpp #include "StdAfx.h" #include "ToolsMsmCA.h" const int messageIdCloseDevEnv = 25000; const wchar_t* const DevEnvExe = L"devenv.exe"; const wchar_t* const DexploreExe = L"dexplore.exe"; const wchar_t* const WDExpressExe = L"WDExpress.exe"; #pragma comment(linker, "/EXPORT:DevEnvNotRunning=_DevEnvNotRunning@4") enum DevEnvStatus { StatusNotRunning, StatusRunning, StatusError, }; DevEnvStatus GetDevEnvStatus(MSIHANDLE hInstall) { DevEnvStatus status = StatusNotRunning; const DWORD MaxProcessIds = 8192; DWORD* processIds = new DWORD[MaxProcessIds]; const DWORD cbProcessIdsAllocated = MaxProcessIds * sizeof(*processIds); DWORD cbProcessIdsUsed; DWORD cProcesses; const int cchProcessName = MAX_PATH; wchar_t szProcessName[MAX_PATH + 1]; if (!EnumProcesses(processIds, cbProcessIdsAllocated, &cbProcessIdsUsed)) { status = StatusError; Log(LogError, hInstall, L"EnumProcesses failed, GetLastError() = %u.", GetLastError()); goto Done; } cProcesses = cbProcessIdsUsed / sizeof(*processIds); for (DWORD i = 0; i < cProcesses; i++) { HANDLE hProcess = OpenProcess(PROCESS_QUERY_INFORMATION | PROCESS_VM_READ, FALSE, processIds[i]); if (hProcess) { HMODULE hMod; DWORD cbNeeded; if (EnumProcessModules(hProcess, &hMod, sizeof(hMod), &cbNeeded)) { if (GetModuleBaseNameW(hProcess, hMod, szProcessName, cchProcessName)) { szProcessName[cchProcessName] = 0; if (0 == _wcsicmp(szProcessName, DevEnvExe)) { Log(LogInfo, hInstall, L"DevEnv.exe found (PID %u).", processIds[i]); status = StatusRunning; break; } if (0 == _wcsicmp(szProcessName, DexploreExe)) { Log(LogInfo, hInstall, L"dexplore.exe found (PID %u).", processIds[i]); status = StatusRunning; break; } if (0 == _wcsicmp(szProcessName, WDExpressExe)) { Log(LogInfo, hInstall, L"WDExpress.exe found (PID %u).", processIds[i]); status = StatusRunning; break; } } else { Log(LogInfo, hInstall, L"GetModuleBaseNameW failed for PID %u, GetLastError() = %u.", processIds[i], GetLastError()); } } else { Log(LogInfo, hInstall, L"EnumProcessModules failed for PID %u, GetLastError() = %u.", processIds[i], GetLastError()); } } else { Log(LogInfo, hInstall, L"OpenProcess failed for PID %u, GetLastError() = %u.", processIds[i], GetLastError()); } } Done: delete[] processIds; return status; } extern "C" UINT __stdcall DevEnvNotRunning(MSIHANDLE hInstall) { UINT errCode; DevEnvStatus status; bool cancelled = false; MSIHANDLE hMessage = NULL; status = GetDevEnvStatus(hInstall); while (!cancelled && status == StatusRunning) { if (hMessage == NULL) { hMessage = MsiCreateRecord(2); } MsiRecordClearData(hMessage); MsiRecordSetInteger(hMessage, 1, messageIdCloseDevEnv); int result = MsiProcessMessage( hInstall, static_cast<INSTALLMESSAGE>(INSTALLMESSAGE_WARNING | MB_RETRYCANCEL), hMessage); cancelled = (result == IDCANCEL); if (!cancelled) { status = GetDevEnvStatus(hInstall); } } switch (status) { case StatusNotRunning: errCode = ERROR_SUCCESS; break; case StatusRunning: errCode = ERROR_INSTALL_USEREXIT; break; case StatusError: default: Log(LogError, hInstall, L"Failed to search for running DevEnv.exe process."); errCode = ERROR_INSTALL_FAILURE; break; } if (hMessage != NULL) MsiCloseHandle(hMessage); return errCode; }
2,362
9,095
<gh_stars>1000+ """ ========================================== Miscellaneous routines (:mod:`scipy.misc`) ========================================== .. currentmodule:: scipy.misc Various utilities that don't have another home. .. autosummary:: :toctree: generated/ ascent - Get example image for processing central_diff_weights - Weights for an n-point central mth derivative derivative - Find the nth derivative of a function at a point face - Get example image for processing electrocardiogram - Load an example of a 1-D signal. """ from ._common import * from . import _common # Deprecated namespaces, to be removed in v2.0.0 from . import common, doccer __all__ = _common.__all__ del _common from scipy._lib._testutils import PytestTester test = PytestTester(__name__) del PytestTester
240
379
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ This module test dyanmic_gru op. """ import unittest from multiprocessing import Manager import test_op_base import numpy as np import paddle.fluid as fluid import paddle_fl.mpc as pfl_mpc from paddle_fl.mpc.data_utils.data_utils import get_datautils aby3 = get_datautils('aby3') class TestInput(test_op_base.TestOpBase): def dyanmic_gru_op(self, **kwargs): role = kwargs['role'] data = kwargs['data'] data_share = kwargs['data_share'][role] weight = kwargs['weight'] weight_share = kwargs['weight_share'][role] return_results = kwargs['return_results'] return_results_cheb = kwargs['return_results_cheb'] expected_result = kwargs['expect_results'] pfl_mpc.init("aby3", role, "localhost", self.server, int(self.port)) hidden_dim = 1 data_paddle = fluid.data(name='input_paddle', shape=[3, 3], dtype='float32', lod_level=1) ldata_paddle = fluid.create_lod_tensor(data, [[3]], fluid.CPUPlace()) w_param_attrs = fluid.ParamAttr(name='gru_weight', learning_rate=0.5, initializer=fluid.initializer.NumpyArrayInitializer(weight), trainable=True) hidden_paddle = fluid.layers.dynamic_gru(input=data_paddle, size=hidden_dim, param_attr=w_param_attrs, gate_activation='sigmoid', candidate_activation='relu') data_mpc = fluid.data(name='input_mpc', shape=[3, 2, 3], dtype='int64', lod_level=1) # trans batch information to shape[0] data_share_trans = np.transpose(data_share, [1, 0, 2]) ldata_mpc = fluid.create_lod_tensor(data_share_trans, [[3]], fluid.CPUPlace()) w_param_attrs1 = fluid.ParamAttr(name='mpc_gru_weight', learning_rate=0.5, initializer=pfl_mpc.initializer.NumpyArrayInitializer(weight_share), trainable=True) w_param_attrs2 = fluid.ParamAttr(name='mpc_gru_weight_cheb', learning_rate=0.5, initializer=pfl_mpc.initializer.NumpyArrayInitializer(weight_share), trainable=True) hidden_mpc = pfl_mpc.layers.dynamic_gru(input=data_mpc, size=hidden_dim, param_attr=w_param_attrs1) hidden_mpc_cheb = pfl_mpc.layers.dynamic_gru(input=data_mpc, size=hidden_dim, param_attr=w_param_attrs2, gate_activation='sigmoid_chebyshev') exe = fluid.Executor(place=fluid.CPUPlace()) exe.run(fluid.default_startup_program()) results = exe.run(feed={'input_paddle': ldata_paddle, 'input_mpc': ldata_mpc}, fetch_list=[hidden_paddle, hidden_mpc, hidden_mpc_cheb], return_numpy=False) return_results.append(np.array(results[1])) return_results_cheb.append(np.array(results[2])) expected_result.append(np.array(results[0])) def test_dyanmic_gru_op(self): data = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [-1.0, -2.0, -3.0]]).astype('float32') data_share = aby3.make_shares(data) data_all3shares = np.array([aby3.get_shares(data_share, i) for i in range(3)]) weight = np.array([[0.0, 0.0, 0.0]]).astype('float32') weight_share = aby3.make_shares(weight) weight_all3shares = np.array([aby3.get_shares(weight_share, i) for i in range(3)]) return_results = Manager().list() return_results_cheb = Manager().list() expect_results = Manager().list() ret = self.multi_party_run(target=self.dyanmic_gru_op, data=data, data_share = data_all3shares, weight=weight, weight_share=weight_all3shares, return_results=return_results, return_results_cheb=return_results_cheb, expect_results=expect_results) self.assertEqual(ret[0], True) revealed = aby3.reconstruct(np.array(return_results)) revealed_cheb = aby3.reconstruct(np.array(return_results_cheb)) print("expected:", expect_results[0]) print("reveal: ", revealed) print("reveal_cheb: ", revealed_cheb) self.assertTrue(np.allclose(revealed, expect_results[0], atol=1e-1*5)) self.assertTrue(np.allclose(revealed_cheb, expect_results[0], atol=1e-1*5)) if __name__ == '__main__': unittest.main()
2,661
3,486
package com.thinkaurelius.titan.util.system; import java.net.InetAddress; import java.net.UnknownHostException; import java.util.Collection; public class NetworkUtil { public static String getLoopbackAddress() { // InetAddress.getLoopbackAddress() is @since 1.7 // // Aside from that, getLoopbackAddress() seems preferable to // InetAddress.getByName("localhost") since the former doesn't seem to // require the local resolver to be sane. //return InetAddress.getLoopbackAddress().getHostAddress(); try { return InetAddress.getByName("localhost").getHostAddress(); } catch (UnknownHostException e) { throw new RuntimeException(e); } } public static InetAddress getLocalHost() { try { return InetAddress.getLocalHost(); } catch (UnknownHostException e) { throw new AssertionError(e); } } public static String getLocalAddress() { return getLocalHost().getHostAddress(); } public static String getLocalHostName() { return getLocalHost().getHostName(); } public static boolean hasLocalAddress(Collection<String> endpoints) { return endpoints.contains(getLoopbackAddress()) || endpoints.contains(getLocalAddress()) || endpoints.contains(getLocalHostName()); } public static boolean isLocalConnection(String hostname) { InetAddress localhost = NetworkUtil.getLocalHost(); return hostname.equalsIgnoreCase(NetworkUtil.getLoopbackAddress()) || hostname.equals(localhost.getHostAddress()) || hostname.equals(localhost.getHostName()) || hostname.equals(localhost.getCanonicalHostName()); } }
670
971
<filename>dl-flinker/dl-flinker-eswriter/src/main/java/com/ucar/datalink/flinker/plugin/writer/eswriter/client/rest/vo/search/dsl/DSLSearchVo.java package com.ucar.datalink.flinker.plugin.writer.eswriter.client.rest.vo.search.dsl; import com.ucar.datalink.flinker.plugin.writer.eswriter.client.rest.constant.CharacterConstant; import com.ucar.datalink.flinker.plugin.writer.eswriter.client.rest.exception.ElasticSearchException; import com.ucar.datalink.flinker.plugin.writer.eswriter.client.rest.vo.VoItf; import org.apache.commons.lang.StringUtils; /** * * Description: 结构化查询vo * All Rights Reserved. * Created on 2016-6-30 上午11:09:59 * @author 孔增(<EMAIL>) */ public class DSLSearchVo extends VoItf{ /** * 可指定自定义的json串,若已指定searchContext,则以searchContext为准 */ private String content ; private String metaType = "_search"; /** * 结构化查询上下文 */ private SearchContext searchContext; private String templateName; /** * routing值 */ private String routingValue; public DSLSearchVo() {} public DSLSearchVo(String clusterName) { super.clusterName = clusterName; } public String getContent() { return content; } public void setContent(String content) { this.content = content; } public SearchContext getSearchContext() { return searchContext; } public void setSearchContext(SearchContext searchContext) { this.searchContext = searchContext; } public String getMetaType() { return metaType; } public void setMetaType(String metaType) { this.metaType = metaType; } public String getCondition() { if(searchContext != null) { searchContext.checkSearchContext(); return searchContext.toString(); } return content; } public String getTemplateName() { return templateName; } public void setTemplateName(String templateName) { this.templateName = templateName; } public String getRoutingValue() { return routingValue; } public void setRoutingValue(String routingValue) { this.routingValue = routingValue; } @Override public String getUrl() { StringBuffer lastUrl = new StringBuffer("http://" + host); if(index == null && type != null) { throw new ElasticSearchException("when type is not null ,the index is required"); } if(index != null) { lastUrl.append("/"+index); } if(!StringUtils.isBlank(type)) { lastUrl.append("/"+ type); } lastUrl.append("/").append(metaType); if (lastUrl.indexOf(CharacterConstant.QUEST) < 0) { // 不包含?时, lastUrl.append(CharacterConstant.QUEST); } else { lastUrl.append(CharacterConstant.AND); } if (!StringUtils.isBlank(routingValue)) { lastUrl.append("routing="+routingValue); } return lastUrl.toString(); } }
1,020
4,461
from agents.Base_Agent import Base_Agent from utilities.OU_Noise import OU_Noise from utilities.data_structures.Replay_Buffer import Replay_Buffer from torch.optim import Adam import torch import torch.nn.functional as F from torch.distributions import Normal import numpy as np LOG_SIG_MAX = 2 LOG_SIG_MIN = -20 TRAINING_EPISODES_PER_EVAL_EPISODE = 10 EPSILON = 1e-6 class SAC(Base_Agent): """Soft Actor-Critic model based on the 2018 paper https://arxiv.org/abs/1812.05905 and on this github implementation https://github.com/pranz24/pytorch-soft-actor-critic. It is an actor-critic algorithm where the agent is also trained to maximise the entropy of their actions as well as their cumulative reward""" agent_name = "SAC" def __init__(self, config): Base_Agent.__init__(self, config) assert self.action_types == "CONTINUOUS", "Action types must be continuous. Use SAC Discrete instead for discrete actions" assert self.config.hyperparameters["Actor"]["final_layer_activation"] != "Softmax", "Final actor layer must not be softmax" self.hyperparameters = config.hyperparameters self.critic_local = self.create_NN(input_dim=self.state_size + self.action_size, output_dim=1, key_to_use="Critic") self.critic_local_2 = self.create_NN(input_dim=self.state_size + self.action_size, output_dim=1, key_to_use="Critic", override_seed=self.config.seed + 1) self.critic_optimizer = torch.optim.Adam(self.critic_local.parameters(), lr=self.hyperparameters["Critic"]["learning_rate"], eps=1e-4) self.critic_optimizer_2 = torch.optim.Adam(self.critic_local_2.parameters(), lr=self.hyperparameters["Critic"]["learning_rate"], eps=1e-4) self.critic_target = self.create_NN(input_dim=self.state_size + self.action_size, output_dim=1, key_to_use="Critic") self.critic_target_2 = self.create_NN(input_dim=self.state_size + self.action_size, output_dim=1, key_to_use="Critic") Base_Agent.copy_model_over(self.critic_local, self.critic_target) Base_Agent.copy_model_over(self.critic_local_2, self.critic_target_2) self.memory = Replay_Buffer(self.hyperparameters["Critic"]["buffer_size"], self.hyperparameters["batch_size"], self.config.seed) self.actor_local = self.create_NN(input_dim=self.state_size, output_dim=self.action_size * 2, key_to_use="Actor") self.actor_optimizer = torch.optim.Adam(self.actor_local.parameters(), lr=self.hyperparameters["Actor"]["learning_rate"], eps=1e-4) self.automatic_entropy_tuning = self.hyperparameters["automatically_tune_entropy_hyperparameter"] if self.automatic_entropy_tuning: self.target_entropy = -torch.prod(torch.Tensor(self.environment.action_space.shape).to(self.device)).item() # heuristic value from the paper self.log_alpha = torch.zeros(1, requires_grad=True, device=self.device) self.alpha = self.log_alpha.exp() self.alpha_optim = Adam([self.log_alpha], lr=self.hyperparameters["Actor"]["learning_rate"], eps=1e-4) else: self.alpha = self.hyperparameters["entropy_term_weight"] self.add_extra_noise = self.hyperparameters["add_extra_noise"] if self.add_extra_noise: self.noise = OU_Noise(self.action_size, self.config.seed, self.hyperparameters["mu"], self.hyperparameters["theta"], self.hyperparameters["sigma"]) self.do_evaluation_iterations = self.hyperparameters["do_evaluation_iterations"] def save_result(self): """Saves the result of an episode of the game. Overriding the method in Base Agent that does this because we only want to keep track of the results during the evaluation episodes""" if self.episode_number == 1 or not self.do_evaluation_iterations: self.game_full_episode_scores.extend([self.total_episode_score_so_far]) self.rolling_results.append(np.mean(self.game_full_episode_scores[-1 * self.rolling_score_window:])) self.save_max_result_seen() elif (self.episode_number - 1) % TRAINING_EPISODES_PER_EVAL_EPISODE == 0: self.game_full_episode_scores.extend([self.total_episode_score_so_far for _ in range(TRAINING_EPISODES_PER_EVAL_EPISODE)]) self.rolling_results.extend([np.mean(self.game_full_episode_scores[-1 * self.rolling_score_window:]) for _ in range(TRAINING_EPISODES_PER_EVAL_EPISODE)]) self.save_max_result_seen() def reset_game(self): """Resets the game information so we are ready to play a new episode""" Base_Agent.reset_game(self) if self.add_extra_noise: self.noise.reset() def step(self): """Runs an episode on the game, saving the experience and running a learning step if appropriate""" eval_ep = self.episode_number % TRAINING_EPISODES_PER_EVAL_EPISODE == 0 and self.do_evaluation_iterations self.episode_step_number_val = 0 while not self.done: self.episode_step_number_val += 1 self.action = self.pick_action(eval_ep) self.conduct_action(self.action) if self.time_for_critic_and_actor_to_learn(): for _ in range(self.hyperparameters["learning_updates_per_learning_session"]): self.learn() mask = False if self.episode_step_number_val >= self.environment._max_episode_steps else self.done if not eval_ep: self.save_experience(experience=(self.state, self.action, self.reward, self.next_state, mask)) self.state = self.next_state self.global_step_number += 1 print(self.total_episode_score_so_far) if eval_ep: self.print_summary_of_latest_evaluation_episode() self.episode_number += 1 def pick_action(self, eval_ep, state=None): """Picks an action using one of three methods: 1) Randomly if we haven't passed a certain number of steps, 2) Using the actor in evaluation mode if eval_ep is True 3) Using the actor in training mode if eval_ep is False. The difference between evaluation and training mode is that training mode does more exploration""" if state is None: state = self.state if eval_ep: action = self.actor_pick_action(state=state, eval=True) elif self.global_step_number < self.hyperparameters["min_steps_before_learning"]: action = self.environment.action_space.sample() print("Picking random action ", action) else: action = self.actor_pick_action(state=state) if self.add_extra_noise: action += self.noise.sample() return action def actor_pick_action(self, state=None, eval=False): """Uses actor to pick an action in one of two ways: 1) If eval = False and we aren't in eval mode then it picks an action that has partly been randomly sampled 2) If eval = True then we pick the action that comes directly from the network and so did not involve any random sampling""" if state is None: state = self.state state = torch.FloatTensor([state]).to(self.device) if len(state.shape) == 1: state = state.unsqueeze(0) if eval == False: action, _, _ = self.produce_action_and_action_info(state) else: with torch.no_grad(): _, z, action = self.produce_action_and_action_info(state) action = action.detach().cpu().numpy() return action[0] def produce_action_and_action_info(self, state): """Given the state, produces an action, the log probability of the action, and the tanh of the mean action""" actor_output = self.actor_local(state) mean, log_std = actor_output[:, :self.action_size], actor_output[:, self.action_size:] std = log_std.exp() normal = Normal(mean, std) x_t = normal.rsample() #rsample means it is sampled using reparameterisation trick action = torch.tanh(x_t) log_prob = normal.log_prob(x_t) log_prob -= torch.log(1 - action.pow(2) + EPSILON) log_prob = log_prob.sum(1, keepdim=True) return action, log_prob, torch.tanh(mean) def time_for_critic_and_actor_to_learn(self): """Returns boolean indicating whether there are enough experiences to learn from and it is time to learn for the actor and critic""" return self.global_step_number > self.hyperparameters["min_steps_before_learning"] and \ self.enough_experiences_to_learn_from() and self.global_step_number % self.hyperparameters["update_every_n_steps"] == 0 def learn(self): """Runs a learning iteration for the actor, both critics and (if specified) the temperature parameter""" state_batch, action_batch, reward_batch, next_state_batch, mask_batch = self.sample_experiences() qf1_loss, qf2_loss = self.calculate_critic_losses(state_batch, action_batch, reward_batch, next_state_batch, mask_batch) self.update_critic_parameters(qf1_loss, qf2_loss) policy_loss, log_pi = self.calculate_actor_loss(state_batch) if self.automatic_entropy_tuning: alpha_loss = self.calculate_entropy_tuning_loss(log_pi) else: alpha_loss = None self.update_actor_parameters(policy_loss, alpha_loss) def sample_experiences(self): return self.memory.sample() def calculate_critic_losses(self, state_batch, action_batch, reward_batch, next_state_batch, mask_batch): """Calculates the losses for the two critics. This is the ordinary Q-learning loss except the additional entropy term is taken into account""" with torch.no_grad(): next_state_action, next_state_log_pi, _ = self.produce_action_and_action_info(next_state_batch) qf1_next_target = self.critic_target(torch.cat((next_state_batch, next_state_action), 1)) qf2_next_target = self.critic_target_2(torch.cat((next_state_batch, next_state_action), 1)) min_qf_next_target = torch.min(qf1_next_target, qf2_next_target) - self.alpha * next_state_log_pi next_q_value = reward_batch + (1.0 - mask_batch) * self.hyperparameters["discount_rate"] * (min_qf_next_target) qf1 = self.critic_local(torch.cat((state_batch, action_batch), 1)) qf2 = self.critic_local_2(torch.cat((state_batch, action_batch), 1)) qf1_loss = F.mse_loss(qf1, next_q_value) qf2_loss = F.mse_loss(qf2, next_q_value) return qf1_loss, qf2_loss def calculate_actor_loss(self, state_batch): """Calculates the loss for the actor. This loss includes the additional entropy term""" action, log_pi, _ = self.produce_action_and_action_info(state_batch) qf1_pi = self.critic_local(torch.cat((state_batch, action), 1)) qf2_pi = self.critic_local_2(torch.cat((state_batch, action), 1)) min_qf_pi = torch.min(qf1_pi, qf2_pi) policy_loss = ((self.alpha * log_pi) - min_qf_pi).mean() return policy_loss, log_pi def calculate_entropy_tuning_loss(self, log_pi): """Calculates the loss for the entropy temperature parameter. This is only relevant if self.automatic_entropy_tuning is True.""" alpha_loss = -(self.log_alpha * (log_pi + self.target_entropy).detach()).mean() return alpha_loss def update_critic_parameters(self, critic_loss_1, critic_loss_2): """Updates the parameters for both critics""" self.take_optimisation_step(self.critic_optimizer, self.critic_local, critic_loss_1, self.hyperparameters["Critic"]["gradient_clipping_norm"]) self.take_optimisation_step(self.critic_optimizer_2, self.critic_local_2, critic_loss_2, self.hyperparameters["Critic"]["gradient_clipping_norm"]) self.soft_update_of_target_network(self.critic_local, self.critic_target, self.hyperparameters["Critic"]["tau"]) self.soft_update_of_target_network(self.critic_local_2, self.critic_target_2, self.hyperparameters["Critic"]["tau"]) def update_actor_parameters(self, actor_loss, alpha_loss): """Updates the parameters for the actor and (if specified) the temperature parameter""" self.take_optimisation_step(self.actor_optimizer, self.actor_local, actor_loss, self.hyperparameters["Actor"]["gradient_clipping_norm"]) if alpha_loss is not None: self.take_optimisation_step(self.alpha_optim, None, alpha_loss, None) self.alpha = self.log_alpha.exp() def print_summary_of_latest_evaluation_episode(self): """Prints a summary of the latest episode""" print(" ") print("----------------------------") print("Episode score {} ".format(self.total_episode_score_so_far)) print("----------------------------")
5,586
14,668
<reponame>zealoussnow/chromium<filename>chrome/browser/chromeos/printing/print_servers_provider_factory.h // Copyright 2019 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef CHROME_BROWSER_CHROMEOS_PRINTING_PRINT_SERVERS_PROVIDER_FACTORY_H_ #define CHROME_BROWSER_CHROMEOS_PRINTING_PRINT_SERVERS_PROVIDER_FACTORY_H_ #include <map> #include <memory> #include "base/memory/weak_ptr.h" class AccountId; class Profile; namespace chromeos { class PrintServersProvider; // Dispenses PrintServersProvider objects based on account id. Access to this // object should be sequenced. All methods are called from UI thread. class PrintServersProviderFactory { public: static PrintServersProviderFactory* Get(); PrintServersProviderFactory(); PrintServersProviderFactory(const PrintServersProviderFactory&) = delete; PrintServersProviderFactory& operator=(const PrintServersProviderFactory&) = delete; // Returns a WeakPtr to the PrintServersProvider registered for // |account_id|. If an PrintServersProvider does not exist, one will be // created for |account_id|. The returned object remains valid until // RemoveForUserId or Shutdown is called. base::WeakPtr<PrintServersProvider> GetForAccountId( const AccountId& account_id); // Returns a WeakPtr to the PrintServersProvider registered for |profile| // which could be nullptr if |profile| does not map to a valid AccountId. The // returned object remains valid until RemoveForUserId or Shutdown is called. base::WeakPtr<PrintServersProvider> GetForProfile(Profile* profile); // Returns a WeakPtr to the PrintServersProvider registered for the device. // If requested PrintServersProvider does not exist, the object is // created and registered. The returned object remains valid until Shutdown is // called. Returns nullptr if called after Shutdown or during unit tests. base::WeakPtr<PrintServersProvider> GetForDevice(); // Deletes the PrintServersProvider registered for |account_id|. void RemoveForAccountId(const AccountId& account_id); // Tear down all PrintServersProviders. void Shutdown(); private: ~PrintServersProviderFactory(); std::map<AccountId, std::unique_ptr<PrintServersProvider>> providers_by_user_; std::unique_ptr<PrintServersProvider> device_provider_; }; } // namespace chromeos #endif // CHROME_BROWSER_CHROMEOS_PRINTING_PRINT_SERVERS_PROVIDER_FACTORY_H_
724
519
<filename>neuraxle/pipeline.py<gh_stars>100-1000 """ Neuraxle's Pipeline Classes ==================================== This is the core of Neuraxle's pipelines. You can chain steps to call them one after an other. .. Copyright 2019, Neuraxio Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. .. Thanks to Umaneo Technologies Inc. for their contributions to this Machine Learning project, visit https://www.umaneo.com/ for more information on Umaneo Technologies Inc. """ import shutil import warnings from abc import ABC, abstractmethod from copy import copy from typing import Any, Tuple, List, Union from neuraxle.base import BaseStep, TruncableSteps, NamedTupleList, ResumableStepMixin, ExecutionContext, ExecutionMode, \ MetaStep, _CustomHandlerMethods, \ ForceHandleMixin, Identity from neuraxle.checkpoints import Checkpoint from neuraxle.data_container import DataContainer, ListDataContainer, AbsentValuesNullObject, ZipDataContainer DEFAULT_CACHE_FOLDER = 'cache' class BasePipeline(TruncableSteps, ABC): def __init__(self, steps: NamedTupleList): TruncableSteps.__init__(self, steps_as_tuple=steps) @abstractmethod def fit(self, data_inputs, expected_outputs=None) -> 'BasePipeline': raise NotImplementedError() @abstractmethod def transform(self, data_inputs): raise NotImplementedError() @abstractmethod def fit_transform(self, data_inputs, expected_outputs=None) -> ('BasePipeline', Any): raise NotImplementedError() class Pipeline(BasePipeline): """ Fits and transform steps """ def __init__(self, steps: NamedTupleList, cache_folder=DEFAULT_CACHE_FOLDER): BasePipeline.__init__(self, steps=steps) self.cache_folder = cache_folder def transform(self, data_inputs: Any): """ After loading the last checkpoint, transform each pipeline steps :param data_inputs: the data input to transform :return: transformed data inputs """ data_container = self.transform_data_container(DataContainer(data_inputs=data_inputs, current_ids=None)) return data_container.data_inputs def transform_data_container(self, data_container: DataContainer): data_container = self.handle_transform( data_container, ExecutionContext(root=self.cache_folder, execution_mode=ExecutionMode.TRANSFORM) ) return data_container def fit_transform(self, data_inputs, expected_outputs=None) -> ('Pipeline', Any): """ After loading the last checkpoint, fit transform each pipeline steps :param data_inputs: the data input to fit on :param expected_outputs: the expected data output to fit on :return: the pipeline itself """ new_self, data_container = self.fit_transform_data_container( DataContainer(data_inputs=data_inputs, current_ids=None, expected_outputs=expected_outputs)) return new_self, data_container.data_inputs def fit_transform_data_container(self, data_container) -> Tuple['Pipeline', DataContainer]: context = ExecutionContext(root=self.cache_folder, execution_mode=ExecutionMode.FIT_TRANSFORM) new_self, data_container = self.handle_fit_transform(data_container, context) return new_self, data_container def fit(self, data_inputs, expected_outputs=None) -> 'Pipeline': """ After loading the last checkpoint, fit each pipeline steps :param data_inputs: the data input to fit on :param expected_outputs: the expected data output to fit on :return: the pipeline itself """ return self.fit_data_container( DataContainer(data_inputs=data_inputs, current_ids=None, expected_outputs=expected_outputs)) def fit_data_container(self, data_container) -> 'Pipeline': return self.handle_fit(data_container, ExecutionContext(self.cache_folder, ExecutionMode.FIT)) def inverse_transform(self, processed_outputs) -> Any: """ After transforming all data inputs, and obtaining a prediction, we can inverse transform the processed outputs :param processed_outputs: the forward transformed data input :return: backward transformed processed outputs """ data_container = DataContainer(data_inputs=processed_outputs) context = ExecutionContext(root=self.cache_folder, execution_mode=ExecutionMode.INVERSE_TRANSFORM) for step_name, step in list(reversed(self.items())): data_container = step.handle_inverse_transform(data_container, context) return data_container.data_inputs def _inverse_transform_data_container(self, data_container: DataContainer, context: ExecutionContext) -> DataContainer: """ After transforming all data inputs, and obtaining a prediction, we can inverse transform the processed outputs """ for step_name, step in list(reversed(self.items())): data_container = step.handle_inverse_transform(data_container, context) return data_container def _fit_data_container(self, data_container: DataContainer, context: ExecutionContext) -> 'Pipeline': """ After loading the last checkpoint, fit transform each pipeline steps, but only fit the last pipeline step. :param data_container: the data container to fit transform on :param context: execution context :return: tuple(pipeline, data_container) """ steps_left_to_do, data_container = self._load_checkpoint(data_container, context) index_last_step = len(steps_left_to_do) - 1 new_steps_as_tuple: NamedTupleList = [] for index, (step_name, step) in enumerate(steps_left_to_do): if index != index_last_step: step, data_container = step.handle_fit_transform(data_container, context) else: step = step.handle_fit(data_container, context) new_steps_as_tuple.append((step_name, step)) self.steps_as_tuple = self.steps_as_tuple[ :len(self.steps_as_tuple) - len(steps_left_to_do)] + new_steps_as_tuple return self def _fit_transform_data_container(self, data_container: DataContainer, context: ExecutionContext) -> ( 'Pipeline', DataContainer): """ After loading the last checkpoint, fit transform each pipeline steps :param data_container: the data container to fit transform on :param context: execution context :return: tuple(pipeline, data_container) """ steps_left_to_do, data_container = self._load_checkpoint(data_container, context) new_steps_as_tuple: NamedTupleList = [] for step_name, step in steps_left_to_do: step, data_container = step.handle_fit_transform(data_container, context) new_steps_as_tuple.append((step_name, step)) self.steps_as_tuple = self.steps_as_tuple[:len(self.steps_as_tuple) - len(steps_left_to_do)] + \ new_steps_as_tuple return self, data_container def _transform_data_container(self, data_container: DataContainer, context: ExecutionContext) -> DataContainer: """ After loading the last checkpoint, transform each pipeline steps :param data_container: the data container to transform :return: transformed data container """ steps_left_to_do, data_container = self._load_checkpoint(data_container, context) for step_name, step in steps_left_to_do: data_container = step.handle_transform(data_container, context) return data_container def _load_checkpoint(self, data_container: DataContainer, context: ExecutionContext) -> Tuple[ NamedTupleList, DataContainer]: """ Try loading a pipeline cache with the passed data container. If pipeline cache loading succeeds, find steps left to do, and load the latest data container. :param data_container: the data container to resume :param context: the execution context to resume :return: tuple(steps left to do, last checkpoint data container) """ return self.steps_as_tuple, data_container def flush_all_cache(self): shutil.rmtree(self.cache_folder) class ResumablePipeline(ResumableStepMixin, Pipeline): """ Fits and transform steps after latest checkpoint """ def _load_checkpoint( self, data_container: DataContainer, context: ExecutionContext ) -> Tuple[NamedTupleList, DataContainer]: """ Try loading a pipeline cache with the passed data container. If pipeline cache loading succeeds, find steps left to do, and load the latest data container. :param data_container: the data container to resume :param context: the execution context to resume :return: tuple(steps left to do, last checkpoint data container) """ new_starting_step_index, starting_step_data_container = \ self._get_starting_step_info(data_container, context) loading_context = context.copy() loading_context.pop() loaded_pipeline = self.load(loading_context) if not self.are_steps_before_index_the_same(loaded_pipeline, new_starting_step_index): return self.steps_as_tuple, data_container self._assign_loaded_pipeline_into_self(loaded_pipeline) step = self[new_starting_step_index] if isinstance(step, Checkpoint) or (isinstance(step, MetaStep) and isinstance(step.wrapped, Checkpoint)): starting_step_data_container = step.resume(starting_step_data_container, context) return self[new_starting_step_index:], starting_step_data_container def _assign_loaded_pipeline_into_self(self, loaded_self): self.steps_as_tuple = loaded_self.steps_as_tuple self._refresh_steps() self.hyperparams = loaded_self.hyperparams self.hyperparams_space = loaded_self.hyperparams_space def _get_starting_step_info(self, data_container: DataContainer, context: ExecutionContext) -> Tuple[ int, DataContainer]: """ Find the index of the latest step that can be resumed :param data_container: the data container to resume :return: index of the latest resumable step, data container at starting step """ starting_step_data_container = copy(data_container) starting_step_context = copy(context) current_data_container = copy(data_container) index_latest_checkpoint = 0 for index, (step_name, step) in enumerate(self.items()): if hasattr(step, 'should_resume') and step.should_resume(current_data_container.copy(), starting_step_context): index_latest_checkpoint = index starting_step_data_container = copy(current_data_container) current_data_container = step.hash_data_container(current_data_container) return index_latest_checkpoint, starting_step_data_container def should_resume(self, data_container: DataContainer, context: ExecutionContext) -> bool: """ Return True if the pipeline has a saved checkpoint that it can resume from :param context: execution context :param data_container: the data container to resume :return: bool """ context = context.push(self) for index, (step_name, step) in enumerate(reversed(self.items())): if hasattr(step, 'should_resume') and step.should_resume(data_container, context): return True return False class MiniBatchSequentialPipeline(_CustomHandlerMethods, ForceHandleMixin, Pipeline): """ Mini Batch Sequential Pipeline class to create a pipeline processing data inputs in batch. Provide a default batch size : .. code-block:: python data_inputs = np.array(list(range(10))) pipeline = MiniBatchSequentialPipeline([ SomeStep() ], batch_size=2) pipeline.transform(data_inputs) # SomeStep will receive [array([0, 1]), array([2, 3]), ..., array([8, 9])] pipeline = MiniBatchSequentialPipeline([ SomeStep() ], batch_size=3, keep_incomplete_batch=False) pipeline.transform(data_inputs) # SomeStep will receive: [array([0, 1, 2]), array([3, 4, 5]), array([6, 7, 8])] pipeline = MiniBatchSequentialPipeline( [SomeStep()], batch_size=3, keep_incomplete_batch=True, keep_incomplete_batch=True, default_value_data_inputs=None, default_value_expected_outputs=None ) pipeline.transform(data_inputs) # SomeStep will receive: [array([0, 1, 2]), array([3, 4, 5]), array([6, 7, 8]), array([9, None, None])] pipeline = MiniBatchSequentialPipeline( [SomeStep()], batch_size=3, keep_incomplete_batch=True, default_value_data_inputs=AbsentValuesNullObject() ) pipeline.transform(data_inputs) # SomeStep will receive: [array([0, 1, 2]), array([3, 4, 5]), array([6, 7, 8]), array([9])] Or manually add one or multiple :class`Barrier` steps to the mini batch sequential pipeline : .. code-block:: python data_inputs = np.array(list(range(10))) pipeline = MiniBatchSequentialPipeline([ SomeStep(), Joiner(batch_size=2) ]) pipeline.transform(data_inputs) # SomeStep will receive [array([0, 1]), array([2, 3]), ..., array([8, 9])] pipeline = MiniBatchSequentialPipeline([ SomeStep(), Joiner(batch_size=3, keep_incomplete_batch=False) ]) pipeline.transform(data_inputs) # SomeStep will receive: [array([0, 1, 2]), array([3, 4, 5]), array([6, 7, 8])] pipeline = MiniBatchSequentialPipeline([ SomeStep(), Joiner( batch_size=3, keep_incomplete_batch=True, keep_incomplete_batch=True, default_value_data_inputs=None, default_value_expected_outputs=None ) ]) pipeline.transform(data_inputs) # SomeStep will receive: [array([0, 1, 2]), array([3, 4, 5]), array([6, 7, 8]), array([9, None, None])] pipeline = MiniBatchSequentialPipeline([ SomeStep(), Joiner( batch_size=3, keep_incomplete_batch=True, default_value_data_inputs=AbsentValuesNullObject() ) ]) pipeline.transform(data_inputs) # SomeStep will receive: [array([0, 1, 2]), array([3, 4, 5]), array([6, 7, 8]), array([9])] :param steps: pipeline steps :param batch_size: number of elements to combine into a single batch :param keep_incomplete_batch: (Optional.) A bool representing whether the last batch should be dropped in the case it has fewer than `batch_size` elements; the default behavior is not to drop the smaller batch. :param default_value_data_inputs: expected_outputs default fill value for padding and values outside iteration range, or :class:`~neuraxle.data_container.DataContainer.AbsentValuesNullObject` to trim absent values from the batch :param default_value_expected_outputs: expected_outputs default fill value for padding and values outside iteration range, or :class:`~neuraxle.data_container.DataContainer.AbsentValuesNullObject` to trim absent values from the batch :param cache_folder: cache_folder if its at the root of the pipeline :param mute_joiner_batch_size_warning: If False, will log a warning when automatically setting the joiner batch_size attribut. .. seealso:: :func:`~neuraxle.data_container.DataContainer.minibatches`, :class:`~neuraxle.data_container.DataContainer.AbsentValuesNullObject`, :class:`Pipeline`, :class:`Barrier`, :class:`Joiner`, :class:`~neuraxle.data_container.DataContainer`, :class:`~neuraxle.base.ExecutionContext` """ def __init__( self, steps: NamedTupleList, batch_size=None, keep_incomplete_batch: bool = None, default_value_data_inputs=AbsentValuesNullObject(), default_value_expected_outputs=None, cache_folder=None, mute_joiner_batch_size_warning: bool = True ): Pipeline.__init__(self, steps=steps, cache_folder=cache_folder) ForceHandleMixin.__init__(self) self.default_value_data_inputs = default_value_data_inputs self.default_value_expected_outputs = default_value_expected_outputs self.__validate_barriers_batch_size(batch_size=batch_size) self.__patch_missing_barrier( batch_size=batch_size, keep_incomplete_batch=keep_incomplete_batch, default_value_data_inputs=default_value_data_inputs, default_value_expected_outputs=default_value_expected_outputs ) self.mute_joiner_batch_size_warning = mute_joiner_batch_size_warning self.__patch_barriers_batch_size(batch_size) def set_batch_size(self, batch_size): self.__patch_barriers_batch_size(batch_size) def __validate_barriers_batch_size(self, batch_size): if batch_size is not None: return for _, step in self: if isinstance(step, Barrier): if step.batch_size is None: raise Exception( 'Invalid Joiner batch size {}[{}]. Please provide a default batch size to MiniBatchSequentialPipeline, or add a batch size to {}[{}].'.format( self.name, step.name, self.name, step.name)) def __patch_barriers_batch_size(self, batch_size): if batch_size is None: return for _, step in self: if isinstance(step, Barrier): if step.batch_size is not None and not self.mute_joiner_batch_size_warning: warnings.warn( 'Replacing {}[{}].batch_size by {}.batch_size.'.format(self.name, step.name, self.name)) step.batch_size = batch_size def __patch_missing_barrier( self, batch_size: int, keep_incomplete_batch: bool, default_value_data_inputs: Union[Any, AbsentValuesNullObject] = None, default_value_expected_outputs: Union[Any, AbsentValuesNullObject] = None ): has_barrier: bool = False for _, step in self: if isinstance(step, Barrier): has_barrier = True if not has_barrier: self.steps_as_tuple.append(( 'Joiner', Joiner( batch_size=batch_size, keep_incomplete_batch=keep_incomplete_batch, default_value_data_inputs=default_value_data_inputs, default_value_expected_outputs=default_value_expected_outputs ) )) self._refresh_steps() def transform_data_container(self, data_container: DataContainer, context: ExecutionContext) -> DataContainer: """ Transform all sub pipelines splitted by the Barrier steps. :param data_container: data container to transform. :param context: execution context :return: data container """ sub_pipelines = self._create_sub_pipelines() for sub_pipeline in sub_pipelines: barrier = sub_pipeline[-1] data_container = barrier.join_transform( step=sub_pipeline, data_container=data_container, context=context ) data_container = self.hash_data_container(data_container) return data_container def fit_data_container(self, data_container: DataContainer, context: ExecutionContext) -> BaseStep: """ Fit all sub pipelines splitted by the Barrier steps. :param data_container: data container to transform. :param context: execution context :return: data container """ sub_pipelines = self._create_sub_pipelines() index_start = 0 for sub_pipeline in sub_pipelines: sub_pipeline.setup(context=context) barrier = sub_pipeline[-1] sub_pipeline, data_container = barrier.join_fit_transform( step=sub_pipeline, data_container=data_container, context=context ) current_ids = self.hash(data_container) data_container.set_current_ids(current_ids) new_self = self[:index_start] + sub_pipeline if index_start + len(sub_pipeline) < len(self): new_self += self[index_start + len(sub_pipeline):] self.steps_as_tuple = new_self.steps_as_tuple index_start += len(sub_pipeline) return self def fit_transform_data_container(self, data_container: DataContainer, context: ExecutionContext) -> Tuple[ BaseStep, DataContainer]: """ Transform all sub pipelines splitted by the Barrier steps. :param data_container: data container to transform. :param context: execution context :return: data container """ sub_pipelines = self._create_sub_pipelines() index_start = 0 for sub_pipeline in sub_pipelines: sub_pipeline.setup(context=context) barrier = sub_pipeline[-1] sub_pipeline, data_container = barrier.join_fit_transform( step=sub_pipeline, data_container=data_container, context=context ) data_container = self.hash_data_container(data_container) new_self = self[:index_start] + sub_pipeline if index_start + len(sub_pipeline) < len(self): new_self += self[index_start + len(sub_pipeline):] self.steps_as_tuple = new_self.steps_as_tuple index_start += len(sub_pipeline) return self, data_container def _create_sub_pipelines(self) -> List['MiniBatchSequentialPipeline']: """ Create sub pipelines by splitting the steps by the join type name. :return: list of sub pipelines """ sub_pipelines: List[MiniBatchSequentialPipeline] = self.split(Barrier) for sub_pipeline in sub_pipelines: if not sub_pipeline.ends_with(Barrier): raise Exception('At least one Barrier step needs to be at the end of a streaming pipeline.') return sub_pipelines class Barrier(Identity, ABC): """ A Barrier step to be used in a minibatch sequential pipeline. It forces all the data inputs to get to the barrier in a sub pipeline before going through to the next sub-pipeline. .. code-block:: python p = MiniBatchSequentialPipeline([ SomeStep(), SomeStep(), Barrier(), # must be a concrete Barrier ex: Joiner() SomeStep(), SomeStep(), Barrier(), # must be a concrete Barrier ex: Joiner() ], batch_size=10) .. seealso:: :class:`~neuraxle.base.NonTransformableMixin`, :class:`~neuraxle.base.BaseStep` """ @abstractmethod def join_transform(self, step: TruncableSteps, data_container: DataContainer, context: ExecutionContext) -> DataContainer: """ Execute the given pipeline :func:`~neuraxle.pipeline.Pipeline.transform` with the given data container, and execution context. :param step: truncable steps to execute :type step: TruncableSteps :param data_container: data container :type data_container: DataContainer :param context: execution context :type context: ExecutionContext :return: transformed data container :rtype: DataContainer """ raise NotImplementedError() @abstractmethod def join_fit_transform(self, step: Pipeline, data_container: DataContainer, context: ExecutionContext) -> Tuple[ 'Any', DataContainer]: """ Execute the given pipeline :func:`~neuraxle.pipeline.Pipeline.fit_transform` with the given data container, and execution context. :param step: truncable steps to execute :param data_container: data container :param context: execution context :return: (fitted step, transformed data container) """ raise NotImplementedError() class Joiner(Barrier): """ A Special Barrier step that joins the transformed mini batches together with list.extend method. .. seealso:: :class:`~neuraxle.data_container.DataContainer`, :func:`~neuraxle.data_container.DataContainer.batch` """ def __init__( self, batch_size: int, keep_incomplete_batch: bool = True, default_value_data_inputs=AbsentValuesNullObject(), default_value_expected_outputs=None ): Barrier.__init__(self) self.batch_size: int = batch_size self.keep_incomplete_batch: bool = keep_incomplete_batch self.default_value_data_inputs: Union[Any, AbsentValuesNullObject] = default_value_data_inputs self.default_value_expected_outputs: Union[Any, AbsentValuesNullObject] = default_value_expected_outputs def join_transform(self, step: Pipeline, data_container: DataContainer, context: ExecutionContext) -> DataContainer: """ Concatenate the pipeline transform output of each batch of self.batch_size together. :param step: pipeline to transform on :type step: Pipeline :param data_container: data container to transform :type data_container: DataContainer :param context: execution context :return: transformed data container :rtype: DataContainer """ context = context.push(step) data_container_batches = data_container.minibatches( batch_size=self.batch_size, keep_incomplete_batch=self.keep_incomplete_batch, default_value_data_inputs=self.default_value_data_inputs, default_value_expected_outputs=self.default_value_expected_outputs ) output_data_container = ListDataContainer.empty() for data_container_batch in data_container_batches: output_data_container.concat(step._transform_data_container(data_container_batch, context)) return output_data_container def join_fit_transform(self, step: Pipeline, data_container: DataContainer, context: ExecutionContext) -> \ Tuple['Any', DataContainer]: """ Concatenate the pipeline fit transform output of each batch of self.batch_size together. :param step: pipeline to fit transform on :type step: Pipeline :param data_container: data container to fit transform on :type data_container: DataContainer :param context: execution context :return: fitted self, transformed data inputs :rtype: Tuple[Any, DataContainer] """ context = context.push(step) data_container_batches = data_container.minibatches( batch_size=self.batch_size, keep_incomplete_batch=self.keep_incomplete_batch, default_value_data_inputs=self.default_value_data_inputs, default_value_expected_outputs=self.default_value_expected_outputs ) output_data_container = ListDataContainer.empty() for data_container_batch in data_container_batches: step, data_container_batch = step._fit_transform_data_container(data_container_batch, context) output_data_container.concat(data_container_batch) return step, output_data_container class ZipMinibatchJoiner(Joiner): """ Zips together minibatch outputs, i.e. returns a DataContainer where the first element is a tuple of every minibatches first element and so on. """ def join_transform(self, step: TruncableSteps, data_container: DataContainer, context: ExecutionContext) -> ZipDataContainer: context = context.push(step) data_container_batches = data_container.minibatches( batch_size=self.batch_size, keep_incomplete_batch=self.keep_incomplete_batch, default_value_data_inputs=self.default_value_data_inputs, default_value_expected_outputs=self.default_value_expected_outputs ) output_data_container = [] for data_container_batch in data_container_batches: output_data_container.append(step._transform_data_container(data_container_batch, context)) return ZipDataContainer.create_from(*output_data_container) def join_fit_transform(self, step: Pipeline, data_container: DataContainer, context: ExecutionContext) -> \ Tuple['Any', DataContainer]: context = context.push(step) data_container_batches = data_container.minibatches( batch_size=self.batch_size, keep_incomplete_batch=self.keep_incomplete_batch, default_value_data_inputs=self.default_value_data_inputs, default_value_expected_outputs=self.default_value_expected_outputs ) output_data_container = [] for data_container_batch in data_container_batches: step, data_container_batch = step._fit_transform_data_container(data_container_batch, context) output_data_container.append(data_container_batch) return step, ZipDataContainer.create_from(*output_data_container)
12,376
1,155
<gh_stars>1000+ package org.zalando.intellij.swagger.completion.field.completion.openapi.json; import org.zalando.intellij.swagger.assertion.AssertableList; public class HeaderFileCompletionTest extends PartialFileCompletionTest { public void testThatSingleHeaderFileIsAutoCompleted() { withSpecFiles("pet.json", "header.json"); final AssertableList completions = new AssertableList(geCompletions("pet.json")); assertHeaderCompletions(completions); } public void testThatHeadersFileIsAutoCompleted() { withSpecFiles("components.json", "headers.json"); final AssertableList completions = new AssertableList(geCompletions("components.json")); assertHeaderCompletions(completions); } private void assertHeaderCompletions(final AssertableList completions) { completions .assertContains( "$ref", "description", "required", "deprecated", "allowEmptyValue", "style", "explode", "allowReserved", "schema", "example", "examples", "content") .isOfSize(12); } }
476
381
<filename>library/src/main/java/ru/tinkoff/decoro/watchers/DiffMeasures.java<gh_stars>100-1000 /* * Copyright © 2016 Tinkoff Bank * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package ru.tinkoff.decoro.watchers; import java.util.Locale; /** * @author <NAME> */ class DiffMeasures { private static final int INSERT = 1; private static final int REMOVE = 1 << 1; private static final int MASK_BOTH_TYPE = 3; private int diffStartPosition; private int diffInsertLength; private int diffRemoveLength; private int diffType; private int cursorPosition; private boolean trimmingSequence; public DiffMeasures() { } public void calculateBeforeTextChanged(int start, int count, int after) { diffStartPosition = start; diffRemoveLength = 0; diffType = 0; diffInsertLength = 0; cursorPosition = -1; if (after > 0) { diffType |= INSERT; diffInsertLength = after; } if (count > 0) { diffType |= REMOVE; diffRemoveLength = count; } trimmingSequence = diffInsertLength > 0 && diffRemoveLength > 0 && diffInsertLength < diffRemoveLength; } public void recalculateOnModifyingWord(int realDiffLen) { diffRemoveLength -= diffInsertLength; diffStartPosition += realDiffLen; diffType &= ~INSERT; } public boolean isInsertingChars() { return (diffType & INSERT) == INSERT; } public boolean isRemovingChars() { return (diffType & REMOVE) == REMOVE; } public int getInsertEndPosition() { return diffStartPosition + diffInsertLength; } public int getRemoveEndPosition() { return diffStartPosition + diffRemoveLength - 1; } public void setCursorPosition(int cursorPosition) { this.cursorPosition = cursorPosition; } public int getStartPosition() { return diffStartPosition; } public int getDiffInsertLength() { return diffInsertLength; } public int getRemoveLength() { return diffRemoveLength; } public int getDiffType() { return diffType; } public int getCursorPosition() { return cursorPosition; } public boolean isTrimmingSequence() { return trimmingSequence; } @Override public String toString() { String type = null; if ((MASK_BOTH_TYPE & diffType) == MASK_BOTH_TYPE) { type = "both"; } else if ((INSERT & diffType) == INSERT) { type = "insert"; } else if ((REMOVE & diffType) == REMOVE) { type = "remove"; } else if (diffType == 0) { type = "none"; } if (type == null) { throw new IllegalStateException("unknown behaviour for diffType " + diffType); } return String.format(Locale.getDefault(), "[ DiffMeasures type=%s, diffStartPosition=%d, diffInsertLength=%d, diffRemoveLength=%d, cursor: %d ]", type, diffStartPosition, diffInsertLength, diffRemoveLength, cursorPosition ); } }
1,546
765
// <NAME> // UE4 Version 4.20.2 // https://github.com/Harrison1/unrealcpp // https://severallevels.io // https://harrisonmcguire.com #pragma once #include "GameFramework/Actor.h" #include "ConsoleLog.generated.h" UCLASS() class UNREALCPP_API AConsoleLog : public AActor { GENERATED_BODY() public: // Sets default values for this actor's properties AConsoleLog(); UPROPERTY(EditAnywhere) class USceneComponent* DefaultSceneComponent; protected: // Called when the game starts or when spawned virtual void BeginPlay() override; };
186
1,178
/* * Copyright 2020 Makani Technologies LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifndef AVIONICS_COMMON_TETHER_MESSAGE_TYPES_H_ #define AVIONICS_COMMON_TETHER_MESSAGE_TYPES_H_ #include <stdbool.h> #include <stdint.h> #include <stddef.h> #include "avionics/common/avionics_messages.h" // Modulus to apply to frame_index fields in TetherUp and TetherDown messages. // Define rollover value to ensure consistent multiplexing for all fields. #define TETHER_FRAME_INDEX_BITS 12 #define TETHER_FRAME_INDEX_ROLLOVER 4080 // Number of frame indices beyond the current frame_index to accept as valid // messages. #define TETHER_FRAME_INDEX_ACCEPTANCE_WINDOW 500 // Decimate network message rate to determine radio message rate. #define TETHER_RADIO_DECIMATION 4 // Decimate the node_status field in TetherDown. #define TETHER_NODE_STATUS_DECIMATION (TETHER_RADIO_DECIMATION * 4) // Decimate control telemetry field in TetherDown. #define TETHER_CONTROL_TELEMETRY_DECIMATION (TETHER_RADIO_DECIMATION * 2) // Modulus to apply to AIO sequence number. Sequence numbers greater than or // equal to TETHER_SEQUENCE_ROLLOVER are not transmitted over the long range // radio. #define TETHER_SEQUENCE_BITS 12 #define TETHER_SEQUENCE_ROLLOVER (1 << TETHER_SEQUENCE_BITS) // Invalid value to mark stale GPS time data. #define TETHER_GPS_TIME_OF_WEEK_ROLLOVER (3600 * 24 * 7 * 1000) #define TETHER_GPS_TIME_OF_WEEK_INVALID TETHER_GPS_TIME_OF_WEEK_ROLLOVER typedef enum { kTetherMergeTrunkForceSigned = -1, kTetherMergeTrunkA, kTetherMergeTrunkB, kNumTetherMergeTrunks, } TetherMergeTrunk; // Do not reference the internals of this structure! typedef struct { // Inputs correspond to a particular TetherDownSource. TetherDownMessage input_messages[kNumTetherDownSources]; uint16_t input_sequence_numbers[kNumTetherDownSources]; int64_t input_timestamps[kNumTetherDownSources]; bool input_updated[kNumTetherDownSources]; // Controller inputs correspond to the ControllerCommand message. ControllerCommandMessage controller_message; uint16_t controller_sequence_number; int64_t controller_timestamp; bool controller_updated; // Trunks corresponds to a particular TetherMergeTrunk. TetherDownMessage trunk_messages[kNumTetherMergeTrunks]; int64_t trunk_timestamps[kNumTetherMergeTrunks]; // Output. TetherDownMessage output_message; int64_t output_timestamp; } TetherDownMergeState; // Do not reference the internals of this structure! typedef struct { // Inputs correspond to a particular TetherUpSource. TetherUpMessage input_messages[kNumTetherUpSources]; uint16_t input_sequence_numbers[kNumTetherUpSources]; int64_t input_timestamps[kNumTetherUpSources]; bool input_updated[kNumTetherUpSources]; // Joystick inputs correspond to the JoystickStatus message. JoystickStatusMessage joystick_message; uint16_t joystick_sequence_number; int64_t joystick_timestamp; bool joystick_updated; // Trunks corresponds to a particular TetherMergeTrunk. TetherUpMessage trunk_messages[kNumTetherMergeTrunks]; int64_t trunk_timestamps[kNumTetherMergeTrunks]; // Output. TetherUpMessage output_message; int64_t output_timestamp; } TetherUpMergeState; typedef struct { const char *name; size_t sizeof_field; int32_t offsetof_field; int32_t offsetof_no_update_count; // Set to -1 to disable. int32_t offsetof_sequence; // Set to -1 to disable. } TetherFieldInfo; typedef struct { size_t sizeof_message; int32_t offsetof_frame_index; int32_t offsetof_gps_time; const TetherFieldInfo *fields; int32_t num_fields; } TetherMessageInfo; #endif // AVIONICS_COMMON_TETHER_MESSAGE_TYPES_H_
1,403
1,768
<gh_stars>1000+ package org.lamport.tla.toolbox.tool.prover.ui.output.source; import java.util.HashMap; import java.util.Iterator; import java.util.LinkedList; import java.util.List; import org.eclipse.core.runtime.IPath; /** * Note that this class is not used. I originally created it * to copy the model of parsing and storing TLC's output, but we * have decided that for now there is no need to cache the output of * the prover. The parser installs markers on the appropriate resources * based on the output it receives. It does not need to use this registry. * * This is a singleton registry for output from the TLAPM. * * Listeners can register themselves to receive output by calling * {@link TLAPMOutputSourceRegistry#addListener(ITLAPMOutputSourceListener)}. * Listeners that add themselves will receive all output from the most recently * added {@link ITLAPMOutputSource} for which {@link ITLAPMOutputSource#getFullModulePath()} * is equal to {@link ITLAPMOutputSourceListener#getFullModulePath()} for equality * computed by {@link IPath#equals(Object)}. Such a listener will also receive all future output * for all {@link ITLAPMOutputSource} that also meet that condition and are added to this * registry after the listener is added. * * Sources implementing {@link ITLAPMOutputSource} add themselves to the registry * by calling {@link TLAPMOutputSourceRegistry#addSource(ITLAPMOutputSource)}. * * The singleton instance of this class can be accessed using {@link TLAPMOutputSourceRegistry#getInstance()}. * * @author <NAME> * */ public class TLAPMOutputSourceRegistry { // singleton instance private static TLAPMOutputSourceRegistry instance; /** * Container for sources, hashed by {@link IPath} pointing * to module for that source. */ private HashMap sources; /** * Map of {@link IPath} for modules to {@link List} of * {@link ITLAPMOutputSourceListener} for listeners * that have not yet been added to a source because * there has not been an appropriate source added to this * registry. */ private HashMap listenerLists; /** * Adds a source to this registry. This source will replace the most * recently added source such that * * existingSource.getFullModulePath().equals(newSource.getFullModulePath()). * * @param source */ public void addSource(ITLAPMOutputSource source) { /* * Get all listeners that should be attached to the new source. * * First, check if there is an existing source with listeners. If not, * check if there are listeners that have been added but not been connected * to a source. */ ITLAPMOutputSource existingSource = (ITLAPMOutputSource) sources.get(source.getFullModulePath()); if (existingSource != null) { ITLAPMOutputSourceListener[] existingListeners = existingSource.getListeners(); for (int i = 0; i < existingListeners.length; i++) { source.addListener(existingListeners[i]); existingSource.removeListener(existingListeners[i]); } } else { // get listeners that have not yet been added to a source List list = (List) listenerLists.get(source.getFullModulePath()); if (list != null) { for (Iterator it = list.iterator(); it.hasNext();) { source.addListener((ITLAPMOutputSourceListener) it.next()); it.remove(); } } } // replace the existing source, if any, with the new source sources.put(source.getFullModulePath(), source); } /** * Registers the listener. * * Listeners that add themselves will receive all output from the most recently * added {@link ITLAPMOutputSource} for which {@link ITLAPMOutputSource#getFullModulePath()} * is equal to {@link ITLAPMOutputSourceListener#getFullModulePath()} for equality * computed by {@link IPath#equals(Object)}. Such a listener will also receive all future output * for all {@link ITLAPMOutputSource} that also meet that condition and are added to this * registry after the listener is added. * * @param listener */ public void addListener(ITLAPMOutputSourceListener listener) { /* * First, check if there is an existing source to which * the listener should be connected. If not, add the listener * to the list of listeners that will be connected to a source * when an appropriate one is added. */ ITLAPMOutputSource source = (ITLAPMOutputSource) sources.get(listener.getFullModulePath()); if (source != null) { source.addListener(listener); } else { List list = (List) listenerLists.get(listener.getFullModulePath()); if (list == null) { list = new LinkedList(); } list.add(listener); listenerLists.put(listener.getFullModulePath(), list); } } /** * Removes the listener from any source to which it is listening and removes * it from listening to any sources added in the future. * * Does nothing if the listener has not been added using {@link TLAPMOutputSourceRegistry#addListener(ITLAPMOutputSourceListener)}. * * @param listener */ public void removeListener(ITLAPMOutputSourceListener listener) { /* * First, try to remove the listener from a source. * If no source is found, remove the listener from * listenerLists. */ ITLAPMOutputSource source = (ITLAPMOutputSource) sources.get(listener.getFullModulePath()); if (source != null) { source.removeListener(listener); } else { List list = (List) listenerLists.get(listener.getFullModulePath()); if (list != null) { list.remove(listener); } } } private TLAPMOutputSourceRegistry() { sources = new HashMap(); listenerLists = new HashMap(); } /** * Singleton access method. * @return */ public static TLAPMOutputSourceRegistry getInstance() { if (instance == null) { instance = new TLAPMOutputSourceRegistry(); } return instance; } }
2,738
1,799
<filename>app/appcmn/confdlg.cpp //--------------------------------------------------------------------------- #include <vcl.h> #pragma hdrstop #include "confdlg.h" //--------------------------------------------------------------------------- #pragma package(smart_init) #pragma resource "*.dfm" TConfDialog *ConfDialog; //--------------------------------------------------------------------------- __fastcall TConfDialog::TConfDialog(TComponent* Owner) : TForm(Owner) { } //---------------------------------------------------------------------------
119
834
<gh_stars>100-1000 /*! \file asio_timer.cpp \brief Asio timer example \author <NAME> \date 16.08.2018 \copyright MIT License */ #include "asio_service.h" #include "server/asio/timer.h" #include "threads/thread.h" #include <iostream> class AsioTimer : public CppServer::Asio::Timer { public: using CppServer::Asio::Timer::Timer; protected: void onTimer(bool canceled) override { std::cout << "Asio timer " << (canceled ? "canceled" : "expired") << std::endl; } void onError(int error, const std::string& category, const std::string& message) override { std::cout << "Asio timer caught an error with code " << error << " and category '" << category << "': " << message << std::endl; } }; int main(int argc, char** argv) { // Create a new Asio service auto service = std::make_shared<AsioService>(); // Start the Asio service std::cout << "Asio service starting..."; service->Start(); std::cout << "Done!" << std::endl; // Create a new Asio timer auto timer = std::make_shared<AsioTimer>(service); // Setup and synchronously wait for the timer timer->Setup(CppCommon::UtcTime() + CppCommon::Timespan::seconds(1)); timer->WaitSync(); // Setup and asynchronously wait for the timer timer->Setup(CppCommon::Timespan::seconds(1)); timer->WaitAsync(); // Wait for a while... CppCommon::Thread::Sleep(2000); // Setup and asynchronously wait for the timer timer->Setup(CppCommon::Timespan::seconds(1)); timer->WaitAsync(); // Wait for a while... CppCommon::Thread::Sleep(500); // Cancel the timer timer->Cancel(); // Wait for a while... CppCommon::Thread::Sleep(500); // Stop the Asio service std::cout << "Asio service stopping..."; service->Stop(); std::cout << "Done!" << std::endl; return 0; }
707
348
<reponame>chamberone/Leaflet.PixiOverlay {"nom":"<NAME>","circ":"10ème circonscription","dpt":"Haute-Garonne","inscrits":115,"abs":39,"votants":76,"blancs":12,"nuls":2,"exp":62,"res":[{"nuance":"REM","nom":"<NAME>","voix":33},{"nuance":"FI","nom":"<NAME>","voix":29}]}
111
333
<reponame>prehistoric-penguin/cpp-concurrency-book-source #include <future> #include <iostream> int find_the_answer_to_ltuae() { return 42; } void do_other_stuff() {} int main() { std::future<int> the_answer=std::async(find_the_answer_to_ltuae); do_other_stuff(); std::cout<<"The answer is "<<the_answer.get()<<std::endl; }
149
826
<filename>deps/opencolorio-configs/spi-vfx/make_vfx_ocio.py #!/usr/bin/env python import math, os, sys import SpImport OCIO = SpImport.SpComp2("PyOpenColorIO",2) print "OCIO",OCIO.version outputfilename = "config.ocio" config = OCIO.Config() LUT_SEARCH_PATH = ['luts'] config.setSearchPath(':'.join(LUT_SEARCH_PATH)) # Set roles config.setRole(OCIO.Constants.ROLE_SCENE_LINEAR, "lnf") config.setRole(OCIO.Constants.ROLE_REFERENCE, "lnf") config.setRole(OCIO.Constants.ROLE_COLOR_TIMING, "lg10") config.setRole(OCIO.Constants.ROLE_COMPOSITING_LOG, "lgf") config.setRole(OCIO.Constants.ROLE_COLOR_PICKING,"cpf") config.setRole(OCIO.Constants.ROLE_DATA,"ncf") config.setRole(OCIO.Constants.ROLE_DEFAULT,"ncf") config.setRole(OCIO.Constants.ROLE_MATTE_PAINT,"vd8") config.setRole(OCIO.Constants.ROLE_TEXTURE_PAINT,"dt16") ## Scene Linear ############################################################### cs = OCIO.ColorSpace(family='ln',name='lnf') cs.setDescription("lnf : linear show space") cs.setBitDepth(OCIO.Constants.BIT_DEPTH_F32) cs.setAllocationVars([-15.0,6.0]) cs.setAllocation(OCIO.Constants.ALLOCATION_LG2) config.addColorSpace(cs) cs = OCIO.ColorSpace(family='ln',name='lnh') cs.setDescription("lnh : linear show space") cs.setBitDepth(OCIO.Constants.BIT_DEPTH_F16) cs.setAllocationVars([-15.0,6.0]) cs.setAllocation(OCIO.Constants.ALLOCATION_LG2) config.addColorSpace(cs) cs = OCIO.ColorSpace(family='ln',name='ln16') cs.setDescription("ln16 : linear show space") cs.setBitDepth(OCIO.Constants.BIT_DEPTH_UINT16) cs.setAllocationVars([-15.0,0.0]) cs.setAllocation(OCIO.Constants.ALLOCATION_LG2) config.addColorSpace(cs) ## Log ######################################################################## cs = OCIO.ColorSpace(family='lg',name='lg16') cs.setDescription("lg16 : conversion from film log ") cs.setBitDepth(OCIO.Constants.BIT_DEPTH_UINT16) t = OCIO.FileTransform('lg16.spi1d',interpolation=OCIO.Constants.INTERP_NEAREST ) cs.setTransform(t, OCIO.Constants.COLORSPACE_DIR_TO_REFERENCE) config.addColorSpace(cs) cs = OCIO.ColorSpace(family='lg',name='lg10') cs.setDescription("lg10 : conversion from film log") cs.setBitDepth(OCIO.Constants.BIT_DEPTH_UINT10) t = OCIO.FileTransform('lg10.spi1d',interpolation=OCIO.Constants.INTERP_NEAREST ) cs.setTransform(t, OCIO.Constants.COLORSPACE_DIR_TO_REFERENCE) config.addColorSpace(cs) cs = OCIO.ColorSpace(family='lg',name='lgf') cs.setDescription("lgf : conversion from film log") cs.setBitDepth(OCIO.Constants.BIT_DEPTH_F32) cs.setAllocationVars([-0.25,1.5]) t = OCIO.FileTransform('lgf.spi1d',interpolation=OCIO.Constants.INTERP_LINEAR) cs.setTransform(t, OCIO.Constants.COLORSPACE_DIR_TO_REFERENCE) config.addColorSpace(cs) ## PANALOG ######################################################################## cs = OCIO.ColorSpace(family='gn',name='gn10') cs.setDescription("gn10 :conversion from Panalog") cs.setBitDepth(OCIO.Constants.BIT_DEPTH_UINT10) t = OCIO.FileTransform('gn10.spi1d',interpolation=OCIO.Constants.INTERP_NEAREST) cs.setTransform(t, OCIO.Constants.COLORSPACE_DIR_TO_REFERENCE) config.addColorSpace(cs) ## VD ######################################################################## cs = OCIO.ColorSpace(family='vd',name='vd16') cs.setDescription("vd16 :conversion from a gamma 2.2 ") cs.setBitDepth(OCIO.Constants.BIT_DEPTH_UINT16) groupTransform = OCIO.GroupTransform() groupTransform.push_back(OCIO.FileTransform('version_8_whitebalanced.spimtx',direction=OCIO.Constants.TRANSFORM_DIR_INVERSE)) groupTransform.push_back(OCIO.FileTransform('vd16.spi1d',interpolation=OCIO.Constants.INTERP_NEAREST )) cs.setTransform(groupTransform, OCIO.Constants.COLORSPACE_DIR_TO_REFERENCE) config.addColorSpace(cs) cs = OCIO.ColorSpace(family='vd',name='vd10') cs.setDescription("vd10 :conversion from a gamma 2.2 ") cs.setBitDepth(OCIO.Constants.BIT_DEPTH_UINT10) groupTransform = OCIO.GroupTransform() groupTransform.push_back(OCIO.FileTransform('version_8_whitebalanced.spimtx',direction=OCIO.Constants.TRANSFORM_DIR_INVERSE)) groupTransform.push_back(OCIO.FileTransform('vd10.spi1d',interpolation=OCIO.Constants.INTERP_NEAREST )) cs.setTransform(groupTransform, OCIO.Constants.COLORSPACE_DIR_TO_REFERENCE) config.addColorSpace(cs) cs = OCIO.ColorSpace(family='vd',name='vd8') cs.setDescription("vd8 :conversion from a gamma 2.2") cs.setBitDepth(OCIO.Constants.BIT_DEPTH_UINT8) groupTransform = OCIO.GroupTransform() groupTransform.push_back(OCIO.FileTransform('version_8_whitebalanced.spimtx',direction=OCIO.Constants.TRANSFORM_DIR_INVERSE)) groupTransform.push_back(OCIO.FileTransform('vd8.spi1d',interpolation=OCIO.Constants.INTERP_NEAREST )) cs.setTransform(groupTransform, OCIO.Constants.COLORSPACE_DIR_TO_REFERENCE) config.addColorSpace(cs) # REC709 CONVERSIONS############################################################################# cs = OCIO.ColorSpace(family='hd',name='hd10') cs.setDescription("hd10 : conversion from REC709") cs.setBitDepth(OCIO.Constants.BIT_DEPTH_UINT10) groupTransform = OCIO.GroupTransform() groupTransform.push_back(OCIO.FileTransform('hdOffset.spimtx',interpolation=OCIO.Constants.INTERP_NEAREST ,direction=OCIO.Constants.TRANSFORM_DIR_INVERSE)) groupTransform.push_back(OCIO.ColorSpaceTransform(src='vd16',dst='lnf')) cs.setTransform(groupTransform, OCIO.Constants.COLORSPACE_DIR_TO_REFERENCE) config.addColorSpace(cs) ## TEXTURE PUBLISHING ######################################################################## """ cs = OCIO.ColorSpace(family='dt',name='dt8') cs.setDescription("dt8 :conversion for diffuse texture") cs.setBitDepth(OCIO.Constants.BIT_DEPTH_UINT8) groupTransform = OCIO.GroupTransform() groupTransform.push_back(OCIO.FileTransform('diffuseTextureMultiplier.spimtx',interpolation=OCIO.Constants.INTERP_NEAREST ,direction=OCIO.Constants.TRANSFORM_DIR_FORWARD)) groupTransform.push_back(OCIO.ColorSpaceTransform(dst='lnf',src='vd16')) cs.setTransform(groupTransform, OCIO.Constants.COLORSPACE_DIR_TO_REFERENCE) config.addColorSpace(cs) """ cs = OCIO.ColorSpace(family='dt',name='dt16') cs.setDescription("dt16 :conversion for diffuse texture") cs.setBitDepth(OCIO.Constants.BIT_DEPTH_UINT16) groupTransform = OCIO.GroupTransform() groupTransform.push_back(OCIO.FileTransform('diffuseTextureMultiplier.spimtx',interpolation=OCIO.Constants.INTERP_NEAREST ,direction=OCIO.Constants.TRANSFORM_DIR_FORWARD)) groupTransform.push_back(OCIO.ColorSpaceTransform(dst='lnf',src='vd16')) cs.setTransform(groupTransform, OCIO.Constants.COLORSPACE_DIR_TO_REFERENCE) config.addColorSpace(cs) ## COLOR PICKER ######################################################################## cs = OCIO.ColorSpace(family='cp',name='cpf') cs.setDescription("cpf :video like conversion used for color picking ") cs.setBitDepth(OCIO.Constants.BIT_DEPTH_F32) t=OCIO.FileTransform(src='cpf.spi1d',interpolation=OCIO.Constants.INTERP_LINEAR ) cs.setTransform(t, OCIO.Constants.COLORSPACE_DIR_TO_REFERENCE) config.addColorSpace(cs) ## DATA ######################################################################## cs = OCIO.ColorSpace(family='nc',name='nc8') cs.setDescription("nc8 :nc,Non-color used to store non-color data such as depth or surface normals") cs.setBitDepth(OCIO.Constants.BIT_DEPTH_UINT8) cs.setIsData(True) config.addColorSpace(cs) cs = OCIO.ColorSpace(family='nc',name='nc10') cs.setDescription("nc10 :nc,Non-color used to store non-color data such as depth or surface normals") cs.setBitDepth(OCIO.Constants.BIT_DEPTH_UINT10) cs.setIsData(True) config.addColorSpace(cs) cs = OCIO.ColorSpace(family='nc',name='nc16') cs.setDescription("nc16 :nc,Non-color used to store non-color data such as depth or surface normals") cs.setBitDepth(OCIO.Constants.BIT_DEPTH_UINT16) cs.setIsData(True) config.addColorSpace(cs) cs = OCIO.ColorSpace(family='nc',name='ncf') cs.setDescription("ncf :nc,Non-color used to store non-color data such as depth or surface normals") cs.setBitDepth(OCIO.Constants.BIT_DEPTH_F32) cs.setIsData(True) config.addColorSpace(cs) ## DISPLAY SPACES ################################################################## cs = OCIO.ColorSpace(family='srgb',name='srgb8') cs.setDescription("srgb8 :rgb display space for the srgb standard.") cs.setBitDepth(OCIO.Constants.BIT_DEPTH_UINT8) groupTransform = OCIO.GroupTransform() groupTransform.push_back(OCIO.ColorSpaceTransform(src='lnf', dst='lg10')) groupTransform.push_back(OCIO.FileTransform('spi_ocio_srgb_test.spi3d',interpolation=OCIO.Constants.INTERP_LINEAR)) cs.setTransform(groupTransform, OCIO.Constants.COLORSPACE_DIR_FROM_REFERENCE) config.addColorSpace(cs) cs = OCIO.ColorSpace(family='p3dci',name='p3dci8') cs.setDescription("p3dci8 :rgb display space for gamma 2.6 P3 projection.") cs.setBitDepth(OCIO.Constants.BIT_DEPTH_UINT8) groupTransform = OCIO.GroupTransform() groupTransform.push_back(OCIO.ColorSpaceTransform(src='lnf', dst='lg10')) groupTransform.push_back( OCIO.FileTransform('colorworks_filmlg_to_p3.3dl',interpolation=OCIO.Constants.INTERP_LINEAR)) cs.setTransform(groupTransform, OCIO.Constants.COLORSPACE_DIR_FROM_REFERENCE) config.addColorSpace(cs) cs = OCIO.ColorSpace(family='xyz',name='xyz16') cs.setDescription("xyz16 :Conversion for DCP creation.") cs.setBitDepth(OCIO.Constants.BIT_DEPTH_UINT16) groupTransform = OCIO.GroupTransform() groupTransform.push_back(OCIO.ColorSpaceTransform(src='lnf', dst='p3dci8')) groupTransform.push_back(OCIO.ExponentTransform([2.6,2.6,2.6,1.0])) groupTransform.push_back(OCIO.FileTransform('p3_to_xyz16_corrected_wp.spimtx')) groupTransform.push_back(OCIO.ExponentTransform([2.6,2.6,2.6,1.0],direction=OCIO.Constants.TRANSFORM_DIR_INVERSE)) cs.setTransform(groupTransform, OCIO.Constants.COLORSPACE_DIR_FROM_REFERENCE) config.addColorSpace(cs) ## DISPLAY SPACES ################################################################## for name,colorspace in [ ['Film','srgb8'], ['Log','lg10'], ['Raw','nc10']]: config.addDisplay('sRGB',name,colorspace) for name,colorspace in [ ['Film','p3dci8'], ['Log','lg10'], ['Raw','nc10']]: config.addDisplay('DCIP3',name,colorspace) config.setActiveViews(','.join(['Film','Log','Raw'])) config.setActiveDisplays(','.join(['sRGB','DCIP3'])) try: config.sanityCheck() except Exception,e: print e print "Configuration was not written due to a failed Sanity Check" sys.exit() else: f = file(outputfilename,"w") f.write(config.serialize()) f.close() print "Wrote",outputfilename
3,780
698
# Copyright 2022, <NAME> # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np from fastapi import FastAPI from onnxruntime import GraphOptimizationLevel, InferenceSession, SessionOptions from transformers import AutoTokenizer, BatchEncoding, TensorType app = FastAPI() options = SessionOptions() options.graph_optimization_level = GraphOptimizationLevel.ORT_ENABLE_ALL model = InferenceSession("triton_models/model.onnx", options, providers=["CUDAExecutionProvider"]) tokenizer = AutoTokenizer.from_pretrained("philschmid/MiniLM-L6-H384-uncased-sst2") @app.get("/predict") def predict(query: str): encode_dict: BatchEncoding = tokenizer( text=query, max_length=128, truncation=True, return_tensors=TensorType.NUMPY, ) result: np.ndarray = model.run(None, dict(encode_dict))[0] return result.tolist()
449
1,192
<reponame>clayne/DirectXShaderCompiler // RUN: %clang_cc1 -std=c++11 -fsyntax-only -verify %s // expected-no-diagnostics namespace pr12262 { template<typename T, typename... Ts> void abc1(int (*xxx)[sizeof ... (Ts) + 1]); void qq1 () { abc1<int>(0); abc1<int,double>(0); } template <unsigned N> class array {}; template<typename T, typename... Types> array<sizeof...(Types)> make_array1(Types&&... args); void qq2 () { array<1> arr = make_array1<int>(1); array<3> arr2 = make_array1<int>(1,array<5>(),0.1); } template<typename T, typename... Types> int make_array(array<sizeof...(Types)>&, Types... args); void qq3 () { array<1> a1; int aa1 = make_array<int>(a1,1); array<2> a2; int aa2 = make_array<int>(a2, 0L, "abc"); } template<typename ... Ts> struct AAA { template<typename T, typename... Types> static array<sizeof...(Types)> make_array(Types ... args); }; void qq4 () { array<2> arr2 = AAA<int, int>::make_array<int>(1,2); } } namespace pr12439 { template<class... Members> struct X { template<int Idx> using get_t = decltype(sizeof...(Members)); template<int i> get_t<i> get(); }; template<class... Members> template<int i> X<Members...>::get_t<i> X<Members...>::get() { return 0; } } namespace pr13272 { template<bool B, class T = void> struct enable_if { }; template<class T> struct enable_if<true, T> { typedef T type; }; class Exception {}; template<class Ex, typename... Args> void cxx_throw(typename enable_if<(sizeof...(Args) > 0), const char *>::type fmt, Args&&... args) { return; } void test() { cxx_throw<Exception>("Youpi",1); } } namespace pr13817 { template <unsigned> struct zod; template <> struct zod<1> {}; template <typename T, typename ... Ts> zod<sizeof...(Ts)> make_zod(Ts ...) { return zod<sizeof...(Ts)>(); } int main(int argc, char *argv[]) { make_zod<int>(1); return 0; } } namespace pr14273 { template<typename T, int i> struct myType { }; template<typename T, typename... Args> struct Counter { static const int count = 1 + Counter<Args...>::count; }; template<typename T> struct Counter<T> { static const int count = 1; }; template<typename Arg, typename... Args> myType<Arg, sizeof...(Args)>* make_array_with_type(const Args&... args) { return 0; } void func(void) { make_array_with_type<char>(1,2,3); } } namespace pr15112 { template<bool, typename _Tp = void> struct enable_if { }; template<typename _Tp> struct enable_if<true,_Tp> { typedef _Tp type; }; typedef __typeof__(sizeof(int)) size_t; template <size_t n, typename T, typename... Args> struct is_array_of { static const bool value = true; }; struct cpu { using value_type = void; }; template <size_t Order, typename T> struct coords_alias { typedef T type; }; template <size_t Order, typename MemoryTag> using coords = typename coords_alias<Order, MemoryTag>::type; template <typename MemTag, typename... Args> typename enable_if<is_array_of<sizeof...(Args), size_t, Args...>::value, coords<sizeof...(Args), MemTag>>::type mkcoords(Args... args); auto c1 = mkcoords<cpu>(0ul, 0ul, 0ul); } namespace pr12699 { template<bool B> struct bool_constant { static const bool value = B; }; template<typename... A> struct F { template<typename... B> using SameSize = bool_constant<sizeof...(A) == sizeof...(B)>; template<typename... B, typename = SameSize<B...>> F(B...) { } }; void func() { F<int> f1(3); } }
1,426
1,742
<reponame>fchapoton/sage from .debug import getattr_debug, type_debug from .getattr import raw_getattr
36
762
<gh_stars>100-1000 /*! * Copyright (c) 2019 by Contributors if not otherwise specified * \file sequential_sampler.h * \brief Sequential sampler, with fixed reading order */ #ifndef DECORD_SAMPLER_SEQUENTIAL_SAMPLER_H_ #define DECORD_SAMPLER_SEQUENTIAL_SAMPLER_H_ #include "sampler_interface.h" namespace decord { namespace sampler { class SequentialSampler : public SamplerInterface { public: SequentialSampler(std::vector<int64_t> lens, std::vector<int64_t> range, int bs, int interval, int skip); ~SequentialSampler() = default; void Reset(); bool HasNext() const; const Samples& Next(); size_t Size() const; private: size_t bs_; Samples samples_; size_t curr_; std::vector<Samples> visit_order_; }; // class SequentialSampler } // sampler } // decord #endif // DECORD_SAMPLER_SEQUENTIAL_SAMPLER_H_
374
575
// Copyright 2019 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "content/browser/scheduler/browser_io_thread_delegate.h" #include "base/message_loop/message_pump.h" #include "base/message_loop/message_pump_type.h" #include "base/task/sequence_manager/sequence_manager.h" #include "base/task/sequence_manager/task_queue.h" #include "base/task/task_executor.h" #include "base/task/task_observer.h" #include "content/browser/scheduler/browser_task_executor.h" #include "content/public/browser/browser_thread.h" namespace content { using ::base::sequence_manager::CreateUnboundSequenceManager; using ::base::sequence_manager::SequenceManager; using ::base::sequence_manager::TaskQueue; class BrowserIOThreadDelegate::TLSMultiplexer : public base::TaskObserver { public: TLSMultiplexer() = default; ~TLSMultiplexer() override = default; void SetIOTaskExecutor(base::TaskExecutor* io_task_executor) { io_task_executor_ = io_task_executor; } void WillProcessTask(const base::PendingTask& pending_task, bool was_blocked_or_low_priority) override { base::TaskExecutor* previous_executor = base::GetTaskExecutorForCurrentThread(); if (previous_executor) { previous_executors_.push_back(previous_executor); base::SetTaskExecutorForCurrentThread(nullptr); } base::SetTaskExecutorForCurrentThread(io_task_executor_); } void DidProcessTask(const base::PendingTask& pending_task) override { base::SetTaskExecutorForCurrentThread(nullptr); if (!previous_executors_.empty()) { base::SetTaskExecutorForCurrentThread(previous_executors_.back()); previous_executors_.pop_back(); } } base::TaskExecutor* io_task_executor_ = nullptr; std::vector<base::TaskExecutor*> previous_executors_; }; BrowserIOThreadDelegate::BrowserIOThreadDelegate() : owned_sequence_manager_(CreateUnboundSequenceManager( SequenceManager::Settings::Builder() .SetMessagePumpType(base::MessagePumpType::IO) .Build())), sequence_manager_(owned_sequence_manager_.get()) { Init(); } BrowserIOThreadDelegate::BrowserIOThreadDelegate( SequenceManager* sequence_manager) : sequence_manager_(sequence_manager), tls_multiplexer_(std::make_unique<TLSMultiplexer>()) { sequence_manager_->AddTaskObserver(tls_multiplexer_.get()); Init(); } void BrowserIOThreadDelegate::Init() { task_queues_ = std::make_unique<BrowserTaskQueues>( BrowserThread::IO, sequence_manager_, sequence_manager_->GetRealTimeDomain()); default_task_runner_ = task_queues_->GetHandle()->GetDefaultTaskRunner(); } void BrowserIOThreadDelegate::SetTaskExecutor( base::TaskExecutor* task_executor) { if (tls_multiplexer_) { tls_multiplexer_->SetIOTaskExecutor(task_executor); } else { task_executor_ = task_executor; } } scoped_refptr<base::SingleThreadTaskRunner> BrowserIOThreadDelegate::GetDefaultTaskRunner() { return default_task_runner_; } BrowserIOThreadDelegate::~BrowserIOThreadDelegate() { if (task_executor_) { base::SetTaskExecutorForCurrentThread(nullptr); } if (tls_multiplexer_) { sequence_manager_->RemoveTaskObserver(tls_multiplexer_.get()); } } void BrowserIOThreadDelegate::BindToCurrentThread( base::TimerSlack timer_slack) { DCHECK(sequence_manager_); sequence_manager_->BindToMessagePump( base::MessagePump::Create(base::MessagePumpType::IO)); sequence_manager_->SetTimerSlack(timer_slack); sequence_manager_->SetDefaultTaskRunner(GetDefaultTaskRunner()); sequence_manager_->EnableCrashKeys("io_scheduler_async_stack"); if (task_executor_) { base::SetTaskExecutorForCurrentThread(task_executor_); } } } // namespace content
1,359
784
/** * Copyright (c) 2015-2021, <NAME> 杨福海 (<EMAIL>). * <p> * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * <p> * http://www.apache.org/licenses/LICENSE-2.0 * <p> * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.jboot.components.cache.caredis; import com.github.benmanes.caffeine.cache.Cache; import com.github.benmanes.caffeine.cache.Caffeine; import com.jfinal.plugin.ehcache.IDataLoader; import io.jboot.Jboot; import io.jboot.components.cache.JbootCacheBase; import io.jboot.components.cache.JbootCacheConfig; import io.jboot.components.cache.caffeine.CaffeineCacheImpl; import io.jboot.components.cache.redis.JbootRedisCacheImpl; import io.jboot.components.serializer.JbootSerializer; import io.jboot.support.redis.JbootRedis; import io.jboot.utils.StrUtil; import redis.clients.jedis.BinaryJedisPubSub; import java.util.ArrayList; import java.util.List; import java.util.concurrent.TimeUnit; /** * 基于 caffeine和redis做的二级缓存 * 优点是:减少高并发下redis的io瓶颈 */ public class JbootCaredisCacheImpl extends JbootCacheBase { public static final String DEFAULT_NOTIFY_CHANNEL = "jboot_caredis_channel"; private CaffeineCacheImpl caffeineCacheImpl; private JbootRedisCacheImpl redisCacheImpl; private JbootRedis redis; private JbootSerializer serializer; private String channel = DEFAULT_NOTIFY_CHANNEL; private String clientId; private Cache<String, List> keysCache = Caffeine.newBuilder() .expireAfterAccess(10, TimeUnit.MINUTES) .expireAfterWrite(10, TimeUnit.MINUTES) .build(); public JbootCaredisCacheImpl(JbootCacheConfig config) { super(config); this.caffeineCacheImpl = new CaffeineCacheImpl(config); this.redisCacheImpl = new JbootRedisCacheImpl(config); this.clientId = StrUtil.uuid(); this.serializer = Jboot.getSerializer(); this.redis = redisCacheImpl.getRedis(); this.redis.subscribe(new BinaryJedisPubSub() { @Override public void onMessage(byte[] channel, byte[] message) { JbootCaredisCacheImpl.this.onMessage((String) serializer.deserialize(channel), serializer.deserialize(message)); } }, serializer.serialize(channel)); } @Override public <T> T get(String cacheName, Object key) { T value = caffeineCacheImpl.get(cacheName, key); if (value == null) { value = redisCacheImpl.get(cacheName, key); if (value != null) { Integer ttl = redisCacheImpl.getTtl(cacheName, key); if (ttl != null && ttl > 0) { caffeineCacheImpl.put(cacheName, key, value, ttl); } else { caffeineCacheImpl.put(cacheName, key, value); } } } return value; } @Override public void put(String cacheName, Object key, Object value) { try { caffeineCacheImpl.put(cacheName, key, value); redisCacheImpl.put(cacheName, key, value); } finally { publishMessage(JbootCaredisMessage.ACTION_PUT, cacheName, key); } } @Override public void put(String cacheName, Object key, Object value, int liveSeconds) { if (liveSeconds <= 0) { put(cacheName, key, value); return; } try { caffeineCacheImpl.put(cacheName, key, value, liveSeconds); redisCacheImpl.put(cacheName, key, value, liveSeconds); } finally { publishMessage(JbootCaredisMessage.ACTION_PUT, cacheName, key); } } @Override public void remove(String cacheName, Object key) { try { caffeineCacheImpl.remove(cacheName, key); redisCacheImpl.remove(cacheName, key); } finally { publishMessage(JbootCaredisMessage.ACTION_REMOVE, cacheName, key); } } @Override public void removeAll(String cacheName) { try { caffeineCacheImpl.removeAll(cacheName); redisCacheImpl.removeAll(cacheName); } finally { publishMessage(JbootCaredisMessage.ACTION_REMOVE_ALL, cacheName, null); } } @Override public <T> T get(String cacheName, Object key, IDataLoader dataLoader) { T value = get(cacheName, key); if (value != null) { return value; } value = (T) dataLoader.load(); if (value != null) { put(cacheName, key, value); } return value; } @Override public <T> T get(String cacheName, Object key, IDataLoader dataLoader, int liveSeconds) { if (liveSeconds <= 0) { return get(cacheName, key, dataLoader); } T value = get(cacheName, key); if (value != null) { return value; } value = (T) dataLoader.load(); if (value != null) { put(cacheName, key, value, liveSeconds); } return value; } @Override public Integer getTtl(String cacheName, Object key) { Integer ttl = caffeineCacheImpl.getTtl(cacheName, key); if (ttl == null) { ttl = redisCacheImpl.getTtl(cacheName, key); } return ttl; } @Override public void setTtl(String cacheName, Object key, int seconds) { try { caffeineCacheImpl.setTtl(cacheName, key, seconds); redisCacheImpl.setTtl(cacheName, key, seconds); } finally { publishMessage(JbootCaredisMessage.ACTION_REMOVE, cacheName, key); } } @Override public void refresh(String cacheName, Object key) { publishMessage(JbootCaredisMessage.ACTION_REMOVE, cacheName, key); } @Override public void refresh(String cacheName) { publishMessage(JbootCaredisMessage.ACTION_REMOVE_ALL, cacheName, null); } @Override public List getNames() { return redisCacheImpl.getNames(); } @Override public List getKeys(String cacheName) { List list = keysCache.getIfPresent(cacheName); if (list != null) { return list; } list = redisCacheImpl.getKeys(cacheName); if (list == null) { list = new ArrayList(); } keysCache.put(cacheName, list); return list; } private void publishMessage(int action, String cacheName, Object key) { clearKeysCache(cacheName); JbootCaredisMessage message = new JbootCaredisMessage(clientId, action, cacheName, key); redis.publish(serializer.serialize(channel), serializer.serialize(message)); } private void clearKeysCache(String cacheName) { keysCache.invalidate(cacheName); } public void onMessage(String channel, Object obj) { JbootCaredisMessage message = (JbootCaredisMessage) obj; //不处理自己发送的消息 if (clientId.equals(message.getClientId())) { return; } clearKeysCache(message.getCacheName()); switch (message.getAction()) { case JbootCaredisMessage.ACTION_PUT: case JbootCaredisMessage.ACTION_REMOVE: caffeineCacheImpl.remove(message.getCacheName(), message.getKey()); break; case JbootCaredisMessage.ACTION_REMOVE_ALL: caffeineCacheImpl.removeAll(message.getCacheName()); break; } } public CaffeineCacheImpl getCaffeineCacheImpl() { return caffeineCacheImpl; } public JbootRedisCacheImpl getRedisCacheImpl() { return redisCacheImpl; } }
3,513
14,425
<gh_stars>1000+ /** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.api.records.impl.pb; import org.apache.hadoop.yarn.api.records.NodeAttributeKey; import org.apache.hadoop.yarn.api.records.NodeAttributeInfo; import org.apache.hadoop.yarn.api.records.NodeAttributeType; import org.apache.hadoop.yarn.proto.YarnProtos.NodeAttributeKeyProto; import org.apache.hadoop.yarn.proto.YarnProtos.NodeAttributeInfoProto; import org.apache.hadoop.yarn.proto.YarnProtos.NodeAttributeInfoProtoOrBuilder; import org.apache.hadoop.yarn.proto.YarnProtos.NodeAttributeTypeProto; /** * Implementation for NodeAttributeInfo. * */ public class NodeAttributeInfoPBImpl extends NodeAttributeInfo { private NodeAttributeInfoProto proto = NodeAttributeInfoProto.getDefaultInstance(); private NodeAttributeInfoProto.Builder builder = null; private boolean viaProto = false; public NodeAttributeInfoPBImpl() { builder = NodeAttributeInfoProto.newBuilder(); } public NodeAttributeInfoPBImpl(NodeAttributeInfoProto proto) { this.proto = proto; viaProto = true; } public NodeAttributeInfoProto getProto() { proto = viaProto ? proto : builder.build(); viaProto = true; return proto; } private void maybeInitBuilder() { if (viaProto || builder == null) { builder = NodeAttributeInfoProto.newBuilder(proto); } viaProto = false; } @Override public NodeAttributeKey getAttributeKey() { NodeAttributeInfoProtoOrBuilder p = viaProto ? proto : builder; if (!p.hasAttributeKey()) { return null; } return convertFromProtoFormat(p.getAttributeKey()); } @Override public void setAttributeKey(NodeAttributeKey attributeKey) { maybeInitBuilder(); if (attributeKey == null) { builder.clearAttributeKey(); return; } builder.setAttributeKey(convertToProtoFormat(attributeKey)); } @Override public NodeAttributeType getAttributeType() { NodeAttributeInfoProtoOrBuilder p = viaProto ? proto : builder; if (!p.hasAttributeType()) { return null; } return convertFromProtoFormat(p.getAttributeType()); } @Override public void setAttributeType(NodeAttributeType attributeType) { maybeInitBuilder(); if (attributeType == null) { builder.clearAttributeType(); return; } builder.setAttributeType(convertToProtoFormat(attributeType)); } private NodeAttributeTypeProto convertToProtoFormat( NodeAttributeType attributeType) { return NodeAttributeTypeProto.valueOf(attributeType.name()); } private NodeAttributeType convertFromProtoFormat( NodeAttributeTypeProto containerState) { return NodeAttributeType.valueOf(containerState.name()); } private NodeAttributeKeyPBImpl convertFromProtoFormat( NodeAttributeKeyProto attributeKeyProto) { return new NodeAttributeKeyPBImpl(attributeKeyProto); } private NodeAttributeKeyProto convertToProtoFormat( NodeAttributeKey attributeKey) { return ((NodeAttributeKeyPBImpl) attributeKey).getProto(); } @Override public int hashCode() { return getAttributeKey().hashCode(); } @Override public boolean equals(Object obj) { if (this == obj) { return true; } if (obj == null) { return false; } if (obj instanceof NodeAttributeInfo) { NodeAttributeInfo other = (NodeAttributeInfo) obj; return getAttributeKey().equals(other.getAttributeKey()); } return false; } @Override public String toString() { StringBuilder strBuilder = new StringBuilder(); NodeAttributeKey key = this.getAttributeKey(); strBuilder.append(key.getAttributePrefix()).append("/") .append(key.getAttributeName()).append("(") .append(this.getAttributeType()).append(")"); return strBuilder.toString(); } }
1,487
810
<filename>src/main/java/io/github/spencerpark/ijava/magics/ClasspathMagics.java<gh_stars>100-1000 package io.github.spencerpark.ijava.magics; import io.github.spencerpark.jupyter.kernel.magic.registry.LineMagic; import io.github.spencerpark.jupyter.kernel.util.GlobFinder; import java.io.IOException; import java.util.List; import java.util.function.Consumer; import java.util.stream.Collectors; import java.util.stream.StreamSupport; public class ClasspathMagics { private final Consumer<String> addToClasspath; public ClasspathMagics(Consumer<String> addToClasspath) { this.addToClasspath = addToClasspath; } @LineMagic public List<String> jars(List<String> args) { List<String> jars = args.stream() .map(GlobFinder::new) .flatMap(g -> { try { return StreamSupport.stream(g.computeMatchingFiles().spliterator(), false); } catch (IOException e) { throw new RuntimeException("Exception resolving jar glob", e); } }) .map(p -> p.toAbsolutePath().toString()) .collect(Collectors.toList()); jars.forEach(this.addToClasspath); return jars; } @LineMagic public List<String> classpath(List<String> args) { List<String> paths = args.stream() .map(GlobFinder::new) .flatMap(g -> { try { return StreamSupport.stream(g.computeMatchingPaths().spliterator(), false); } catch (IOException e) { throw new RuntimeException("Exception resolving jar glob", e); } }) .map(p -> p.toAbsolutePath().toString()) .collect(Collectors.toList()); paths.forEach(this.addToClasspath); return paths; } }
929
835
<gh_stars>100-1000 package ai.verta.modeldb.entities.config; import ai.verta.modeldb.common.exceptions.ModelDBException; import ai.verta.modeldb.versioning.ContinuousHyperparameterSetConfigBlob; import ai.verta.modeldb.versioning.DiscreteHyperparameterSetConfigBlob; import ai.verta.modeldb.versioning.HyperparameterSetConfigBlob; import ai.verta.modeldb.versioning.HyperparameterSetConfigBlob.ValueCase; import ai.verta.modeldb.versioning.HyperparameterValuesConfigBlob; import io.grpc.Status.Code; import java.io.Serializable; import java.util.Collection; import java.util.HashSet; import java.util.List; import java.util.Set; import java.util.stream.Collectors; import javax.persistence.CascadeType; import javax.persistence.Column; import javax.persistence.Entity; import javax.persistence.Id; import javax.persistence.JoinColumn; import javax.persistence.JoinTable; import javax.persistence.ManyToMany; import javax.persistence.ManyToOne; import javax.persistence.Table; @Entity @Table(name = "hyperparameter_set_config_blob") public class HyperparameterSetConfigBlobEntity implements Serializable { private HyperparameterSetConfigBlobEntity() {} public HyperparameterSetConfigBlobEntity( String blobHash, HyperparameterSetConfigBlob hyperparameterSetConfigBlob) throws ModelDBException { this.blob_hash = blobHash; this.name = hyperparameterSetConfigBlob.getName(); this.value_type = hyperparameterSetConfigBlob.getValueCase().getNumber(); } @Id @Column(name = "blob_hash", columnDefinition = "varchar", length = 64, nullable = false) private String blob_hash; @Column(name = "name", columnDefinition = "varchar") private String name; @Column(name = "value_type") private Integer value_type; @ManyToOne(cascade = CascadeType.ALL) @JoinColumn(name = "interval_begin_hash") private HyperparameterElementConfigBlobEntity interval_begin_hash; @ManyToOne(cascade = CascadeType.ALL) @JoinColumn(name = "interval_end_hash") private HyperparameterElementConfigBlobEntity interval_end_hash; @ManyToOne(cascade = CascadeType.ALL) @JoinColumn(name = "interval_step_hash") private HyperparameterElementConfigBlobEntity interval_step_hash; @ManyToMany @JoinTable( name = "hyperparameter_discrete_set_element_mapping", joinColumns = {@JoinColumn(name = "set_hash")}, inverseJoinColumns = {@JoinColumn(name = "element_hash")}) private Set<HyperparameterElementConfigBlobEntity> hyperparameterSetConfigElementMapping = new HashSet<>(); public void setInterval_begin_hash(HyperparameterElementConfigBlobEntity interval_begin_hash) { this.interval_begin_hash = interval_begin_hash; } public void setInterval_end_hash(HyperparameterElementConfigBlobEntity interval_end_hash) { this.interval_end_hash = interval_end_hash; } public void setInterval_step_hash(HyperparameterElementConfigBlobEntity interval_step_hash) { this.interval_step_hash = interval_step_hash; } public void setHyperparameterSetConfigElementMapping( Collection<HyperparameterElementConfigBlobEntity> hyperparameterSetConfigElementMapping) { this.hyperparameterSetConfigElementMapping.addAll(hyperparameterSetConfigElementMapping); } public HyperparameterSetConfigBlob toProto() throws ModelDBException { var builder = HyperparameterSetConfigBlob.newBuilder().setName(this.name); var valueCase = ValueCase.forNumber(this.value_type); if (valueCase == null) { throw new ModelDBException( "Invalid value found for HyperparameterSetConfigBlob", Code.INVALID_ARGUMENT); } switch (valueCase) { case CONTINUOUS: builder.setContinuous( ContinuousHyperparameterSetConfigBlob.newBuilder() .setIntervalBegin(this.interval_begin_hash.toProto()) .setIntervalEnd(this.interval_end_hash.toProto()) .setIntervalStep(this.interval_step_hash.toProto()) .build()); break; case DISCRETE: List<HyperparameterValuesConfigBlob> valueSetConfigBlob = this.hyperparameterSetConfigElementMapping.stream() .map(HyperparameterElementConfigBlobEntity::toProto) .collect(Collectors.toList()); DiscreteHyperparameterSetConfigBlob hyperparameterSetConfigBlob = DiscreteHyperparameterSetConfigBlob.newBuilder() .addAllValues(valueSetConfigBlob) .build(); builder.setDiscrete(hyperparameterSetConfigBlob); break; case VALUE_NOT_SET: default: throw new ModelDBException( "Invalid value found for HyperparameterSetConfigBlob", Code.INVALID_ARGUMENT); } return builder.build(); } public String getBlobHash() { return blob_hash; } }
1,743
854
__________________________________________________________________________________________________ sample 68 ms submission # Definition for a binary tree node. # class TreeNode: # def __init__(self, x): # self.val = x # self.left = None # self.right = None class Solution: def searchBST(self, root: TreeNode, val: int) -> TreeNode: if val == root.val: return root if val < root.val: if root.left: return self.searchBST(root.left, val) return None if root.right: return self.searchBST(root.right, val) return None __________________________________________________________________________________________________ sample 15128 kb submission # Definition for a binary tree node. # class TreeNode: # def __init__(self, x): # self.val = x # self.left = None # self.right = None class Solution: def searchBST(self, root: TreeNode, val: int) -> TreeNode: if not root: return [] trav = root while trav: if trav.val == val: return trav elif trav.val < val: trav = trav.right else: trav = trav.left __________________________________________________________________________________________________
580
313
<reponame>hz-ants/LabelFusion-docker2- """ This class implements some data augmentation techniques for SegNet training TODO: do resizing at the same time as augmentation TODO: store labels and images in separate directories so you dont have to do this before putting the data trhough Keras data augmentation Keras library trasnfromations can do all the data transformations at once! More info at - https://keras.io/preprocessing/image/ https://blog.keras.io/building-powerful-image-classification-models-using-very-little-data.html dependancies= PIL, TensorFlow, Keras seems its also possible to do augmentation on the fly with custom Caffe Training data layer- have not tested it though https://github.com/NVIDIA/DIGITS/issues/1034 Other basic PIL library transformations to try 1.combinatorial cropping 2.random cropping 3.horizontalflipping 4.rotations 5.color jittering 6.lighting noise """ from labelfusion import utils as LabelFusionUtils import os import random import keras from keras.preprocessing.image import ImageDataGenerator import director.vtkAll as vtk from director import filterUtils from PIL import Image, ImageFilter import glob, os, shutil import itertools class DataAugmentation(object): def __init__(self, img_target_size = (256, 256), params = dict(rotation_range=90., width_shift_range=0.1, height_shift_range=0.1, zoom_range=0.2)): self.target_size = img_target_size self.data_gen_args = params def augmentWithKeras(self,log_folder): d = LabelFusionUtils.getFilenames(log_folder) path_to_img = d["images"] labels = glob.iglob(os.path.join(path_to_img, "*labels.png")) images = glob.iglob(os.path.join(path_to_img, "*rgb.png")) #create two directories for generators lab_dir = "labels_tmp/" img_dir = "images_tmp/" lab_dir_full_path = lab_dir+"images" img_dir_full_path = img_dir + "images" os.makedirs(lab_dir_full_path) os.makedirs(img_dir_full_path) for label, pic in itertools.izip(labels,images): shutil.copy2(label, lab_dir_full_path) shutil.copy2(pic, img_dir_full_path) self.generateAugmentedImages(img_dir,lab_dir,path_to_img) #note overwrites aug-prefixed images in folder shutil.rmtree(lab_dir) shutil.rmtree(img_dir) def generateAugmentedImages(self,images_dir, labels_dir, save_dir): image_datagen = ImageDataGenerator(**self.data_gen_args) mask_datagen = ImageDataGenerator(**self.data_gen_args) seed = 1 # image_datagen.fit(images, augment=True, seed=seed) # mask_datagen.fit(masks, augment=True, seed=seed) """ next 2 blocks of code are typically used as generator for model training in keras. Instead you can hack the generator by iterating over batches and using the save_to_dir param to save augmented_images. """ i = 0 num_batches = 20 for batch in image_datagen.flow_from_directory( images_dir, class_mode=None, seed=seed, save_to_dir = save_dir, save_format = "png", save_prefix = "aug_images", target_size = self.target_size): i+=1 if i>num_batches: break i = 0 for batch in mask_datagen.flow_from_directory( labels_dir, class_mode=None, seed=seed, save_to_dir = save_dir, save_format = "png", save_prefix = "aug_label", target_size = self.target_size): i+=1 if i>num_batches: break """ io functions and simple PIL transformations for augmentations def saveImage(self, labeled_image, rgb_image): rgbFilename,labelFilename = generateFileNames() print 'writing:', rgbFilename rbg_image.save(rgbFilename, 'png') labeled_image.save(labelFilename, 'png') # now save the utime print 'writing:', utimeFilename text_file = open(utimeFilename, "w") text_file.write(str(utime)) text_file.close() def generateFileNames(): baseFilename = LabelFusionUtils.convertImageIDToPaddedString(self.counter) + "_" baseFilename = os.path.join(self.fileSaveLocation, baseFilename) rgbFilename = baseFilename + "rbg.png" labelFilename = baseFilename + "label.png" utimeFilename = baseFilename + "utime.txt" self.counter += 1 return (rgbFilename,labelFilename) def rotate(image,degrees): return image.transpose(degrees) def blur(image, blur_param): #might need to refine edge sfor pixelwise labeling part return image.filter(ImageFilter.GassianBlur(blur_param)) def randomCrop(labeled, rgb, image, width, height): im_width, im_height = im.size #cropping logic return (labeled.resize((im_width,im_height)),rgb.resize((im_width,im_height))) """
2,193
535
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ #include "os/mynewt.h" #include <oic/oc_api.h> #include <cborattr/cborattr.h> #include "test_oic.h" static int test_observe_state; static volatile int test_observe_done; static struct oc_resource *test_res_observe; static void test_observe_next_step(struct os_event *); static struct os_event test_observe_next_ev = { .ev_cb = test_observe_next_step }; static void test_observe_get(struct oc_request *request, oc_interface_mask_t interface) { oc_rep_start_root_object(); switch (interface) { case OC_IF_BASELINE: oc_process_baseline_interface(request->resource); case OC_IF_R: switch (test_observe_state) { case 1: case 2: /* initial get request */ case 3: case 4: case 5: case 6: oc_rep_set_int(root, value, test_observe_state); break; default: break; } default: break; } oc_rep_end_root_object(); oc_send_response(request, OC_STATUS_OK); } static void test_observe_rsp(struct oc_client_response *rsp) { long long rsp_value; struct cbor_attr_t attrs[] = { [0] = { .attribute = "state", .type = CborAttrIntegerType, .addr.integer = &rsp_value, .dflt.integer = 0 }, [1] = { } }; struct os_mbuf *m; uint16_t data_off; int len; switch (test_observe_state) { case 1: TEST_ASSERT(rsp->code == OC_STATUS_NOT_FOUND); break; case 2: case 3: case 4: TEST_ASSERT(rsp->code == OC_STATUS_OK); len = coap_get_payload(rsp->packet, &m, &data_off); if (cbor_read_mbuf_attrs(m, data_off, len, attrs)) { TEST_ASSERT(rsp_value == test_observe_state); } break; default: break; } os_eventq_put(os_eventq_dflt_get(), &test_observe_next_ev); } static void test_observe_next_step(struct os_event *ev) { bool b_rc; int rc; struct oc_server_handle server; test_observe_state++; switch (test_observe_state) { case 1: test_res_observe = oc_new_resource("/observe", 1, 0); TEST_ASSERT_FATAL(test_res_observe); oc_resource_bind_resource_interface(test_res_observe, OC_IF_R); oc_resource_set_default_interface(test_res_observe, OC_IF_R); oc_resource_set_observable(test_res_observe); oc_resource_set_request_handler(test_res_observe, OC_GET, test_observe_get); b_rc = oc_add_resource(test_res_observe); TEST_ASSERT(b_rc == true); /* * Observe nonexistent URI */ oic_test_get_endpoint(&server); b_rc = oc_do_observe("/observe_wrong", &server, NULL, test_observe_rsp, LOW_QOS); TEST_ASSERT_FATAL(b_rc == true); oic_test_reset_tmo("observe1"); break; case 2: oic_test_get_endpoint(&server); b_rc = oc_do_observe("/observe", &server, NULL, test_observe_rsp, LOW_QOS); TEST_ASSERT_FATAL(b_rc == true); oic_test_reset_tmo("observe2"); break; case 3: case 4: /* * Valid notifications */ rc = oc_notify_observers(test_res_observe); TEST_ASSERT(rc == 1); /* one observer */ oic_test_reset_tmo("observe3-4"); break; case 5: test_observe_done = 1; break; default: TEST_ASSERT_FATAL(0); break; } } void test_observe(void) { os_eventq_put(os_eventq_dflt_get(), &test_observe_next_ev); while (!test_observe_done) ; oc_delete_resource(test_res_observe); }
2,159
1,247
<gh_stars>1000+ { "author": "Microsoft", "name": "Arquivo de manifesto da ferramenta local\u00A0Dotnet", "description": "O arquivo que define as ferramentas dotnet\u00A0disponíveis." }
74
577
/* * Copyright (c) 2016 咖枯 <<EMAIL> | <EMAIL>> * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.kaku.weac.activities; import android.content.ClipData; import android.content.ClipboardManager; import android.content.Context; import android.content.Intent; import android.net.Uri; import android.os.Bundle; import android.view.View; import android.widget.Button; import android.widget.ImageView; import android.widget.LinearLayout; import android.widget.TextView; import com.kaku.weac.R; import com.kaku.weac.util.MyUtil; import com.kaku.weac.util.ToastUtil; import com.kaku.weac.zxing.activity.CaptureActivity; /** * 显示扫描结果 * * @author 咖枯 * @version 1.0 2016/1/24 */ public class DisplayScanResultActivity extends BaseActivity implements View.OnClickListener { private String mScanResult; @Override protected void onCreate(Bundle savedInstanceState) { super.onCreate(savedInstanceState); setContentView(R.layout.activity_display_scan_result); LinearLayout backGround = (LinearLayout) findViewById(R.id.background); MyUtil.setBackground(backGround, this); assignViews(); } private void assignViews() { ImageView actionBack = (ImageView) findViewById(R.id.action_back); actionBack.setOnClickListener(this); TextView scanResultContentTv = (TextView) findViewById(R.id.scan_result_content_tv); mScanResult = getIntent().getStringExtra(CaptureActivity.SCAN_RESULT); int type = getIntent().getIntExtra(CaptureActivity.SCAN_TYPE, 0); // 二维码 if (type == 0) { scanResultContentTv.setText(mScanResult); } else { // 条形码 scanResultContentTv.setText(getString(R.string.bar_code, mScanResult)); } Button copyBtn = (Button) findViewById(R.id.copy_btn); copyBtn.setOnClickListener(this); Button searchBtn = (Button) findViewById(R.id.search_btn); searchBtn.setOnClickListener(this); } @Override public void onClick(View v) { switch (v.getId()) { case R.id.action_back: finish(); break; case R.id.copy_btn: ClipboardManager clipboardManager = (ClipboardManager) getSystemService( Context.CLIPBOARD_SERVICE); // 将文本复制到剪贴板 clipboardManager.setPrimaryClip(ClipData.newPlainText("data", mScanResult)); ToastUtil.showShortToast(this, getString(R.string.text_already_copied)); break; case R.id.search_btn: // TODO: 自定义浏览器webview Intent intent = new Intent(); intent.setAction("android.intent.action.VIEW"); if (intent.resolveActivity(getPackageManager()) != null) { Uri uri = Uri.parse("http://www.baidu.com/s?wd=" + mScanResult); intent.setData(uri); startActivity(intent); finish(); overridePendingTransition(0, 0); } else { ToastUtil.showLongToast(this, getString(R.string.no_browser)); } break; } } }
1,645
2,227
<filename>__fixtures__/linked-packages-with-linked-dev-dep/package.json<gh_stars>1000+ { "private": true, "name": "linked-packages-with-linked-dev-dep", "description": "linked projects where one of packages has a dev dependency on another", "version": "1.0.0", "bolt": { "workspaces": [ "packages/*" ] } }
124
567
<filename>todo/mail/consumers/__init__.py def tracker_consumer(**kwargs): def tracker_factory(producer): # the import needs to be delayed until call to enable # using the wrapper in the django settings from .tracker import tracker_consumer return tracker_consumer(producer, **kwargs) return tracker_factory
121
713
<gh_stars>100-1000 package org.infinispan.server.hotrod; import java.util.concurrent.TimeUnit; import org.infinispan.util.KeyValuePair; /** * @author wburns * @since 9.0 */ public enum TimeUnitValue { SECONDS(0x00), MILLISECONDS(0x01), NANOSECONDS(0x02), MICROSECONDS(0x03), MINUTES(0x04), HOURS(0x05), DAYS(0x06), DEFAULT(0x07), INFINITE(0x08); private final byte code; TimeUnitValue(int code) { this.code = (byte) code; } public byte getCode() { return code; } public TimeUnit toTimeUnit() { switch (code) { case 0x00: return TimeUnit.SECONDS; case 0x01: return TimeUnit.MILLISECONDS; case 0x02: return TimeUnit.NANOSECONDS; case 0x03: return TimeUnit.MICROSECONDS; case 0x04: return TimeUnit.MINUTES; case 0x05: return TimeUnit.HOURS; case 0x06: return TimeUnit.DAYS; default: throw new IllegalArgumentException("TimeUnit not supported for: " + code); } } public static TimeUnitValue fromTimeUnit(TimeUnit unit) { switch (unit) { case MICROSECONDS: return TimeUnitValue.MICROSECONDS; case MILLISECONDS: return TimeUnitValue.MILLISECONDS; case SECONDS: return TimeUnitValue.SECONDS; case MINUTES: return TimeUnitValue.MINUTES; case HOURS: return TimeUnitValue.HOURS; case DAYS: return TimeUnitValue.DAYS; default: throw new IllegalArgumentException(unit.name()); } } public static TimeUnitValue decode(byte rightBits) { switch (rightBits) { case 0x00: return SECONDS; case 0x01: return MILLISECONDS; case 0x02: return NANOSECONDS; case 0x03: return MICROSECONDS; case 0x04: return MINUTES; case 0x05: return HOURS; case 0x06: return DAYS; case 0x07: return DEFAULT; case 0x08: return INFINITE; default: throw new IllegalArgumentException("Unsupported byte value: " + rightBits); } } public static KeyValuePair<TimeUnitValue, TimeUnitValue> decodePair(byte timeUnitValues) { return new KeyValuePair<>(decode((byte) ((timeUnitValues & 0xf0) >> 4)), decode((byte) (timeUnitValues & 0x0f))); } private static byte encodeDuration(long duration, TimeUnit timeUnit) { return duration == 0 ? DEFAULT.code : duration < 0 ? INFINITE.code : fromTimeUnit(timeUnit).code; } public static byte encodeTimeUnits(long lifespan, TimeUnit lifespanTimeUnit, long maxIdle, TimeUnit maxIdleTimeUnit) { byte encodedLifespan = encodeDuration(lifespan, lifespanTimeUnit); byte encodedMaxIdle = encodeDuration(maxIdle, maxIdleTimeUnit); return (byte) (encodedLifespan << 4 | encodedMaxIdle); } }
1,463
2,338
//===-- Unittests for frexpl ----------------------------------------------===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// #include "FrexpTest.h" #include "src/math/frexpl.h" LIST_FREXP_TESTS(long double, __llvm_libc::frexpl)
141
14,425
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.security; import static org.apache.hadoop.security.LdapGroupsMapping.LDAP_CTX_FACTORY_CLASS_DEFAULT; import static org.apache.hadoop.security.LdapGroupsMapping.LDAP_CTX_FACTORY_CLASS_KEY; import static org.apache.hadoop.security.LdapGroupsMapping.LDAP_URL_KEY; import static org.junit.Assert.assertEquals; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; import javax.naming.Context; import javax.naming.NamingEnumeration; import javax.naming.NamingException; import javax.naming.directory.Attribute; import javax.naming.directory.Attributes; import javax.naming.directory.BasicAttribute; import javax.naming.directory.BasicAttributes; import javax.naming.directory.DirContext; import javax.naming.directory.SearchControls; import javax.naming.directory.SearchResult; import javax.naming.ldap.InitialLdapContext; import javax.naming.spi.InitialContextFactory; import org.apache.hadoop.conf.Configuration; import org.junit.Before; import org.mockito.Mock; import org.mockito.Mockito; import org.mockito.MockitoAnnotations; import org.mockito.Spy; import java.util.Hashtable; public class TestLdapGroupsMappingBase { @Mock private DirContext context; @Mock private NamingEnumeration<SearchResult> userNames; @Mock private NamingEnumeration<SearchResult> groupNames; @Mock private NamingEnumeration<SearchResult> parentGroupNames; @Mock private SearchResult userSearchResult; @Mock private Attributes attributes; @Spy private LdapGroupsMapping groupsMapping = new LdapGroupsMapping(); private String[] testGroups = new String[] {"group1", "group2"}; private String[] testParentGroups = new String[] {"group1", "group2", "group1_1"}; @Before public void setupMocksBase() throws NamingException { DummyLdapCtxFactory.reset(); MockitoAnnotations.initMocks(this); DirContext ctx = getContext(); when(ctx.search(Mockito.anyString(), Mockito.anyString(), Mockito.any(Object[].class), Mockito.any(SearchControls.class))). thenReturn(userNames); // We only ever call hasMoreElements once for the user NamingEnum, so // we can just have one return value when(userNames.hasMoreElements()).thenReturn(true); SearchResult groupSearchResult = mock(SearchResult.class); // We're going to have to define the loop here. We want two iterations, // to get both the groups when(groupNames.hasMoreElements()).thenReturn(true, true, false); when(groupNames.nextElement()).thenReturn(groupSearchResult); // Define the attribute for the name of the first group Attribute group1Attr = new BasicAttribute("cn"); group1Attr.add(testGroups[0]); Attributes group1Attrs = new BasicAttributes(); group1Attrs.put(group1Attr); // Define the attribute for the name of the second group Attribute group2Attr = new BasicAttribute("cn"); group2Attr.add(testGroups[1]); Attributes group2Attrs = new BasicAttributes(); group2Attrs.put(group2Attr); // This search result gets reused, so return group1, then group2 when(groupSearchResult.getAttributes()). thenReturn(group1Attrs, group2Attrs); when(getUserNames().nextElement()). thenReturn(getUserSearchResult()); when(getUserSearchResult().getAttributes()).thenReturn(getAttributes()); // Define results for groups 1 level up SearchResult parentGroupResult = mock(SearchResult.class); // only one parent group when(parentGroupNames.hasMoreElements()).thenReturn(true, false); when(parentGroupNames.nextElement()). thenReturn(parentGroupResult); // Define the attribute for the parent group Attribute parentGroup1Attr = new BasicAttribute("cn"); parentGroup1Attr.add(testParentGroups[2]); Attributes parentGroup1Attrs = new BasicAttributes(); parentGroup1Attrs.put(parentGroup1Attr); // attach the attributes to the result when(parentGroupResult.getAttributes()).thenReturn(parentGroup1Attrs); when(parentGroupResult.getNameInNamespace()). thenReturn("CN=some_group,DC=test,DC=com"); } protected Configuration getBaseConf() { return getBaseConf("ldap://test"); } protected Configuration getBaseConf(String ldapUrl) { return getBaseConf(ldapUrl, getContext()); } protected Configuration getBaseConf( String ldapUrl, DirContext contextToReturn) { DummyLdapCtxFactory.setContextToReturn(contextToReturn); DummyLdapCtxFactory.setExpectedLdapUrl(ldapUrl); Configuration conf = new Configuration(); conf.set(LDAP_URL_KEY, ldapUrl); conf.setClass(LDAP_CTX_FACTORY_CLASS_KEY, DummyLdapCtxFactory.class, InitialContextFactory.class); return conf; } protected DirContext getContext() { return context; } protected NamingEnumeration<SearchResult> getUserNames() { return userNames; } protected NamingEnumeration<SearchResult> getGroupNames() { return groupNames; } protected SearchResult getUserSearchResult() { return userSearchResult; } protected Attributes getAttributes() { return attributes; } protected LdapGroupsMapping getGroupsMapping() { return groupsMapping; } protected String[] getTestGroups() { return testGroups; } protected NamingEnumeration getParentGroupNames() { return parentGroupNames; } protected String[] getTestParentGroups() { return testParentGroups; } /** * Ldap Context Factory implementation to be used for testing to check * contexts are requested for the expected LDAP server URLs etc. */ public static class DummyLdapCtxFactory implements InitialContextFactory { private static DirContext contextToReturn; private static String expectedLdapUrl; private static String expectedBindUser; private static String expectedBindPassword; public DummyLdapCtxFactory() { } protected static void setContextToReturn(DirContext ctx) { contextToReturn = ctx; } protected static void setExpectedLdapUrl(String url) { expectedLdapUrl = url; } public static void setExpectedBindUser(String bindUser) { expectedBindUser = bindUser; } public static void setExpectedBindPassword(String bindPassword) { expectedBindPassword = <PASSWORD>Password; } public static void reset() { expectedLdapUrl = null; expectedBindUser = null; expectedBindPassword = null; } @Override public Context getInitialContext(Hashtable<?, ?> env) throws NamingException { if (expectedLdapUrl != null) { String actualLdapUrl = (String) env.get(Context.PROVIDER_URL); assertEquals(expectedLdapUrl, actualLdapUrl); } if (expectedBindUser != null) { String actualBindUser = (String) env.get(Context.SECURITY_PRINCIPAL); assertEquals(expectedBindUser, actualBindUser); } if (expectedBindPassword != null) { String actualBindPassword = (String) env.get( Context.SECURITY_CREDENTIALS); assertEquals(expectedBindPassword, actualBindPassword); } if (contextToReturn == null) { Hashtable<Object, Object> newEnv = new Hashtable<>(env); newEnv.put(Context.INITIAL_CONTEXT_FACTORY, LDAP_CTX_FACTORY_CLASS_DEFAULT); contextToReturn = new InitialLdapContext(newEnv, null); } return contextToReturn; } } }
2,784
704
<reponame>klen/muffin """Implement Muffin Application.""" import typing as t from types import ModuleType import logging import inspect from asgi_tools import App as BaseApp from modconfig import Config from . import CONFIG_ENV_VARIABLE from .utils import import_submodules class MuffinException(Exception): """Base class for Muffin Errors.""" pass class Application(BaseApp): """The Muffin Application.""" # Default configuration values defaults: t.Dict = dict( # The application's name NAME='muffin', # Path to configuration module CONFIG=None, # Enable debug mode (optional) DEBUG=False, # Routing options TRIM_LAST_SLASH=True, # Static files options STATIC_URL_PREFIX='/static', STATIC_FOLDERS=[], # Logging options LOG_LEVEL='WARNING', LOG_FORMAT='%(asctime)s [%(process)d] [%(levelname)s] %(message)s', LOG_DATE_FORMAT='[%Y-%m-%d %H:%M:%S]', LOG_CONFIG=None, ) def __init__(self, *cfg_mods: t.Union[str, ModuleType], **options): """Initialize the application. :param *cfg_mods: modules to import application's config :param **options: Configuration options """ from .plugins import BasePlugin self.plugins: t.Dict[str, BasePlugin] = dict() # Setup the configuration self.cfg = Config(**self.defaults, config_config=dict(update_from_env=False)) options['CONFIG'] = self.cfg.update_from_modules(*cfg_mods, 'env:%s' % CONFIG_ENV_VARIABLE) self.cfg.update(**options) self.cfg.update_from_env(prefix=f"{ self.cfg.name }_") # Setup CLI from .manage import Manager self.manage = Manager(self) # Setup logging LOG_CONFIG = self.cfg.get('LOG_CONFIG') if LOG_CONFIG and isinstance(LOG_CONFIG, dict) and LOG_CONFIG.get('version'): logging.config.dictConfig(LOG_CONFIG) # type: ignore self.logger = logging.getLogger('muffin') self.logger.setLevel(self.cfg.LOG_LEVEL) self.logger.propagate = False if not self.logger.handlers: ch = logging.StreamHandler() ch.setFormatter(logging.Formatter( self.cfg.LOG_FORMAT, self.cfg.LOG_DATE_FORMAT)) self.logger.addHandler(ch) super(Application, self).__init__( debug=self.cfg.DEBUG, logger=self.logger, trim_last_slash=self.cfg.TRIM_LAST_SLASH, static_folders=self.cfg.STATIC_FOLDERS, static_url_prefix=self.cfg.STATIC_URL_PREFIX, ) def __repr__(self) -> str: """Human readable representation.""" return f"<muffin.Application: { self.cfg.name }>" def import_submodules(self, *submodules: str) -> t.Dict[str, ModuleType]: """Import application components.""" parent_frame = inspect.stack()[1][0] package_name = parent_frame.f_locals['__name__'] return import_submodules(package_name, *submodules)
1,346
633
<gh_stars>100-1000 from models.basic.basic_model import BasicModel from models.encoders.mobilenet import MobileNet from layers.convolution import conv2d_transpose, conv2d import tensorflow as tf class FCN8sMobileNetTFRecords(BasicModel): """ FCN8s with MobileNet as an encoder Model Architecture """ def __init__(self, args): super().__init__(args) # init encoder self.encoder = None # init network layers self.upscore2 = None self.score_feed1 = None self.fuse_feed1 = None self.upscore4 = None self.score_feed2 = None self.fuse_feed2 = None self.upscore8 = None # init tfrecords needs self.handle = None self.training_iterator = None self.validation_iterator = None self.next_img = None self.training_handle = None self.validation_handle = None # get the default session self.sess = tf.get_default_session() def build(self): print("\nBuilding the MODEL...") self.init_input() self.init_tfrecord_input() self.init_network() self.init_output() self.init_train() self.init_summaries() print("The Model is built successfully\n") def init_tfrecord_input(self): if self.args.mode == 'train': print("USING TF RECORDS") # Use `tf.parse_single_example()` to extract data from a `tf.Example` # protocol buffer, and perform any additional per-record preprocessing. def parser(record): keys_to_features = { 'height': tf.FixedLenFeature([], tf.int64), 'width': tf.FixedLenFeature([], tf.int64), 'image_raw': tf.FixedLenFeature([], tf.string), 'mask_raw': tf.FixedLenFeature([], tf.string) } parsed = tf.parse_single_example(record, keys_to_features) image = tf.cast(tf.decode_raw(parsed['image_raw'], tf.uint8), tf.float32) annotation = tf.cast(tf.decode_raw(parsed['mask_raw'], tf.uint8), tf.int32) height = tf.cast(parsed['height'], tf.int32) width = tf.cast(parsed['width'], tf.int32) image_shape = tf.stack([height, width, 3]) annotation_shape = tf.stack([height, width]) image = tf.reshape(image, image_shape) annotation = tf.reshape(annotation, annotation_shape) return image, annotation # Use `Dataset.map()` to build a pair of a feature dictionary and a label # tensor for each example. train_filename = "./data/" + self.args.tfrecord_train_file train_dataset = tf.contrib.data.TFRecordDataset(['./data/cscapes_train_1.tfrecords', \ './data/cscapes_train_2.tfrecords',\ './data/cscapes_train_3.tfrecord', \ './data/cscapes_train_4.tfrecords' ])#train_filename) train_dataset = train_dataset.map(parser) train_dataset = train_dataset.shuffle(buffer_size=self.args.tfrecord_train_len) train_dataset = train_dataset.batch(self.args.batch_size) train_dataset = train_dataset.repeat() val_filename = "./data/" + self.args.tfrecord_val_file val_dataset = tf.contrib.data.TFRecordDataset(val_filename) val_dataset = val_dataset.map(parser) val_dataset = val_dataset.batch(self.args.batch_size) self.training_iterator = train_dataset.make_one_shot_iterator() self.validation_iterator = val_dataset.make_initializable_iterator() self.training_handle = self.sess.run(self.training_iterator.string_handle()) self.validation_handle = self.sess.run(self.validation_iterator.string_handle()) self.handle = tf.placeholder(tf.string, shape=[]) iterator = tf.contrib.data.Iterator.from_string_handle(self.handle, train_dataset.output_types, train_dataset.output_shapes) self.next_img = iterator.get_next() self.x_pl, self.y_pl = self.next_img self.x_pl.set_shape([None, self.args.img_height, self.args.img_width, 3]) self.y_pl.set_shape([None, self.args.img_height, self.args.img_width]) def init_network(self): """ Building the Network here :return: """ # Init MobileNet as an encoder self.encoder = MobileNet(x_input=self.x_pl, num_classes=self.params.num_classes, pretrained_path=self.args.pretrained_path, train_flag=self.is_training, width_multipler=1.0, weight_decay=self.args.weight_decay) # Build Encoding part self.encoder.build() # Build Decoding part with tf.name_scope('upscore_2s'): self.upscore2 = conv2d_transpose('upscore2', x=self.encoder.score_fr, output_shape=[self.args.batch_size] + self.encoder.feed1.shape.as_list()[1:3] + [self.params.num_classes], kernel_size=(4, 4), stride=(2, 2), l2_strength=self.encoder.wd) self.score_feed1 = conv2d('score_feed1', x=self.encoder.feed1, num_filters=self.params.num_classes, kernel_size=(1, 1), l2_strength=self.encoder.wd) self.fuse_feed1 = tf.add(self.score_feed1, self.upscore2) with tf.name_scope('upscore_4s'): self.upscore4 = conv2d_transpose('upscore4', x=self.fuse_feed1, output_shape=[self.args.batch_size] + self.encoder.feed2.shape.as_list()[1:3] + [self.params.num_classes], kernel_size=(4, 4), stride=(2, 2), l2_strength=self.encoder.wd) self.score_feed2 = conv2d('score_feed2', x=self.encoder.feed2, num_filters=self.params.num_classes, kernel_size=(1, 1), l2_strength=self.encoder.wd) self.fuse_feed2 = tf.add(self.score_feed2, self.upscore4) with tf.name_scope('upscore_8s'): self.upscore8 = conv2d_transpose('upscore8', x=self.fuse_feed2, output_shape=[self.args.batch_size] + self.x_pl.shape.as_list()[1:3] + [self.params.num_classes], kernel_size=(16, 16), stride=(8, 8), l2_strength=self.encoder.wd) self.logits = self.upscore8
3,820
8,649
package org.hswebframework.web.authorization.token; import org.hswebframework.web.authorization.Authentication; import reactor.core.publisher.Mono; /** * @author zhouhao * @since 1.0 */ public interface ThirdPartReactiveAuthenticationManager { /** * @return 支持的tokenType */ String getTokenType(); /** * 根据用户ID获取权限信息 * * @param userId 用户ID * @return 权限信息 */ Mono<Authentication> getByUserId(String userId); }
216
777
<gh_stars>100-1000 // Copyright 2014 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "chrome/browser/chromeos/login/session/chrome_session_manager.h" #include <memory> #include "base/command_line.h" #include "base/logging.h" #include "base/memory/ptr_util.h" #include "base/sys_info.h" #include "chrome/browser/browser_process.h" #include "chrome/browser/browser_process_platform_part_chromeos.h" #include "chrome/browser/chrome_notification_types.h" #include "chrome/browser/chromeos/app_mode/arc/arc_kiosk_app_manager.h" #include "chrome/browser/chromeos/app_mode/kiosk_app_launch_error.h" #include "chrome/browser/chromeos/app_mode/kiosk_app_manager.h" #include "chrome/browser/chromeos/arc/arc_service_launcher.h" #include "chrome/browser/chromeos/boot_times_recorder.h" #include "chrome/browser/chromeos/login/lock/webui_screen_locker.h" #include "chrome/browser/chromeos/login/login_wizard.h" #include "chrome/browser/chromeos/login/session/user_session_manager.h" #include "chrome/browser/chromeos/login/wizard_controller.h" #include "chrome/browser/chromeos/policy/browser_policy_connector_chromeos.h" #include "chrome/browser/profiles/profile.h" #include "chrome/browser/signin/signin_manager_factory.h" #include "chrome/browser/ui/ash/ash_util.h" #include "chrome/common/chrome_switches.h" #include "chrome/common/pref_names.h" #include "chromeos/audio/cras_audio_handler.h" #include "chromeos/chromeos_switches.h" #include "chromeos/cryptohome/cryptohome_parameters.h" #include "chromeos/dbus/dbus_thread_manager.h" #include "chromeos/dbus/session_manager_client.h" #include "components/arc/arc_bridge_service.h" #include "components/prefs/pref_service.h" #include "components/signin/core/account_id/account_id.h" #include "components/signin/core/browser/signin_manager.h" #include "components/user_manager/user_manager.h" #include "components/user_manager/user_names.h" #include "content/public/browser/notification_service.h" #include "content/public/common/content_switches.h" namespace chromeos { namespace { // Whether kiosk auto launch should be started. bool ShouldAutoLaunchKioskApp(const base::CommandLine& command_line) { KioskAppManager* app_manager = KioskAppManager::Get(); return command_line.HasSwitch(switches::kLoginManager) && !command_line.HasSwitch(switches::kForceLoginManagerInTests) && app_manager->IsAutoLaunchEnabled() && KioskAppLaunchError::Get() == KioskAppLaunchError::NONE; } // Starts kiosk app auto launch and shows the splash screen. void StartKioskSession() { // Kiosk app launcher starts with login state. session_manager::SessionManager::Get()->SetSessionState( session_manager::SessionState::LOGIN_PRIMARY); ShowLoginWizard(chromeos::OobeScreen::SCREEN_APP_LAUNCH_SPLASH); // Login screen is skipped but 'login-prompt-visible' signal is still needed. VLOG(1) << "Kiosk app auto launch >> login-prompt-visible"; DBusThreadManager::Get()->GetSessionManagerClient()->EmitLoginPromptVisible(); } // Starts the login/oobe screen. void StartLoginOobeSession() { // State will be defined once out-of-box/login branching is complete. ShowLoginWizard(OobeScreen::SCREEN_UNKNOWN); // Reset reboot after update flag when login screen is shown. policy::BrowserPolicyConnectorChromeOS* connector = g_browser_process->platform_part()->browser_policy_connector_chromeos(); if (!connector->IsEnterpriseManaged()) { PrefService* local_state = g_browser_process->local_state(); local_state->ClearPref(prefs::kRebootAfterUpdate); } } // Restores user sessions for a crash-and-restarted chrome. void StartRestoreAfterCrashSession(Profile* user_profile, const std::string& login_user_id) { base::CommandLine* command_line = base::CommandLine::ForCurrentProcess(); // TODO(xiyuan): Identify tests that do not set this kLoginUser flag but // still rely on "stub user" session. Keeping existing behavior to avoid // breaking tests. if (command_line->HasSwitch(chromeos::switches::kLoginUser)) { // This is done in SessionManager::OnProfileCreated during normal login. UserSessionManager* user_session_mgr = UserSessionManager::GetInstance(); user_manager::UserManager* user_manager = user_manager::UserManager::Get(); const user_manager::User* user = user_manager->GetActiveUser(); if (!user) { // This is possible if crash occured after profile removal // (see crbug.com/178290 for some more info). LOG(ERROR) << "Could not get active user after crash."; return; } user_session_mgr->InitRlz(user_profile); user_session_mgr->InitializeCerts(user_profile); user_session_mgr->InitializeCRLSetFetcher(user); user_session_mgr->InitializeCertificateTransparencyComponents(user); if (arc::ArcBridgeService::GetEnabled( base::CommandLine::ForCurrentProcess())) { arc::ArcServiceLauncher::Get()->OnPrimaryUserProfilePrepared( user_profile); } // Send the PROFILE_PREPARED notification and call SessionStarted() // so that the Launcher and other Profile dependent classes are created. content::NotificationService::current()->Notify( chrome::NOTIFICATION_LOGIN_USER_PROFILE_PREPARED, content::NotificationService::AllSources(), content::Details<Profile>(user_profile)); // This call will set session state to SESSION_STATE_ACTIVE (same one). session_manager::SessionManager::Get()->SessionStarted(); // Now is the good time to retrieve other logged in users for this session. // First user has been already marked as logged in and active in // PreProfileInit(). Restore sessions for other users in the background. user_session_mgr->RestoreActiveSessions(); } bool is_running_test = command_line->HasSwitch(::switches::kTestName) || command_line->HasSwitch(::switches::kTestType); if (!is_running_test) { // Enable CrasAudioHandler logging when chrome restarts after crashing. if (chromeos::CrasAudioHandler::IsInitialized()) chromeos::CrasAudioHandler::Get()->LogErrors(); // We did not log in (we crashed or are debugging), so we need to // restore Sync. UserSessionManager::GetInstance()->RestoreAuthenticationSession( user_profile); } } // Starts a user session with stub user. void StartStubLoginSession(Profile* user_profile, const std::string& login_user_id) { // For dev machines and stub user emulate as if sync has been initialized. SigninManagerFactory::GetForProfile(user_profile) ->SetAuthenticatedAccountInfo(login_user_id, login_user_id); StartRestoreAfterCrashSession(user_profile, login_user_id); } } // namespace ChromeSessionManager::ChromeSessionManager() {} ChromeSessionManager::~ChromeSessionManager() {} void ChromeSessionManager::Initialize( const base::CommandLine& parsed_command_line, Profile* profile, bool is_running_test) { // Keep Chrome alive for mash. // TODO(xiyuan): Remove this when session manager is moved out of Chrome. if (chrome::IsRunningInMash() && !base::CommandLine::ForCurrentProcess()->HasSwitch( ::switches::kDisableZeroBrowsersOpenForTests)) { g_browser_process->platform_part()->RegisterKeepAlive(); } // Tests should be able to tune login manager before showing it. Thus only // show login UI (login and out-of-box) in normal (non-testing) mode with // --login-manager switch and if test passed --force-login-manager-in-tests. bool force_login_screen_in_test = parsed_command_line.HasSwitch(switches::kForceLoginManagerInTests); const std::string cryptohome_id = parsed_command_line.GetSwitchValueASCII(switches::kLoginUser); const AccountId login_account_id( cryptohome::Identification::FromString(cryptohome_id).GetAccountId()); KioskAppManager::RemoveObsoleteCryptohomes(); ArcKioskAppManager::RemoveObsoleteCryptohomes(); if (ShouldAutoLaunchKioskApp(parsed_command_line)) { VLOG(1) << "Starting Chrome with kiosk auto launch."; StartKioskSession(); return; } if (parsed_command_line.HasSwitch(switches::kLoginManager) && (!is_running_test || force_login_screen_in_test)) { VLOG(1) << "Starting Chrome with login/oobe screen."; StartLoginOobeSession(); return; } if (!base::SysInfo::IsRunningOnChromeOS() && login_account_id == user_manager::StubAccountId()) { VLOG(1) << "Starting Chrome with stub login."; StartStubLoginSession(profile, login_account_id.GetUserEmail()); return; } VLOG(1) << "Starting Chrome with restart after crash session."; // Restarting Chrome inside existing user session. Possible cases: // 1. Chrome is restarted after crash. // 2. Chrome is restarted for Guest session. // 3. Chrome is started in browser_tests skipping the login flow. // 4. Chrome is started on dev machine i.e. not on Chrome OS device w/o // login flow. In that case --login-user=[user_manager::kStubUser] is // added. See PreEarlyInitialization(). StartRestoreAfterCrashSession(profile, login_account_id.GetUserEmail()); } void ChromeSessionManager::SessionStarted() { session_manager::SessionManager::SessionStarted(); SetSessionState(session_manager::SessionState::ACTIVE); // Notifies UserManager so that it can update login state. user_manager::UserManager* user_manager = user_manager::UserManager::Get(); if (user_manager) user_manager->OnSessionStarted(); content::NotificationService::current()->Notify( chrome::NOTIFICATION_SESSION_STARTED, content::Source<session_manager::SessionManager>(this), content::Details<const user_manager::User>( user_manager->GetActiveUser())); chromeos::WebUIScreenLocker::RequestPreload(); } void ChromeSessionManager::NotifyUserLoggedIn(const AccountId& user_account_id, const std::string& user_id_hash, bool browser_restart) { BootTimesRecorder* btl = BootTimesRecorder::Get(); btl->AddLoginTimeMarker("UserLoggedIn-Start", false); session_manager::SessionManager::NotifyUserLoggedIn( user_account_id, user_id_hash, browser_restart); btl->AddLoginTimeMarker("UserLoggedIn-End", false); } } // namespace chromeos
3,549
6,717
<filename>deps/3rdparty/libdispatch/platform/windows/time.c #include "time.h" #include <SDKDDKVer.h> #include <Windows.h> void _ulonglong_to_timespec(ULONGLONG when, struct timespec* ts) { when -= EPOCH_DIFFERENCE; ts->tv_sec = when / INTERVALS_PER_SEC; ts->tv_nsec = (when % INTERVALS_PER_SEC) * NSEC_PER_INTERVAL; } void _filetime_to_timespec(FILETIME when, struct timespec* ts) { ULARGE_INTEGER computable_time = {0}; computable_time.HighPart = when.dwHighDateTime; computable_time.LowPart = when.dwLowDateTime; _ulonglong_to_timespec(computable_time.QuadPart, ts); } int clock_gettime(clockid_t clk_id, struct timespec* now) { switch(clk_id) { case CLOCK_REALTIME: { FILETIME current_time = {0}; GetSystemTimeAsFileTime(&current_time); _filetime_to_timespec(current_time, now); } break; case CLOCK_UPTIME: { ULONGLONG unbiased_time = {0}; QueryUnbiasedInterruptTime(&unbiased_time); _ulonglong_to_timespec(unbiased_time, now); } break; default: return -1; } return 0; }
443
700
<gh_stars>100-1000 from skidl import SKIDL, TEMPLATE, Part, Pin, SchLib SKIDL_lib_version = '0.0.1' display = SchLib(tool=SKIDL).add_parts(*[ Part(name='7SEGM',dest=TEMPLATE,tool=SKIDL,keywords='DEV',description='Afficheur Leds 7 segments',ref_prefix='S',num_units=1,do_erc=True,pins=[ Pin(num='1',name='Segm_E',func=Pin.PASSIVE,do_erc=True), Pin(num='2',name='Segm_D',func=Pin.PASSIVE,do_erc=True), Pin(num='3',name='K',func=Pin.PASSIVE,do_erc=True), Pin(num='4',name='Segm_C',func=Pin.PASSIVE,do_erc=True), Pin(num='5',name='SegmDP',func=Pin.PASSIVE,do_erc=True), Pin(num='6',name='Segm_B',func=Pin.PASSIVE,do_erc=True), Pin(num='7',name='Segm_A',func=Pin.PASSIVE,do_erc=True), Pin(num='8',name='K',do_erc=True), Pin(num='9',name='Segm_F',func=Pin.PASSIVE,do_erc=True), Pin(num='10',name='Segm_G',func=Pin.PASSIVE,do_erc=True)]), Part(name='7SEGMENTS',dest=TEMPLATE,tool=SKIDL,do_erc=True), Part(name='AG12864E',dest=TEMPLATE,tool=SKIDL,keywords='LCD Graphics 128x64 KS108 Ampire',description='Graphics Display 128x64px, 1/64 Duty, KS108B Controller, AMPIRE',ref_prefix='DS',num_units=1,do_erc=True,pins=[ Pin(num='1',name='VCC',func=Pin.PWRIN,do_erc=True), Pin(num='2',name='GND',func=Pin.PWRIN,do_erc=True), Pin(num='3',name='VO',func=Pin.PASSIVE,do_erc=True), Pin(num='4',name='DB0',func=Pin.BIDIR,do_erc=True), Pin(num='5',name='DB1',func=Pin.BIDIR,do_erc=True), Pin(num='6',name='DB2',func=Pin.BIDIR,do_erc=True), Pin(num='7',name='DB3',func=Pin.BIDIR,do_erc=True), Pin(num='8',name='DB4',func=Pin.BIDIR,do_erc=True), Pin(num='9',name='DB5',func=Pin.BIDIR,do_erc=True), Pin(num='10',name='DB6',func=Pin.BIDIR,do_erc=True), Pin(num='20',name='LEDK',func=Pin.PASSIVE,do_erc=True), Pin(num='11',name='DB7',func=Pin.BIDIR,do_erc=True), Pin(num='12',name='~CS1~',do_erc=True), Pin(num='13',name='~CS2~',do_erc=True), Pin(num='14',name='~RST~',do_erc=True), Pin(num='15',name='R/~W~',do_erc=True), Pin(num='16',name='~C~/D',do_erc=True), Pin(num='17',name='EN',do_erc=True), Pin(num='18',name='VEE',func=Pin.PASSIVE,do_erc=True), Pin(num='19',name='LEDA',func=Pin.PASSIVE,do_erc=True)]), Part(name='CA56-12',dest=TEMPLATE,tool=SKIDL,keywords='7 SEGMENTS 4 display',description='Kingbright 4 x 7-segment common anode display',ref_prefix='AFF',num_units=1,fplist=['Cx56-12'],do_erc=True,pins=[ Pin(num='1',name='e',do_erc=True), Pin(num='2',name='d',do_erc=True), Pin(num='3',name='DP',do_erc=True), Pin(num='4',name='c',do_erc=True), Pin(num='5',name='g',do_erc=True), Pin(num='6',name='CA4',do_erc=True), Pin(num='7',name='b',do_erc=True), Pin(num='8',name='CA3',do_erc=True), Pin(num='9',name='CA2',do_erc=True), Pin(num='10',name='f',do_erc=True), Pin(num='11',name='a',do_erc=True), Pin(num='12',name='CA1',do_erc=True)]), Part(name='CC56-12',dest=TEMPLATE,tool=SKIDL,keywords='7 SEGMENTS 4 display',description='Kingbright 4 x 7-segment common cathode display',ref_prefix='AFF',num_units=1,fplist=['Cx56-12'],do_erc=True,pins=[ Pin(num='1',name='e',do_erc=True), Pin(num='2',name='d',do_erc=True), Pin(num='3',name='DP',do_erc=True), Pin(num='4',name='c',do_erc=True), Pin(num='5',name='g',do_erc=True), Pin(num='6',name='CC4',do_erc=True), Pin(num='7',name='b',do_erc=True), Pin(num='8',name='CC3',do_erc=True), Pin(num='9',name='CC2',do_erc=True), Pin(num='10',name='f',do_erc=True), Pin(num='11',name='a',do_erc=True), Pin(num='12',name='CC1',do_erc=True)]), Part(name='DA04-11',dest=TEMPLATE,tool=SKIDL,keywords='7 SEGMENTS',description='2 x 7 Segments common A.',ref_prefix='AFF',num_units=1,do_erc=True,pins=[ Pin(num='1',name='c',do_erc=True), Pin(num='2',name='e',do_erc=True), Pin(num='3',name='d',do_erc=True), Pin(num='4',name='Anod1',do_erc=True), Pin(num='5',name='Anod2',do_erc=True), Pin(num='6',name='d',do_erc=True), Pin(num='7',name='e',do_erc=True), Pin(num='8',name='c',do_erc=True), Pin(num='9',name='g',do_erc=True), Pin(num='10',name='a',do_erc=True), Pin(num='11',name='f',do_erc=True), Pin(num='12',name='b',do_erc=True), Pin(num='13',name='b',do_erc=True), Pin(num='14',name='f',do_erc=True), Pin(num='15',name='a',do_erc=True), Pin(num='16',name='g',do_erc=True)]), Part(name='DC04-11',dest=TEMPLATE,tool=SKIDL,keywords='7 SEGMENTS',description='2 x 7 Segments common K.',ref_prefix='AFF',num_units=1,do_erc=True,pins=[ Pin(num='1',name='c',do_erc=True), Pin(num='2',name='e',do_erc=True), Pin(num='3',name='d',do_erc=True), Pin(num='4',name='D1',do_erc=True), Pin(num='5',name='D2',do_erc=True), Pin(num='6',name='d',do_erc=True), Pin(num='7',name='e',do_erc=True), Pin(num='8',name='c',do_erc=True), Pin(num='9',name='g',do_erc=True), Pin(num='10',name='a',do_erc=True), Pin(num='11',name='f',do_erc=True), Pin(num='12',name='b',do_erc=True), Pin(num='13',name='b',do_erc=True), Pin(num='14',name='f',do_erc=True), Pin(num='15',name='a',do_erc=True), Pin(num='16',name='g',do_erc=True)]), Part(name='DISPLAY',dest=TEMPLATE,tool=SKIDL,keywords='DEV',description='Afficheur LCD nLignes',ref_prefix='S',num_units=1,do_erc=True,pins=[ Pin(num='1',name='GND',func=Pin.PWRIN,do_erc=True), Pin(num='2',name='VCC',func=Pin.PWRIN,do_erc=True), Pin(num='3',name='VLCD',do_erc=True), Pin(num='4',name='RS',do_erc=True), Pin(num='5',name='R/W',do_erc=True), Pin(num='6',name='CS',do_erc=True), Pin(num='7',name='D0',func=Pin.TRISTATE,do_erc=True), Pin(num='8',name='D1',func=Pin.TRISTATE,do_erc=True), Pin(num='9',name='D2',func=Pin.TRISTATE,do_erc=True), Pin(num='10',name='D3',func=Pin.TRISTATE,do_erc=True), Pin(num='11',name='D4',func=Pin.TRISTATE,do_erc=True), Pin(num='12',name='D5',func=Pin.TRISTATE,do_erc=True), Pin(num='13',name='D6',func=Pin.TRISTATE,do_erc=True), Pin(num='14',name='D7',func=Pin.TRISTATE,do_erc=True)]), Part(name='DISPLAY_3_LIGNE',dest=TEMPLATE,tool=SKIDL,description='DISPLAY EA7123-12C',ref_prefix='S',num_units=1,do_erc=True,pins=[ Pin(num='1',name='GND',func=Pin.PWRIN,do_erc=True), Pin(num='2',name='VCC',func=Pin.PWRIN,do_erc=True), Pin(num='3',name='VLCD',do_erc=True), Pin(num='4',name='VO',do_erc=True), Pin(num='5',name='SDA',do_erc=True), Pin(num='6',name='SCL',do_erc=True)]), Part(name='DOT-BAR',dest=TEMPLATE,tool=SKIDL,keywords='BAR DOT',description='GRAPH unit',ref_prefix='BAR',num_units=10,do_erc=True,pins=[ Pin(num='1',name='A',func=Pin.PASSIVE,do_erc=True), Pin(num='20',name='K',func=Pin.PASSIVE,do_erc=True), Pin(num='2',name='A',func=Pin.PASSIVE,do_erc=True), Pin(num='19',name='K',func=Pin.PASSIVE,do_erc=True), Pin(num='3',name='A',func=Pin.PASSIVE,do_erc=True), Pin(num='18',name='K',func=Pin.PASSIVE,do_erc=True), Pin(num='4',name='A',func=Pin.PASSIVE,do_erc=True), Pin(num='17',name='K',func=Pin.PASSIVE,do_erc=True), Pin(num='5',name='A',func=Pin.PASSIVE,do_erc=True), Pin(num='16',name='K',func=Pin.PASSIVE,do_erc=True), Pin(num='6',name='A',func=Pin.PASSIVE,do_erc=True), Pin(num='15',name='K',func=Pin.PASSIVE,do_erc=True), Pin(num='7',name='A',func=Pin.PASSIVE,do_erc=True), Pin(num='14',name='K',func=Pin.PASSIVE,do_erc=True), Pin(num='8',name='A',func=Pin.PASSIVE,do_erc=True), Pin(num='13',name='K',func=Pin.PASSIVE,do_erc=True), Pin(num='9',name='A',func=Pin.PASSIVE,do_erc=True), Pin(num='12',name='K',func=Pin.PASSIVE,do_erc=True), Pin(num='10',name='A',func=Pin.PASSIVE,do_erc=True), Pin(num='11',name='K',func=Pin.PASSIVE,do_erc=True)]), Part(name='DOT-BAR2',dest=TEMPLATE,tool=SKIDL,keywords='BAR DOT',description='BAR GRAPH Block',ref_prefix='BAR',num_units=1,do_erc=True,pins=[ Pin(num='1',name='A',func=Pin.PASSIVE,do_erc=True), Pin(num='2',name='A',func=Pin.PASSIVE,do_erc=True), Pin(num='3',name='A',func=Pin.PASSIVE,do_erc=True), Pin(num='4',name='A',func=Pin.PASSIVE,do_erc=True), Pin(num='5',name='A',func=Pin.PASSIVE,do_erc=True), Pin(num='6',name='A',func=Pin.PASSIVE,do_erc=True), Pin(num='7',name='A',do_erc=True), Pin(num='8',name='A',func=Pin.PASSIVE,do_erc=True), Pin(num='9',name='A',func=Pin.PASSIVE,do_erc=True), Pin(num='10',name='A',func=Pin.PASSIVE,do_erc=True), Pin(num='20',name='K',func=Pin.PASSIVE,do_erc=True), Pin(num='11',name='K',func=Pin.PASSIVE,do_erc=True), Pin(num='12',name='K',func=Pin.PASSIVE,do_erc=True), Pin(num='13',name='K',func=Pin.PASSIVE,do_erc=True), Pin(num='14',name='K',do_erc=True), Pin(num='15',name='K',func=Pin.PASSIVE,do_erc=True), Pin(num='16',name='K',func=Pin.PASSIVE,do_erc=True), Pin(num='17',name='K',func=Pin.PASSIVE,do_erc=True), Pin(num='18',name='K',func=Pin.PASSIVE,do_erc=True), Pin(num='19',name='K',func=Pin.PASSIVE,do_erc=True)]), Part(name='ELD-426x',dest=TEMPLATE,tool=SKIDL,do_erc=True), Part(name='HCLD0438',dest=TEMPLATE,tool=SKIDL,do_erc=True), Part(name='HDSP-7xxx-A',dest=TEMPLATE,tool=SKIDL,do_erc=True), Part(name='HDSP-7xxx-B',dest=TEMPLATE,tool=SKIDL,do_erc=True), Part(name='HDSP-7xxx-C',dest=TEMPLATE,tool=SKIDL,do_erc=True), Part(name='HDSP-7xxx-D',dest=TEMPLATE,tool=SKIDL,do_erc=True), Part(name='HY1602E',dest=TEMPLATE,tool=SKIDL,keywords='LCD 16x2 Alphanumeric 16pin Blue/Yellow/Green Backlight',description='HY1602E',ref_prefix='DS',num_units=1,do_erc=True,pins=[ Pin(num='1',name='LEDK',func=Pin.PASSIVE,do_erc=True), Pin(num='2',name='LEDA',func=Pin.PASSIVE,do_erc=True), Pin(num='3',name='VSS',func=Pin.PWRIN,do_erc=True), Pin(num='4',name='VDD',func=Pin.PWRIN,do_erc=True), Pin(num='5',name='Vo',do_erc=True), Pin(num='6',name='RS',do_erc=True), Pin(num='7',name='R/~W',do_erc=True), Pin(num='8',name='E',do_erc=True), Pin(num='9',name='DB0',func=Pin.BIDIR,do_erc=True), Pin(num='10',name='DB1',func=Pin.BIDIR,do_erc=True), Pin(num='11',name='DB2',func=Pin.BIDIR,do_erc=True), Pin(num='12',name='DB3',func=Pin.BIDIR,do_erc=True), Pin(num='13',name='DB4',func=Pin.BIDIR,do_erc=True), Pin(num='14',name='DB5',func=Pin.BIDIR,do_erc=True), Pin(num='15',name='DB6',func=Pin.BIDIR,do_erc=True), Pin(num='16',name='DB7',func=Pin.BIDIR,do_erc=True)]), Part(name='ILI9341_LCD_Breakout',dest=TEMPLATE,tool=SKIDL,keywords='GLCD TFT ILI9341 320x240',description='ILI9341 controller, SPI TFT LCD Display, 9-pin breakout PCB, 4-pin SD card interface, 5V/3.3V',ref_prefix='U',num_units=1,do_erc=True,pins=[ Pin(num='1',name='Vcc',func=Pin.PWRIN,do_erc=True), Pin(num='2',name='GND',func=Pin.PWRIN,do_erc=True), Pin(num='3',name='~CS',do_erc=True), Pin(num='4',name='Reset',do_erc=True), Pin(num='5',name='D/~C',do_erc=True), Pin(num='6',name='MOSI',do_erc=True), Pin(num='7',name='SCK',do_erc=True), Pin(num='8',name='LED',do_erc=True), Pin(num='9',name='MISO',func=Pin.OUTPUT,do_erc=True), Pin(num='10',name='SD_CS',do_erc=True), Pin(num='11',name='SD_MOSI',do_erc=True), Pin(num='12',name='SD_MISO',func=Pin.OUTPUT,do_erc=True), Pin(num='13',name='SD_SCK',do_erc=True)]), Part(name='LCD16X2',dest=TEMPLATE,tool=SKIDL,keywords='Generic LCD 16x2 Alphanumeric 16pin Green Backlight',description='WC1602A0-SFYLYNC06',ref_prefix='DS',num_units=1,do_erc=True,aliases=['LCD-016N002L'],pins=[ Pin(num='1',name='VSS',func=Pin.PWRIN,do_erc=True), Pin(num='2',name='VDD',func=Pin.PWRIN,do_erc=True), Pin(num='3',name='VO',do_erc=True), Pin(num='4',name='RS',do_erc=True), Pin(num='5',name='R/W',do_erc=True), Pin(num='6',name='E',do_erc=True), Pin(num='7',name='D0',do_erc=True), Pin(num='8',name='D1',do_erc=True), Pin(num='9',name='D2',do_erc=True), Pin(num='10',name='D3',do_erc=True), Pin(num='11',name='D4',do_erc=True), Pin(num='12',name='D5',do_erc=True), Pin(num='13',name='D6',do_erc=True), Pin(num='14',name='D7',do_erc=True), Pin(num='15',name='LED+',func=Pin.PASSIVE,do_erc=True), Pin(num='16',name='LED-',func=Pin.PASSIVE,do_erc=True)]), Part(name='LCD4',dest=TEMPLATE,tool=SKIDL,do_erc=True), Part(name='LM16255K',dest=TEMPLATE,tool=SKIDL,do_erc=True), Part(name='LTS-6960HR',dest=TEMPLATE,tool=SKIDL,keywords='7 SEGMENTS',description='DISPLAY 7 SEGMENTS common A.',ref_prefix='AFF',num_units=1,do_erc=True,pins=[ Pin(num='1',name='e',do_erc=True), Pin(num='2',name='d',do_erc=True), Pin(num='3',name='C.A.',do_erc=True), Pin(num='4',name='c',do_erc=True), Pin(num='5',name='DP',do_erc=True), Pin(num='6',name='b',do_erc=True), Pin(num='7',name='a',do_erc=True), Pin(num='8',name='C.A.',do_erc=True), Pin(num='9',name='f',do_erc=True), Pin(num='10',name='g',do_erc=True)]), Part(name='LTS-6980HR',dest=TEMPLATE,tool=SKIDL,keywords='7 SEGMENTS',description='DISPLAY 7 SEGMENTS common K',ref_prefix='AFF',num_units=1,do_erc=True,pins=[ Pin(num='1',name='e',do_erc=True), Pin(num='2',name='d',do_erc=True), Pin(num='3',name='C.K.',do_erc=True), Pin(num='4',name='c',do_erc=True), Pin(num='5',name='DP',do_erc=True), Pin(num='6',name='b',do_erc=True), Pin(num='7',name='a',do_erc=True), Pin(num='8',name='C.K.',do_erc=True), Pin(num='9',name='f',do_erc=True), Pin(num='10',name='g',do_erc=True)]), Part(name='MAN3640A',dest=TEMPLATE,tool=SKIDL,do_erc=True), Part(name='MAN71A',dest=TEMPLATE,tool=SKIDL,keywords='DISPLAY LED',description='7 segments display - Common anods',ref_prefix='AFF',num_units=1,do_erc=True,pins=[ Pin(num='1',name='a',do_erc=True), Pin(num='2',name='f',do_erc=True), Pin(num='3',name='C.A.',do_erc=True), Pin(num='7',name='e',do_erc=True), Pin(num='8',name='d',do_erc=True), Pin(num='9',name='DP',do_erc=True), Pin(num='10',name='c',do_erc=True), Pin(num='11',name='g',do_erc=True), Pin(num='13',name='b',do_erc=True), Pin(num='14',name='C.A.',do_erc=True)]), Part(name='RC1602A-GHW-ESX',dest=TEMPLATE,tool=SKIDL,keywords='LCD 16x2 Alphanumeric 16pin Gray Backlight',description='RC1602A-GHW-ESX',ref_prefix='DS',num_units=1,do_erc=True,pins=[ Pin(num='1',name='VSS',func=Pin.PWRIN,do_erc=True), Pin(num='2',name='VDD',func=Pin.PWRIN,do_erc=True), Pin(num='3',name='Vo',do_erc=True), Pin(num='4',name='RS',do_erc=True), Pin(num='5',name='R/~W',do_erc=True), Pin(num='6',name='E',do_erc=True), Pin(num='7',name='DB0',func=Pin.BIDIR,do_erc=True), Pin(num='8',name='DB1',func=Pin.BIDIR,do_erc=True), Pin(num='9',name='DB2',func=Pin.BIDIR,do_erc=True), Pin(num='10',name='DB3',func=Pin.BIDIR,do_erc=True), Pin(num='11',name='DB4',func=Pin.BIDIR,do_erc=True), Pin(num='12',name='DB5',func=Pin.BIDIR,do_erc=True), Pin(num='13',name='DB6',func=Pin.BIDIR,do_erc=True), Pin(num='14',name='DB7',func=Pin.BIDIR,do_erc=True), Pin(num='15',name='LED+',func=Pin.PASSIVE,do_erc=True), Pin(num='16',name='LED-',func=Pin.PASSIVE,do_erc=True)])])
9,355
3,172
<reponame>minddistrict/authlib import werkzeug from werkzeug.exceptions import HTTPException _version = werkzeug.__version__.split('.')[0] if _version in ('0', '1'): class _HTTPException(HTTPException): def __init__(self, code, body, headers, response=None): super(_HTTPException, self).__init__(None, response) self.code = code self.body = body self.headers = headers def get_body(self, environ=None): return self.body def get_headers(self, environ=None): return self.headers else: class _HTTPException(HTTPException): def __init__(self, code, body, headers, response=None): super(_HTTPException, self).__init__(None, response) self.code = code self.body = body self.headers = headers def get_body(self, environ=None, scope=None): return self.body def get_headers(self, environ=None, scope=None): return self.headers def raise_http_exception(status, body, headers): raise _HTTPException(status, body, headers)
479
409
<reponame>menuet/xlang import loader_native, unittest, asyncio wf = loader_native.import_ns("Windows.Foundation") wdg = loader_native.import_ns("Windows.Devices.Geolocation") # async_test inspired by https://stackoverflow.com/a/23036785 def async_test(test): def wrapper(*args, **kwargs): original_loop = asyncio.get_event_loop() test_loop = asyncio.new_event_loop() asyncio.set_event_loop(test_loop) try: test_loop.run_until_complete(test(*args, **kwargs)) finally: test_loop.close() asyncio.set_event_loop(original_loop) return wrapper class TestGeolocation(unittest.TestCase): def test_pinterface_qi(self): locator = wdg.Geolocator() op = locator.get_geoposition_async() self.assertEqual(type(op), wf.IAsyncOperation) op.cancel() def test_struct_ctor(self): basic_pos = wdg.BasicGeoposition(latitude = 47.1, longitude = -122.1, altitude = 0.0) self.assertEqual(basic_pos.latitude, 47.1) self.assertEqual(basic_pos.longitude, -122.1) self.assertEqual(basic_pos.altitude, 0.0) geocircle = wdg.Geocircle(basic_pos, 10) center = geocircle.center self.assertEqual(10, geocircle.radius) for x in ["latitude", "longitude", "altitude"]: self.assertEqual(getattr(basic_pos, x), getattr(center, x)) def test_struct_from_dict(self): basic_pos = {"latitude": 47.1, "longitude": -122.1, "altitude": 0.0} geocircle = wdg.Geocircle(basic_pos, 10) center = geocircle.center self.assertEqual(10, geocircle.radius) for x in ["latitude", "longitude", "altitude"]: self.assertEqual(basic_pos[x], getattr(center, x)) def test_iiterable_wraping(self): basic_pos1 = wdg.BasicGeoposition(47.1, -122.1, 0.0) basic_pos2 = wdg.BasicGeoposition(47.2, -122.2, 0.0) box = wdg.GeoboundingBox.try_compute([basic_pos1, basic_pos2]) nw = box.northwest_corner se = box.southeast_corner self.assertAlmostEqual(nw.latitude, basic_pos2.latitude) self.assertAlmostEqual(nw.longitude, basic_pos2.longitude) self.assertAlmostEqual(se.latitude, basic_pos1.latitude) self.assertAlmostEqual(se.longitude, basic_pos1.longitude) def test_GetGeopositionAsync(self): """test async method using IAsyncOperation Completed callback""" import threading complete_event = threading.Event() def callback(operation, status): self.assertEqual(status, 1) pos = operation.get_results() self.assertEqual(type(pos), wdg.Geoposition) coord = pos.coordinate self.assertEqual(type(coord.timestamp.universal_time), int) basic_pos = coord.point.position lat = basic_pos.latitude self.assertEqual(type(lat), float) complete_event.set() locator = wdg.Geolocator() op = locator.get_geoposition_async() op.completed = callback self.assertTrue(complete_event.wait(5)) @async_test async def test_GetGeopositionAsync_await(self): """test async method by directly awaiting IAsyncOperation""" locator = wdg.Geolocator() pos = await locator.get_geoposition_async() self.assertEqual(type(pos), wdg.Geoposition) coord = pos.coordinate self.assertEqual(type(coord.timestamp.universal_time), int) basic_pos = coord.point.position lat = basic_pos.latitude self.assertEqual(type(lat), float) if __name__ == '__main__': unittest.main()
1,662
1,326
package org.mangosdk.spi.processor; public enum LogLocation { MESSAGER, LOG_FILE, BOTH; public boolean toMessager() { return this != LOG_FILE; } public boolean toLogFile() { return this != MESSAGER; } }
89
654
<filename>test/unit/awsume/awsumepy/lib/test_aws_files.py import os import json import pytest import argparse from io import StringIO from pathlib import Path from unittest.mock import patch, MagicMock, mock_open from awsume.awsumepy.lib import constants from awsume.awsumepy.lib import aws_files def test_get_aws_files(): args = argparse.Namespace(config_file=None, credentials_file=None) config = {} config_file, credentials_file = aws_files.get_aws_files(args, config) assert config_file == str(Path(constants.DEFAULT_CONFIG_FILE)) assert credentials_file == str(Path(constants.DEFAULT_CREDENTIALS_FILE)) def test_get_aws_files_args(): args = argparse.Namespace(config_file='my/config/file', credentials_file='my/credentials/file') config = {} config_file, credentials_file = aws_files.get_aws_files(args, config) assert config_file == str(Path('my/config/file')) assert credentials_file == str(Path('my/credentials/file')) def test_get_aws_files_config(): args = argparse.Namespace(config_file=None, credentials_file=None) config = { 'config-file': 'my/config/file', 'credentials-file': 'my/credentials/file', } config_file, credentials_file = aws_files.get_aws_files(args, config) assert config_file == str(Path('my/config/file')) assert credentials_file == str(Path('my/credentials/file')) @patch.dict('os.environ', {'AWS_CONFIG_FILE': 'my/config/file', 'AWS_SHARED_CREDENTIALS_FILE': 'my/credentials/file'}, clear=True) def test_get_aws_files_environment(): args = argparse.Namespace(config_file=None, credentials_file=None) config = {} config_file, credentials_file = aws_files.get_aws_files(args, config) assert config_file == str(Path('my/config/file')) assert credentials_file == str(Path('my/credentials/file')) @patch('builtins.open') @patch('configparser.ConfigParser') def test_add_section(ConfigParser: MagicMock, open: MagicMock): parser = MagicMock() ConfigParser.return_value = parser parser.has_section.return_value = True aws_files.add_section('section-name', {'key': 'value', 'key2': 'value2'}, 'file-name', overwrite=True) parser.read.assert_called_once_with('file-name') parser.remove_section.assert_called_once_with('section-name') parser.add_section.assert_called_once_with('section-name') assert parser.set.call_count == 3 parser.set.assert_any_call('section-name', 'manager', 'awsume') parser.write.assert_called_once() open.assert_called_once() @patch.object(aws_files, 'safe_print') @patch('builtins.open') @patch('configparser.ConfigParser') def test_add_section_no_overwrite(ConfigParser: MagicMock, open: MagicMock, safe_print: MagicMock): parser = MagicMock() ConfigParser.return_value = parser parser.has_section.return_value = True aws_files.add_section('section-name', {'key': 'value', 'key2': 'value2'}, 'file-name', overwrite=False) parser.read.assert_called_once_with('file-name') parser.remove_section.assert_not_called() parser.add_section.assert_not_called() parser.set.assert_not_called() @patch.object(aws_files, 'safe_print') @patch('builtins.open') @patch('configparser.ConfigParser') def test_add_section_new_section(ConfigParser: MagicMock, open: MagicMock, safe_print: MagicMock): parser = MagicMock() ConfigParser.return_value = parser parser.has_section.return_value = False aws_files.add_section('section-name', {'key': 'value', 'key2': 'value2'}, 'file-name') parser.read.assert_called_once_with('file-name') parser.remove_section.assert_not_called() parser.add_section.assert_called_once_with('section-name') assert parser.set.call_count == 3 parser.set.assert_any_call('section-name', 'manager', 'awsume') parser.write.assert_called_once() open.assert_called_once() @patch('builtins.open') @patch('configparser.ConfigParser') def test_delete_section(ConfigParser: MagicMock, open: MagicMock): parser = MagicMock() ConfigParser.return_value = parser parser.has_section.return_value = True aws_files.delete_section('section-name', 'file-name') parser.read.assert_called_once_with('file-name') parser.remove_section.assert_called_once_with('section-name') parser.write.assert_called_once() open.assert_called_once() @patch('builtins.open') @patch('configparser.ConfigParser') def test_delete_section_no_section(ConfigParser: MagicMock, open: MagicMock): parser = MagicMock() ConfigParser.return_value = parser parser.has_section.return_value = False aws_files.delete_section('section-name', 'file-name') parser.read.assert_called_once_with('file-name') parser.remove_section.assert_not_called() myfile = """ [default] region = us-east-1 mfa_serial = arn:aws:iam::123123123123:mfa/admin """ @patch('builtins.open') def test_read_aws_file(open: MagicMock): open.return_value = StringIO(myfile) result = aws_files.read_aws_file('my/file/') assert result == { 'default': { 'region': 'us-east-1', 'mfa_serial': 'arn:aws:iam::123123123123:mfa/admin', }, }
1,912
529
/* crypto/asn1/a_bytes.c */ /* Copyright (C) 1995-1998 <NAME> (<EMAIL>) * All rights reserved. * * This package is an SSL implementation written * by <NAME> (<EMAIL>). * The implementation was written so as to conform with Netscapes SSL. * * This library is free for commercial and non-commercial use as long as * the following conditions are aheared to. The following conditions * apply to all code found in this distribution, be it the RC4, RSA, * lhash, DES, etc., code; not just the SSL code. The SSL documentation * included with this distribution is covered by the same copyright terms * except that the holder is <NAME> (<EMAIL>). * * Copyright remains <NAME>ung's, and as such any Copyright notices in * the code are not to be removed. * If this package is used in a product, Eric Young should be given attribution * as the author of the parts of the library used. * This can be in the form of a textual message at program startup or * in documentation (online or textual) provided with the package. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * "This product includes cryptographic software written by * <NAME> (<EMAIL>)" * The word 'cryptographic' can be left out if the rouines from the library * being used are not cryptographic related :-). * 4. If you include any Windows specific code (or a derivative thereof) from * the apps directory (application code) you must include an acknowledgement: * "This product includes software written by <NAME> (<EMAIL>)" * * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * The licence and distribution terms for any publically available version or * derivative of this code cannot be changed. i.e. this code cannot simply be * copied and put under another distribution licence * [including the GNU Public Licence.] */ #include "cryptlib.h" #include <openssl/asn1.h> #ifdef OPENSSL_SYS_WINDOWS #include <stdio.h> #endif static int asn1_collate_primitive(ASN1_STRING *a, ASN1_const_CTX *c); /* type is a 'bitmap' of acceptable string types. */ ASN1_STRING *d2i_ASN1_type_bytes(ASN1_STRING **a, const unsigned char **pp, long length, int type) { ASN1_STRING *ret=NULL; const unsigned char *p; unsigned char *s; long len; int inf,tag,xclass; int i=0; p= *pp; inf=ASN1_get_object(&p,&len,&tag,&xclass,length); if (inf & 0x80) goto err; if (tag >= 32) { i=ASN1_R_TAG_VALUE_TOO_HIGH; goto err; } if (!(ASN1_tag2bit(tag) & type)) { i=ASN1_R_WRONG_TYPE; goto err; } /* If a bit-string, exit early */ if (tag == V_ASN1_BIT_STRING) return(d2i_ASN1_BIT_STRING(a,pp,length)); if ((a == NULL) || ((*a) == NULL)) { if ((ret=ASN1_STRING_new()) == NULL) return(NULL); } else ret=(*a); if (len != 0) { s=(unsigned char *)OPENSSL_malloc((int)len+1); if (s == NULL) { i=ERR_R_MALLOC_FAILURE; goto err; } TINYCLR_SSL_MEMCPY(s,p,(int)len); s[len]='\0'; p+=len; } else s=NULL; if (ret->data != NULL) OPENSSL_free(ret->data); ret->length=(int)len; ret->data=s; ret->type=tag; if (a != NULL) (*a)=ret; *pp=p; return(ret); err: ASN1err(ASN1_F_D2I_ASN1_TYPE_BYTES,i); if ((ret != NULL) && ((a == NULL) || (*a != ret))) ASN1_STRING_free(ret); return(NULL); } int i2d_ASN1_bytes(ASN1_STRING *a, unsigned char **pp, int tag, int xclass) { int ret,r,constructed; unsigned char *p; if (a == NULL) return(0); if (tag == V_ASN1_BIT_STRING) return(i2d_ASN1_BIT_STRING(a,pp)); ret=a->length; r=ASN1_object_size(0,ret,tag); if (pp == NULL) return(r); p= *pp; if ((tag == V_ASN1_SEQUENCE) || (tag == V_ASN1_SET)) constructed=1; else constructed=0; ASN1_put_object(&p,constructed,ret,tag,xclass); TINYCLR_SSL_MEMCPY(p,a->data,a->length); p+=a->length; *pp= p; return(r); } ASN1_STRING *d2i_ASN1_bytes(ASN1_STRING **a, const unsigned char **pp, long length, int Ptag, int Pclass) { ASN1_STRING *ret=NULL; const unsigned char *p; unsigned char *s; long len; int inf,tag,xclass; int i=0; if ((a == NULL) || ((*a) == NULL)) { if ((ret=ASN1_STRING_new()) == NULL) return(NULL); } else ret=(*a); p= *pp; inf=ASN1_get_object(&p,&len,&tag,&xclass,length); if (inf & 0x80) { i=ASN1_R_BAD_OBJECT_HEADER; goto err; } if (tag != Ptag) { i=ASN1_R_WRONG_TAG; goto err; } if (inf & V_ASN1_CONSTRUCTED) { ASN1_const_CTX c; c.pp=pp; c.p=p; c.inf=inf; c.slen=len; c.tag=Ptag; c.xclass=Pclass; c.max=(length == 0)?0:(p+length); if (!asn1_collate_primitive(ret,&c)) goto err; else { p=c.p; } } else { if (len != 0) { if ((ret->length < len) || (ret->data == NULL)) { if (ret->data != NULL) OPENSSL_free(ret->data); s=(unsigned char *)OPENSSL_malloc((int)len + 1); if (s == NULL) { i=ERR_R_MALLOC_FAILURE; goto err; } } else s=ret->data; TINYCLR_SSL_MEMCPY(s,p,(int)len); s[len] = '\0'; p+=len; } else { s=NULL; if (ret->data != NULL) OPENSSL_free(ret->data); } ret->length=(int)len; ret->data=s; ret->type=Ptag; } if (a != NULL) (*a)=ret; *pp=p; return(ret); err: if ((ret != NULL) && ((a == NULL) || (*a != ret))) ASN1_STRING_free(ret); ASN1err(ASN1_F_D2I_ASN1_BYTES,i); return(NULL); } /* We are about to parse 0..n d2i_ASN1_bytes objects, we are to collapse * them into the one structure that is then returned */ /* There have been a few bug fixes for this function from * <NAME> <<EMAIL>>, many thanks to him */ static int asn1_collate_primitive(ASN1_STRING *a, ASN1_const_CTX *c) { ASN1_STRING *os=NULL; BUF_MEM b; int num; b.length=0; b.max=0; b.data=NULL; if (a == NULL) { c->error=ERR_R_PASSED_NULL_PARAMETER; goto err; } num=0; for (;;) { if (c->inf & 1) { c->eos=ASN1_const_check_infinite_end(&c->p, (long)(c->max-c->p)); if (c->eos) break; } else { if (c->slen <= 0) break; } c->q=c->p; if (d2i_ASN1_bytes(&os,&c->p,c->max-c->p,c->tag,c->xclass) == NULL) { c->error=ERR_R_ASN1_LIB; goto err; } if (!BUF_MEM_grow_clean(&b,num+os->length)) { c->error=ERR_R_BUF_LIB; goto err; } TINYCLR_SSL_MEMCPY(&(b.data[num]),os->data,os->length); if (!(c->inf & 1)) c->slen-=(c->p-c->q); num+=os->length; } if (!asn1_const_Finish(c)) goto err; a->length=num; if (a->data != NULL) OPENSSL_free(a->data); a->data=(unsigned char *)b.data; if (os != NULL) ASN1_STRING_free(os); return(1); err: ASN1err(ASN1_F_ASN1_COLLATE_PRIMITIVE,c->error); if (os != NULL) ASN1_STRING_free(os); if (b.data != NULL) OPENSSL_free(b.data); return(0); }
3,720
400
# type: ignore import cflearn from u2net_finetune import get_data from cflearn.misc.toolkit import check_is_ci from cflearn.misc.toolkit import download_model is_ci = check_is_ci() finetune_ckpt = "path/to/your/finetune/model" if is_ci: finetune_ckpt = download_model("u2net.lite") if __name__ == "__main__": data = get_data(is_ci) m = cflearn.api.u2net_lite_refine( finetune_ckpt, callback_names=["cascade_u2net", "mlflow"], debug=is_ci, ) m.fit(data, cuda=None if is_ci else 5)
242
852
import FWCore.ParameterSet.Config as cms from PhysicsTools.PatAlgos.producersHeavyIons.heavyIonJets_cff import * from PhysicsTools.PatAlgos.producersHeavyIons.heavyIonMuons_cff import * from PhysicsTools.PatAlgos.producersHeavyIons.heavyIonPhotons_cff import * from PhysicsTools.PatAlgos.producersHeavyIons.heavyIonProducer_cfi import * heavyIonPatCandidates = cms.Sequence( heavyIon + makeHeavyIonJets + makeHeavyIonPhotons + makeHeavyIonMuons )
174
1,444
package mage.cards.c; import java.util.UUID; import mage.abilities.Ability; import mage.abilities.common.SimpleActivatedAbility; import mage.abilities.costs.common.TapSourceCost; import mage.abilities.costs.mana.GenericManaCost; import mage.abilities.effects.keyword.ScryEffect; import mage.cards.CardImpl; import mage.cards.CardSetInfo; import mage.constants.CardType; import mage.constants.Zone; /** * * @author <EMAIL> */ public final class CrystalBall extends CardImpl { public CrystalBall(UUID ownerId, CardSetInfo setInfo) { super(ownerId,setInfo,new CardType[]{CardType.ARTIFACT},"{3}"); Ability ability = new SimpleActivatedAbility(Zone.BATTLEFIELD, new ScryEffect(2), new TapSourceCost()); ability.addManaCost(new GenericManaCost(1)); this.addAbility(ability); } private CrystalBall(final CrystalBall card) { super(card); } @Override public CrystalBall copy() { return new CrystalBall(this); } }
351
2,816
<gh_stars>1000+ // DuckDB // // duckdb/common/fast_mem.hpp // // //===----------------------------------------------------------------------===// #pragma once #include "duckdb/common/common.hpp" #include "duckdb/common/types.hpp" template <size_t SIZE> static inline void MemcpyFixed(void *dest, const void *src) { memcpy(dest, src, SIZE); } template <size_t SIZE> static inline int MemcmpFixed(const void *str1, const void *str2) { return memcmp(str1, str2, SIZE); } namespace duckdb { //! This templated memcpy is significantly faster than std::memcpy, //! but only when you are calling memcpy with a const size in a loop. //! For instance `while (<cond>) { memcpy(<dest>, <src>, const_size); ... }` static inline void FastMemcpy(void *dest, const void *src, const size_t size) { // LCOV_EXCL_START switch (size) { case 0: return; case 1: return MemcpyFixed<1>(dest, src); case 2: return MemcpyFixed<2>(dest, src); case 3: return MemcpyFixed<3>(dest, src); case 4: return MemcpyFixed<4>(dest, src); case 5: return MemcpyFixed<5>(dest, src); case 6: return MemcpyFixed<6>(dest, src); case 7: return MemcpyFixed<7>(dest, src); case 8: return MemcpyFixed<8>(dest, src); case 9: return MemcpyFixed<9>(dest, src); case 10: return MemcpyFixed<10>(dest, src); case 11: return MemcpyFixed<11>(dest, src); case 12: return MemcpyFixed<12>(dest, src); case 13: return MemcpyFixed<13>(dest, src); case 14: return MemcpyFixed<14>(dest, src); case 15: return MemcpyFixed<15>(dest, src); case 16: return MemcpyFixed<16>(dest, src); case 17: return MemcpyFixed<17>(dest, src); case 18: return MemcpyFixed<18>(dest, src); case 19: return MemcpyFixed<19>(dest, src); case 20: return MemcpyFixed<20>(dest, src); case 21: return MemcpyFixed<21>(dest, src); case 22: return MemcpyFixed<22>(dest, src); case 23: return MemcpyFixed<23>(dest, src); case 24: return MemcpyFixed<24>(dest, src); case 25: return MemcpyFixed<25>(dest, src); case 26: return MemcpyFixed<26>(dest, src); case 27: return MemcpyFixed<27>(dest, src); case 28: return MemcpyFixed<28>(dest, src); case 29: return MemcpyFixed<29>(dest, src); case 30: return MemcpyFixed<30>(dest, src); case 31: return MemcpyFixed<31>(dest, src); case 32: return MemcpyFixed<32>(dest, src); case 33: return MemcpyFixed<33>(dest, src); case 34: return MemcpyFixed<34>(dest, src); case 35: return MemcpyFixed<35>(dest, src); case 36: return MemcpyFixed<36>(dest, src); case 37: return MemcpyFixed<37>(dest, src); case 38: return MemcpyFixed<38>(dest, src); case 39: return MemcpyFixed<39>(dest, src); case 40: return MemcpyFixed<40>(dest, src); case 41: return MemcpyFixed<41>(dest, src); case 42: return MemcpyFixed<42>(dest, src); case 43: return MemcpyFixed<43>(dest, src); case 44: return MemcpyFixed<44>(dest, src); case 45: return MemcpyFixed<45>(dest, src); case 46: return MemcpyFixed<46>(dest, src); case 47: return MemcpyFixed<47>(dest, src); case 48: return MemcpyFixed<48>(dest, src); case 49: return MemcpyFixed<49>(dest, src); case 50: return MemcpyFixed<50>(dest, src); case 51: return MemcpyFixed<51>(dest, src); case 52: return MemcpyFixed<52>(dest, src); case 53: return MemcpyFixed<53>(dest, src); case 54: return MemcpyFixed<54>(dest, src); case 55: return MemcpyFixed<55>(dest, src); case 56: return MemcpyFixed<56>(dest, src); case 57: return MemcpyFixed<57>(dest, src); case 58: return MemcpyFixed<58>(dest, src); case 59: return MemcpyFixed<59>(dest, src); case 60: return MemcpyFixed<60>(dest, src); case 61: return MemcpyFixed<61>(dest, src); case 62: return MemcpyFixed<62>(dest, src); case 63: return MemcpyFixed<63>(dest, src); case 64: return MemcpyFixed<64>(dest, src); case 65: return MemcpyFixed<65>(dest, src); case 66: return MemcpyFixed<66>(dest, src); case 67: return MemcpyFixed<67>(dest, src); case 68: return MemcpyFixed<68>(dest, src); case 69: return MemcpyFixed<69>(dest, src); case 70: return MemcpyFixed<70>(dest, src); case 71: return MemcpyFixed<71>(dest, src); case 72: return MemcpyFixed<72>(dest, src); case 73: return MemcpyFixed<73>(dest, src); case 74: return MemcpyFixed<74>(dest, src); case 75: return MemcpyFixed<75>(dest, src); case 76: return MemcpyFixed<76>(dest, src); case 77: return MemcpyFixed<77>(dest, src); case 78: return MemcpyFixed<78>(dest, src); case 79: return MemcpyFixed<79>(dest, src); case 80: return MemcpyFixed<80>(dest, src); case 81: return MemcpyFixed<81>(dest, src); case 82: return MemcpyFixed<82>(dest, src); case 83: return MemcpyFixed<83>(dest, src); case 84: return MemcpyFixed<84>(dest, src); case 85: return MemcpyFixed<85>(dest, src); case 86: return MemcpyFixed<86>(dest, src); case 87: return MemcpyFixed<87>(dest, src); case 88: return MemcpyFixed<88>(dest, src); case 89: return MemcpyFixed<89>(dest, src); case 90: return MemcpyFixed<90>(dest, src); case 91: return MemcpyFixed<91>(dest, src); case 92: return MemcpyFixed<92>(dest, src); case 93: return MemcpyFixed<93>(dest, src); case 94: return MemcpyFixed<94>(dest, src); case 95: return MemcpyFixed<95>(dest, src); case 96: return MemcpyFixed<96>(dest, src); case 97: return MemcpyFixed<97>(dest, src); case 98: return MemcpyFixed<98>(dest, src); case 99: return MemcpyFixed<99>(dest, src); case 100: return MemcpyFixed<100>(dest, src); case 101: return MemcpyFixed<101>(dest, src); case 102: return MemcpyFixed<102>(dest, src); case 103: return MemcpyFixed<103>(dest, src); case 104: return MemcpyFixed<104>(dest, src); case 105: return MemcpyFixed<105>(dest, src); case 106: return MemcpyFixed<106>(dest, src); case 107: return MemcpyFixed<107>(dest, src); case 108: return MemcpyFixed<108>(dest, src); case 109: return MemcpyFixed<109>(dest, src); case 110: return MemcpyFixed<110>(dest, src); case 111: return MemcpyFixed<111>(dest, src); case 112: return MemcpyFixed<112>(dest, src); case 113: return MemcpyFixed<113>(dest, src); case 114: return MemcpyFixed<114>(dest, src); case 115: return MemcpyFixed<115>(dest, src); case 116: return MemcpyFixed<116>(dest, src); case 117: return MemcpyFixed<117>(dest, src); case 118: return MemcpyFixed<118>(dest, src); case 119: return MemcpyFixed<119>(dest, src); case 120: return MemcpyFixed<120>(dest, src); case 121: return MemcpyFixed<121>(dest, src); case 122: return MemcpyFixed<122>(dest, src); case 123: return MemcpyFixed<123>(dest, src); case 124: return MemcpyFixed<124>(dest, src); case 125: return MemcpyFixed<125>(dest, src); case 126: return MemcpyFixed<126>(dest, src); case 127: return MemcpyFixed<127>(dest, src); case 128: return MemcpyFixed<128>(dest, src); case 129: return MemcpyFixed<129>(dest, src); case 130: return MemcpyFixed<130>(dest, src); case 131: return MemcpyFixed<131>(dest, src); case 132: return MemcpyFixed<132>(dest, src); case 133: return MemcpyFixed<133>(dest, src); case 134: return MemcpyFixed<134>(dest, src); case 135: return MemcpyFixed<135>(dest, src); case 136: return MemcpyFixed<136>(dest, src); case 137: return MemcpyFixed<137>(dest, src); case 138: return MemcpyFixed<138>(dest, src); case 139: return MemcpyFixed<139>(dest, src); case 140: return MemcpyFixed<140>(dest, src); case 141: return MemcpyFixed<141>(dest, src); case 142: return MemcpyFixed<142>(dest, src); case 143: return MemcpyFixed<143>(dest, src); case 144: return MemcpyFixed<144>(dest, src); case 145: return MemcpyFixed<145>(dest, src); case 146: return MemcpyFixed<146>(dest, src); case 147: return MemcpyFixed<147>(dest, src); case 148: return MemcpyFixed<148>(dest, src); case 149: return MemcpyFixed<149>(dest, src); case 150: return MemcpyFixed<150>(dest, src); case 151: return MemcpyFixed<151>(dest, src); case 152: return MemcpyFixed<152>(dest, src); case 153: return MemcpyFixed<153>(dest, src); case 154: return MemcpyFixed<154>(dest, src); case 155: return MemcpyFixed<155>(dest, src); case 156: return MemcpyFixed<156>(dest, src); case 157: return MemcpyFixed<157>(dest, src); case 158: return MemcpyFixed<158>(dest, src); case 159: return MemcpyFixed<159>(dest, src); case 160: return MemcpyFixed<160>(dest, src); case 161: return MemcpyFixed<161>(dest, src); case 162: return MemcpyFixed<162>(dest, src); case 163: return MemcpyFixed<163>(dest, src); case 164: return MemcpyFixed<164>(dest, src); case 165: return MemcpyFixed<165>(dest, src); case 166: return MemcpyFixed<166>(dest, src); case 167: return MemcpyFixed<167>(dest, src); case 168: return MemcpyFixed<168>(dest, src); case 169: return MemcpyFixed<169>(dest, src); case 170: return MemcpyFixed<170>(dest, src); case 171: return MemcpyFixed<171>(dest, src); case 172: return MemcpyFixed<172>(dest, src); case 173: return MemcpyFixed<173>(dest, src); case 174: return MemcpyFixed<174>(dest, src); case 175: return MemcpyFixed<175>(dest, src); case 176: return MemcpyFixed<176>(dest, src); case 177: return MemcpyFixed<177>(dest, src); case 178: return MemcpyFixed<178>(dest, src); case 179: return MemcpyFixed<179>(dest, src); case 180: return MemcpyFixed<180>(dest, src); case 181: return MemcpyFixed<181>(dest, src); case 182: return MemcpyFixed<182>(dest, src); case 183: return MemcpyFixed<183>(dest, src); case 184: return MemcpyFixed<184>(dest, src); case 185: return MemcpyFixed<185>(dest, src); case 186: return MemcpyFixed<186>(dest, src); case 187: return MemcpyFixed<187>(dest, src); case 188: return MemcpyFixed<188>(dest, src); case 189: return MemcpyFixed<189>(dest, src); case 190: return MemcpyFixed<190>(dest, src); case 191: return MemcpyFixed<191>(dest, src); case 192: return MemcpyFixed<192>(dest, src); case 193: return MemcpyFixed<193>(dest, src); case 194: return MemcpyFixed<194>(dest, src); case 195: return MemcpyFixed<195>(dest, src); case 196: return MemcpyFixed<196>(dest, src); case 197: return MemcpyFixed<197>(dest, src); case 198: return MemcpyFixed<198>(dest, src); case 199: return MemcpyFixed<199>(dest, src); case 200: return MemcpyFixed<200>(dest, src); case 201: return MemcpyFixed<201>(dest, src); case 202: return MemcpyFixed<202>(dest, src); case 203: return MemcpyFixed<203>(dest, src); case 204: return MemcpyFixed<204>(dest, src); case 205: return MemcpyFixed<205>(dest, src); case 206: return MemcpyFixed<206>(dest, src); case 207: return MemcpyFixed<207>(dest, src); case 208: return MemcpyFixed<208>(dest, src); case 209: return MemcpyFixed<209>(dest, src); case 210: return MemcpyFixed<210>(dest, src); case 211: return MemcpyFixed<211>(dest, src); case 212: return MemcpyFixed<212>(dest, src); case 213: return MemcpyFixed<213>(dest, src); case 214: return MemcpyFixed<214>(dest, src); case 215: return MemcpyFixed<215>(dest, src); case 216: return MemcpyFixed<216>(dest, src); case 217: return MemcpyFixed<217>(dest, src); case 218: return MemcpyFixed<218>(dest, src); case 219: return MemcpyFixed<219>(dest, src); case 220: return MemcpyFixed<220>(dest, src); case 221: return MemcpyFixed<221>(dest, src); case 222: return MemcpyFixed<222>(dest, src); case 223: return MemcpyFixed<223>(dest, src); case 224: return MemcpyFixed<224>(dest, src); case 225: return MemcpyFixed<225>(dest, src); case 226: return MemcpyFixed<226>(dest, src); case 227: return MemcpyFixed<227>(dest, src); case 228: return MemcpyFixed<228>(dest, src); case 229: return MemcpyFixed<229>(dest, src); case 230: return MemcpyFixed<230>(dest, src); case 231: return MemcpyFixed<231>(dest, src); case 232: return MemcpyFixed<232>(dest, src); case 233: return MemcpyFixed<233>(dest, src); case 234: return MemcpyFixed<234>(dest, src); case 235: return MemcpyFixed<235>(dest, src); case 236: return MemcpyFixed<236>(dest, src); case 237: return MemcpyFixed<237>(dest, src); case 238: return MemcpyFixed<238>(dest, src); case 239: return MemcpyFixed<239>(dest, src); case 240: return MemcpyFixed<240>(dest, src); case 241: return MemcpyFixed<241>(dest, src); case 242: return MemcpyFixed<242>(dest, src); case 243: return MemcpyFixed<243>(dest, src); case 244: return MemcpyFixed<244>(dest, src); case 245: return MemcpyFixed<245>(dest, src); case 246: return MemcpyFixed<246>(dest, src); case 247: return MemcpyFixed<247>(dest, src); case 248: return MemcpyFixed<248>(dest, src); case 249: return MemcpyFixed<249>(dest, src); case 250: return MemcpyFixed<250>(dest, src); case 251: return MemcpyFixed<251>(dest, src); case 252: return MemcpyFixed<252>(dest, src); case 253: return MemcpyFixed<253>(dest, src); case 254: return MemcpyFixed<254>(dest, src); case 255: return MemcpyFixed<255>(dest, src); case 256: return MemcpyFixed<256>(dest, src); default: memcpy(dest, src, size); } // LCOV_EXCL_STOP } //! This templated memcmp is significantly faster than std::memcmp, //! but only when you are calling memcmp with a const size in a loop. //! For instance `while (<cond>) { memcmp(<str1>, <str2>, const_size); ... }` static inline int FastMemcmp(const void *str1, const void *str2, const size_t size) { // LCOV_EXCL_START switch (size) { case 0: return 0; case 1: return MemcmpFixed<1>(str1, str2); case 2: return MemcmpFixed<2>(str1, str2); case 3: return MemcmpFixed<3>(str1, str2); case 4: return MemcmpFixed<4>(str1, str2); case 5: return MemcmpFixed<5>(str1, str2); case 6: return MemcmpFixed<6>(str1, str2); case 7: return MemcmpFixed<7>(str1, str2); case 8: return MemcmpFixed<8>(str1, str2); case 9: return MemcmpFixed<9>(str1, str2); case 10: return MemcmpFixed<10>(str1, str2); case 11: return MemcmpFixed<11>(str1, str2); case 12: return MemcmpFixed<12>(str1, str2); case 13: return MemcmpFixed<13>(str1, str2); case 14: return MemcmpFixed<14>(str1, str2); case 15: return MemcmpFixed<15>(str1, str2); case 16: return MemcmpFixed<16>(str1, str2); case 17: return MemcmpFixed<17>(str1, str2); case 18: return MemcmpFixed<18>(str1, str2); case 19: return MemcmpFixed<19>(str1, str2); case 20: return MemcmpFixed<20>(str1, str2); case 21: return MemcmpFixed<21>(str1, str2); case 22: return MemcmpFixed<22>(str1, str2); case 23: return MemcmpFixed<23>(str1, str2); case 24: return MemcmpFixed<24>(str1, str2); case 25: return MemcmpFixed<25>(str1, str2); case 26: return MemcmpFixed<26>(str1, str2); case 27: return MemcmpFixed<27>(str1, str2); case 28: return MemcmpFixed<28>(str1, str2); case 29: return MemcmpFixed<29>(str1, str2); case 30: return MemcmpFixed<30>(str1, str2); case 31: return MemcmpFixed<31>(str1, str2); case 32: return MemcmpFixed<32>(str1, str2); case 33: return MemcmpFixed<33>(str1, str2); case 34: return MemcmpFixed<34>(str1, str2); case 35: return MemcmpFixed<35>(str1, str2); case 36: return MemcmpFixed<36>(str1, str2); case 37: return MemcmpFixed<37>(str1, str2); case 38: return MemcmpFixed<38>(str1, str2); case 39: return MemcmpFixed<39>(str1, str2); case 40: return MemcmpFixed<40>(str1, str2); case 41: return MemcmpFixed<41>(str1, str2); case 42: return MemcmpFixed<42>(str1, str2); case 43: return MemcmpFixed<43>(str1, str2); case 44: return MemcmpFixed<44>(str1, str2); case 45: return MemcmpFixed<45>(str1, str2); case 46: return MemcmpFixed<46>(str1, str2); case 47: return MemcmpFixed<47>(str1, str2); case 48: return MemcmpFixed<48>(str1, str2); case 49: return MemcmpFixed<49>(str1, str2); case 50: return MemcmpFixed<50>(str1, str2); case 51: return MemcmpFixed<51>(str1, str2); case 52: return MemcmpFixed<52>(str1, str2); case 53: return MemcmpFixed<53>(str1, str2); case 54: return MemcmpFixed<54>(str1, str2); case 55: return MemcmpFixed<55>(str1, str2); case 56: return MemcmpFixed<56>(str1, str2); case 57: return MemcmpFixed<57>(str1, str2); case 58: return MemcmpFixed<58>(str1, str2); case 59: return MemcmpFixed<59>(str1, str2); case 60: return MemcmpFixed<60>(str1, str2); case 61: return MemcmpFixed<61>(str1, str2); case 62: return MemcmpFixed<62>(str1, str2); case 63: return MemcmpFixed<63>(str1, str2); case 64: return MemcmpFixed<64>(str1, str2); default: return memcmp(str1, str2, size); } // LCOV_EXCL_STOP } } // namespace duckdb
6,987
1,602
<reponame>jhh67/chapel<filename>test/memory/shannon/getRandomPtr.h<gh_stars>1000+ #ifndef _FREED_WITHOUT_MALLOC_H_ #define _FREED_WITHOUT_MALLOC_H_ void* getRandomPtr(void); #endif
87
450
'd' class GameObject: 'd' # def isIntersecting(self, other): pass pass class SoAndSo(GameObject): 'd' def isIntersecting(self, other): return GameObject.isIntersecting(self, other)
89
722
<filename>asteroid/masknn/base.py<gh_stars>100-1000 import numpy as np import torch from .. import complex_nn def _none_sequential(*args): return torch.nn.Sequential(*[x for x in args if x is not None]) class BaseUNet(torch.nn.Module): """Base class for u-nets with skip connections between encoders and decoders. (For u-nets without skip connections, simply use a `nn.Sequential`.) Args: encoders (List[torch.nn.Module] of length `N`): List of encoders decoders (List[torch.nn.Module] of length `N - 1`): List of decoders output_layer (Optional[torch.nn.Module], optional): Layer after last decoder. """ def __init__( self, encoders, decoders, *, output_layer=None, ): assert len(encoders) == len(decoders) + 1 super().__init__() self.encoders = torch.nn.ModuleList(encoders) self.decoders = torch.nn.ModuleList(decoders) self.output_layer = output_layer or torch.nn.Identity() def forward(self, x): enc_outs = [] for idx, enc in enumerate(self.encoders): x = enc(x) enc_outs.append(x) for idx, (enc_out, dec) in enumerate(zip(reversed(enc_outs[:-1]), self.decoders)): x = dec(x) x = torch.cat([x, enc_out], dim=1) return self.output_layer(x) class BaseDCUMaskNet(BaseUNet): """Base class for DCU-style mask nets. Used for DCUMaskNet and DCCRMaskNet. The preferred way to instantiate this class is to use the ``default_architecture()`` classmethod. Args: encoders (List[torch.nn.Module]): List of encoders decoders (List[torch.nn.Module]): List of decoders output_layer (Optional[torch.nn.Module], optional): Layer after last decoder, before mask application. mask_bound (Optional[str], optional): Type of mask bound to use, as defined in [1]. Valid values are "tanh" ("BDT mask"), "sigmoid" ("BDSS mask"), None (unbounded mask). References - [1] : "Phase-aware Speech Enhancement with Deep Complex U-Net", <NAME> et al. https://arxiv.org/abs/1903.03107 """ _architectures = NotImplemented @classmethod def default_architecture(cls, architecture: str, n_src=1, **kwargs): """Create a masknet instance from a predefined, named architecture. Args: architecture (str): Name of predefined architecture. Valid values are dependent on the concrete subclass of ``BaseDCUMaskNet``. n_src (int, optional): Number of sources kwargs (optional): Passed to ``__init__``. """ encoders, decoders = cls._architectures[architecture] # Fix n_src in last decoder in_chan, _ignored_out_chan, *rest = decoders[-1] decoders = (*decoders[:-1], (in_chan, n_src, *rest)) return cls(encoders, decoders, **kwargs) def __init__(self, encoders, decoders, output_layer=None, mask_bound="tanh", **kwargs): self.mask_bound = mask_bound super().__init__( encoders=encoders, decoders=decoders, output_layer=_none_sequential( output_layer, complex_nn.BoundComplexMask(mask_bound), ), **kwargs, ) def forward(self, x): fixed_x = self.fix_input_dims(x) out = super().forward(fixed_x.unsqueeze(1)) out = self.fix_output_dims(out, x) return out def fix_input_dims(self, x): """Overwrite this in subclasses to implement input dimension checks.""" return x def fix_output_dims(self, y, x): """Overwrite this in subclasses to implement output dimension checks. y is the output and x was the input (passed to use the shape).""" return y
1,699
575
// Copyright 2015 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "content/browser/background_sync/background_sync_metrics.h" #include "base/metrics/histogram_functions.h" namespace { content::BackgroundSyncMetrics::ResultPattern EventResultToResultPattern( bool success, bool finished_in_foreground) { if (success) { return finished_in_foreground ? content::BackgroundSyncMetrics:: RESULT_PATTERN_SUCCESS_FOREGROUND : content::BackgroundSyncMetrics:: RESULT_PATTERN_SUCCESS_BACKGROUND; } return finished_in_foreground ? content::BackgroundSyncMetrics::RESULT_PATTERN_FAILED_FOREGROUND : content::BackgroundSyncMetrics::RESULT_PATTERN_FAILED_BACKGROUND; } const std::string GetBackgroundSyncSuffix( blink::mojom::BackgroundSyncType sync_type) { if (sync_type == blink::mojom::BackgroundSyncType::ONE_SHOT) return "OneShot"; else return "Periodic"; } const std::string GetBackgroundSyncPrefix( blink::mojom::BackgroundSyncType sync_type) { if (sync_type == blink::mojom::BackgroundSyncType::ONE_SHOT) return ""; else return "Periodic"; } } // namespace namespace content { // static void BackgroundSyncMetrics::RecordEventStarted( blink::mojom::BackgroundSyncType sync_type, bool started_in_foreground) { base::UmaHistogramBoolean("BackgroundSync.Event." + GetBackgroundSyncSuffix(sync_type) + "StartedInForeground", started_in_foreground); } // static void BackgroundSyncMetrics::RecordRegistrationComplete( bool event_succeeded, int num_attempts_required) { base::UmaHistogramBoolean( "BackgroundSync.Registration.OneShot.EventSucceededAtCompletion", event_succeeded); if (!event_succeeded) return; base::UmaHistogramExactLinear( "BackgroundSync.Registration.OneShot.NumAttemptsForSuccessfulEvent", num_attempts_required, 50); } // static void BackgroundSyncMetrics::RecordEventResult( blink::mojom::BackgroundSyncType sync_type, bool success, bool finished_in_foreground) { base::UmaHistogramEnumeration( "BackgroundSync.Event." + GetBackgroundSyncSuffix(sync_type) + "ResultPattern", EventResultToResultPattern(success, finished_in_foreground), static_cast<ResultPattern>(RESULT_PATTERN_MAX + 1)); } // static void BackgroundSyncMetrics::RecordBatchSyncEventComplete( blink::mojom::BackgroundSyncType sync_type, const base::TimeDelta& time, bool from_wakeup_task, int number_of_batched_sync_events) { // The total batch handling time should be under 5 minutes; we'll record up to // 6 minutes, to be safe. base::UmaHistogramCustomTimes( GetBackgroundSyncPrefix(sync_type) + "BackgroundSync.Event.Time", time, /* min= */ base::TimeDelta::FromMilliseconds(10), /* max= */ base::TimeDelta::FromMinutes(6), /* buckets= */ 50); base::UmaHistogramCounts100( GetBackgroundSyncPrefix(sync_type) + "BackgroundSync.Event.BatchSize", number_of_batched_sync_events); base::UmaHistogramBoolean(GetBackgroundSyncPrefix(sync_type) + "BackgroundSync.Event.FromWakeupTask", from_wakeup_task); } // static void BackgroundSyncMetrics::CountRegisterSuccess( blink::mojom::BackgroundSyncType sync_type, int64_t min_interval_ms, RegistrationCouldFire registration_could_fire, RegistrationIsDuplicate registration_is_duplicate) { base::UmaHistogramEnumeration( "BackgroundSync.Registration." + GetBackgroundSyncSuffix(sync_type), BACKGROUND_SYNC_STATUS_OK, static_cast<BackgroundSyncStatus>(BACKGROUND_SYNC_STATUS_MAX + 1)); if (sync_type == blink::mojom::BackgroundSyncType::ONE_SHOT) { base::UmaHistogramBoolean( "BackgroundSync.Registration.OneShot.CouldFire", registration_could_fire == REGISTRATION_COULD_FIRE); } else { DCHECK_GE(min_interval_ms, 0); base::UmaHistogramCounts10M( "BackgroundSync.Registration.Periodic.MinInterval", min_interval_ms / 1000); } base::UmaHistogramBoolean( "BackgroundSync.Registration." + GetBackgroundSyncSuffix(sync_type) + ".IsDuplicate", registration_is_duplicate == REGISTRATION_IS_DUPLICATE); } // static void BackgroundSyncMetrics::CountRegisterFailure( blink::mojom::BackgroundSyncType sync_type, BackgroundSyncStatus result) { base::UmaHistogramEnumeration( std::string("BackgroundSync.Registration.") + GetBackgroundSyncSuffix(sync_type), result, static_cast<BackgroundSyncStatus>(BACKGROUND_SYNC_STATUS_MAX + 1)); } // static void BackgroundSyncMetrics::CountUnregisterPeriodicSync( BackgroundSyncStatus status) { base::UmaHistogramEnumeration( "BackgroundSync.Unregistration.Periodic", status, static_cast<BackgroundSyncStatus>(BACKGROUND_SYNC_STATUS_MAX + 1)); } // static void BackgroundSyncMetrics::RecordEventsFiredFromWakeupTask( blink::mojom::BackgroundSyncType sync_type, bool fired_events) { base::UmaHistogramBoolean("BackgroundSync.WakeupTaskFiredEvents." + GetBackgroundSyncSuffix(sync_type), fired_events); } } // namespace content
2,167
776
package act.handler.builtin.controller; /*- * #%L * ACT Framework * %% * Copyright (C) 2014 - 2017 ActFramework * %% * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * #L% */ import act.app.ActionContext; import act.controller.CacheSupportMetaInfo; import act.security.CORS; import act.security.CSRF; import act.util.MissingAuthenticationHandler; import org.osgl.$; import org.osgl.mvc.result.Result; /** * Dispatch request to real controller action method */ public class ControllerAction extends ActionHandler<ControllerAction> { private ActionHandlerInvoker handlerInvoker; public ControllerAction(ActionHandlerInvoker invoker) { super(-1); this.handlerInvoker = invoker; } public CacheSupportMetaInfo cacheSupport() { return handlerInvoker.cacheSupport(); } @Override public Result handle(ActionContext actionContext) throws Exception { return handlerInvoker.handle(actionContext); } @Override public int hashCode() { return $.hc(handlerInvoker, getClass()); } @Override public boolean equals(Object obj) { if (obj == this) { return true; } if (obj instanceof ControllerAction) { return $.eq(((ControllerAction) obj).handlerInvoker, handlerInvoker); } return false; } @Override public String toString() { return handlerInvoker.toString(); } @Override public CORS.Spec corsSpec() { return handlerInvoker.corsSpec(); } public CSRF.Spec csrfSpec() { return handlerInvoker.csrfSpec(); } public String contentSecurityPolicy() { return handlerInvoker.contentSecurityPolicy(); } public boolean disableContentSecurityPolicy() { return handlerInvoker.disableContentSecurityPolicy(); } @Override public boolean sessionFree() { return handlerInvoker.sessionFree(); } public MissingAuthenticationHandler missingAuthenticationHandler() { return handlerInvoker.missingAuthenticationHandler(); } public MissingAuthenticationHandler csrfFailureHandler() { return handlerInvoker.csrfFailureHandler(); } @Override public boolean express() { return handlerInvoker.express(); } @Override public boolean skipEvents() { return handlerInvoker.skipEvents(); } @Override public void accept(Visitor visitor) { handlerInvoker.accept(visitor.invokerVisitor()); } public ActionHandlerInvoker invoker() { return handlerInvoker; } @Override protected void releaseResources() { handlerInvoker.destroy(); handlerInvoker = null; } @Override public int order() { return handlerInvoker.order(); } }
1,138
917
<filename>bitcoin-datastruct/src/main/java/com/itranswarp/bitcoin/struct/InvVect.java package com.itranswarp.bitcoin.struct; import java.io.IOException; import com.fasterxml.jackson.databind.annotation.JsonSerialize; import com.itranswarp.bitcoin.io.BitcoinInput; import com.itranswarp.bitcoin.io.BitcoinOutput; import com.itranswarp.bitcoin.serializer.HashSerializer; public class InvVect { /** * Any data of with this number may be ignored. */ public static final int ERROR = 0; /** * Hash is related to a transaction. */ public static final int MSG_TX = 1; /** * Hash is related to a data block. */ public static final int MSG_BLOCK = 2; /** * Hash of a block header; identical to MSG_BLOCK. Only to be used in * getdata message. Indicates the reply should be a merkleblock message * rather than a block message; this only works if a bloom filter has been * set. */ public static final int MSG_FILTERED_BLOCK = 3; /** * Hash of a block header; identical to MSG_BLOCK. Only to be used in * getdata message. Indicates the reply should be a cmpctblock message. See * BIP 152 for more info. */ public static final int MSG_CMPCT_BLOCK = 4; /** * uint32 */ public int type; /** * 32-bytes hash */ @JsonSerialize(using = HashSerializer.class) public byte[] hash; public InvVect() { } public InvVect(BitcoinInput input) throws IOException { this.type = input.readInt(); this.hash = input.readBytes(32); } public byte[] toByteArray() { return new BitcoinOutput().writeInt(this.type).write(this.hash).toByteArray(); } }
533
325
/* SPDX-License-Identifier: BSD-3-Clause * * Copyright(c) 2021 AMD.All rights reserved. * * Author: <NAME> <<EMAIL>> * <NAME> <<EMAIL>> * <NAME> <<EMAIL>> */ #if !defined(_RN_REG_HEADER) #define _RN_REG_HEADER typedef union acp_dma_cntl_0 { struct { unsigned int dmachrst:1; unsigned int dmachrun:1; unsigned int dmachiocen:1; unsigned int :29; } bits; unsigned int u32all; } acp_dma_cntl_0_t; typedef union acp_dma_ch_sts { struct { unsigned int dmachrunsts:8; unsigned int :24; } bits; unsigned int u32all; } acp_dma_ch_sts_t; typedef union acp_external_intr_enb { struct { unsigned int acpextintrenb:1; unsigned int :31; } bits; unsigned int u32all; } acp_external_intr_enb_t; typedef union acp_dsp0_intr_cntl { struct { unsigned int dmaiocmask:8; unsigned int :8; unsigned int wov_dma_intr_mask:1; unsigned int :6; unsigned int audio_buffer_int_mask:6; unsigned int :3; } bits; unsigned int u32all; } acp_dsp0_intr_cntl_t; typedef union acp_dsp0_intr_stat { struct { unsigned int dmaiocstat:8; unsigned int :8; unsigned int wov_dma_stat:1; unsigned int :6; unsigned int audio_buffer_int_stat:6; unsigned int :3; } bits; unsigned int u32all; } acp_dsp0_intr_stat_t; typedef union acp_dsp_sw_intr_cntl { struct { unsigned int :2; unsigned int dsp0_to_host_intr_mask:1; unsigned int :29; } bits; unsigned int u32all; } acp_dsp_sw_intr_cntl_t; typedef union acp_dsp_sw_intr_stat { struct { unsigned int host_to_dsp0_intr1_stat:1; unsigned int host_to_dsp0_intr2_stat:1; unsigned int dsp0_to_host_intr_stat:1; unsigned int host_to_dsp0_intr3_stat:1; unsigned int :28; } bits; unsigned int u32all; } acp_dsp_sw_intr_stat_t; typedef union acp_sw_intr_trig { struct { unsigned int trig_host_to_dsp0_intr1:1; unsigned int :1; unsigned int trig_dsp0_to_host_intr:1; unsigned int :29; } bits; unsigned int u32all; } acp_sw_intr_trig_t; typedef union dsp_interrupt_routing_ctrl { struct { unsigned int dma_intr_level:2; unsigned int :14; unsigned int az_sw_i2s_intr_level:2; unsigned int :2; unsigned int host_to_dsp_intr1_level:2; unsigned int :8; unsigned int wov_intr_level:2; } bits; unsigned int u32all; } dsp_interrupt_routing_ctrl_t; typedef union acp_i2s_rx_ringbufaddr { struct { unsigned int i2s_rx_ringbufaddr:27; unsigned int :5; } bits; unsigned int u32all; } acp_i2s_rx_ringbufaddr_t; typedef union acp_i2s_rx_ringbufsize { struct { unsigned int i2s_rx_ringbufsize:26; unsigned int :6; } bits; unsigned int u32all; } acp_i2s_rx_ringbufsize_t; typedef union acp_i2s_rx_linkpositioncntr { struct { unsigned int i2s_rx_linkpositioncntr:26; unsigned int :6; } bits; unsigned int u32all; } acp_i2s_rx_linkpositioncntr_t; typedef union acp_i2s_rx_fifoaddr { struct { unsigned int i2s_rx_fifoaddr:27; unsigned int :5; } bits; unsigned int u32all; } acp_i2s_rx_fifoaddr_t; typedef union acp_i2s_rx_fifosize { struct { unsigned int i2s_rx_fifosize:13; unsigned int :19; } bits; unsigned int u32all; } acp_i2s_rx_fifosize_t; typedef union acp_i2s_rx_dma_size { struct { unsigned int i2s_rx_dma_size:13; unsigned int :19; } bits; unsigned int u32all; } acp_i2s_rx_dma_size_t; typedef union acp_i2s_rx_linearpositioncntr_high { struct { unsigned int i2s_rx_linearpositioncntr_high:32; } bits; unsigned int u32all; } acp_i2s_rx_linearpositioncntr_high_t; typedef union acp_i2s_rx_linearpositioncntr_low { struct { unsigned int i2s_rx_linearpositioncntr_low:32; } bits; unsigned int u32all; } acp_i2s_rx_linearpositioncntr_low_t; typedef union acp_i2s_rx_watermark_size { struct { unsigned int i2s_rx_intr_watermark_size:26; unsigned int :6; } bits; unsigned int u32all; } acp_i2s_rx_intr_watermark_size_t; typedef union acp_i2s_tx_ringbufaddr { struct { unsigned int i2s_tx_ringbufaddr:27; unsigned int :5; } bits; unsigned int u32all; } acp_i2s_tx_ringbufaddr_t; typedef union acp_i2s_tx_ringbufsize { struct { unsigned int i2s_tx_ringbufsize:26; unsigned int :6; } bits; unsigned int u32all; } acp_i2s_tx_ringbufsize_t; typedef union acp_i2s_tx_linkpositioncntr { struct { unsigned int i2s_tx_linkpositioncntr:26; unsigned int :6; } bits; unsigned int u32all; } acp_i2s_tx_linkpositioncntr_t; typedef union acp_i2s_tx_fifoaddr { struct { unsigned int i2s_tx_fifoaddr:27; unsigned int :5; } bits; unsigned int u32all; } acp_i2s_tx_fifoaddr_t; typedef union acp_i2s_tx_fifosize { struct { unsigned int i2s_tx_fifosize:13; unsigned int :19; } bits; unsigned int u32all; } acp_i2s_tx_fifosize_t; typedef union acp_i2s_tx_dma_size { struct { unsigned int i2s_tx_dma_size:13; unsigned int :19; } bits; unsigned int u32all; } acp_i2s_tx_dma_size_t; typedef union acp_i2s_tx_linearpositioncntr_high { struct { unsigned int i2s_tx_linearpositioncntr_high:32; } bits; unsigned int u32all; } acp_i2s_tx_linearpositioncntr_hight_t; typedef union acp_i2s_tx_linearpositioncntr_low { struct { unsigned int i2s_tx_linearpositioncntr_low:32; } bits; unsigned int u32all; } acp_i2s_tx_linearpositioncntr_low_t; typedef union acp_i2s_tx_intr_watermark_size { struct { unsigned int i2s_tx_intr_watermark_size:26; unsigned int :6; } bits; unsigned int u32all; } acp_i2s_tx_intr_watermark_size_t; typedef union acp_bt_rx_ringbufaddr { struct { unsigned int bt_rx_ringbufaddr:27; unsigned int :5; } bits; unsigned int u32all; } acp_bt_rx_ringbufaddr_t; typedef union acp_bt_rx_ringbufsize { struct { unsigned int bt_rx_ringbufsize:26; unsigned int :6; } bits; unsigned int u32all; } acp_bt_rx_ringbufsize_t; typedef union acp_bt_rx_linkpositioncntr { struct { unsigned int bt_rx_linkpositioncntr:26; unsigned int :6; } bits; unsigned int u32all; } acp_bt_rx_linkpositioncntr_t; typedef union acp_bt_rx_fifoaddr { struct { unsigned int bt_rx_fifoaddr:27; unsigned int :5; } bits; unsigned int u32all; } acp_bt_rx_fifoaddr_t; typedef union acp_bt_rx_fifosize { struct { unsigned int bt_rx_fifosize:13; unsigned int :19; } bits; unsigned int u32all; } acp_bt_rx_fifosize_t; typedef union acp_bt_rx_dma_size { struct { unsigned int bt_rx_dma_size:13; unsigned int :19; } bits; unsigned int u32all; } acp_bt_rx_dma_size_t; typedef union acp_bt_rx_linearpositioncntr_high { struct { unsigned int bt_rx_linearpositioncntr_high:32; } bits; unsigned int u32all; } acp_bt_rx_linearpositioncntr_high_t; typedef union acp_bt_rx_linearpositioncntr_low { struct { unsigned int bt_rx_linearpositioncntr_low:32; } bits; unsigned int u32all; } acp_bt_rx_linearpositioncntr_low_t; typedef union acp_bt_rx_intr_watermark_size { struct { unsigned int bt_rx_intr_watermark_size:26; unsigned int :6; } bits; unsigned int u32all; } acp_bt_rx_intr_watermark_size_t; typedef union acp_bt_tx_ringbufaddr { struct { unsigned int bt_tx_ringbufaddr:27; unsigned int :5; } bits; unsigned int u32all; } acp_bt_tx_ringbufaddr_t; typedef union acp_bt_tx_ringbufsize { struct { unsigned int bt_tx_ringbufsize:26; unsigned int :6; } bits; unsigned int u32all; } acp_bt_tx_ringbufsize_t; typedef union acp_bt_tx_linkpositiontcntr { struct { unsigned int bt_tx_linkpositioncntr:26; unsigned int :6; } bits; unsigned int u32all; } acp_bt_tx_linkpositiontcntr_t; typedef union acp_bt_tx_fifoaddr { struct { unsigned int bt_tx_fifoaddr:27; unsigned int :5; } bits; unsigned int u32all; } acp_bt_tx_fifoaddr_t; typedef union acp_bt_tx_fifosize { struct { unsigned int bt_tx_fifosize:13; unsigned int :19; } bits; unsigned int u32all; } acp_bt_tx_fifosize_t; typedef union acp_bt_tx_dmasize { struct { unsigned int bt_tx_dma_size:13; unsigned int :19; } bits; unsigned int u32all; } acp_bt_tx_dmasize_t; typedef union acp_bt_tx_linearpositioncntr_high { struct { unsigned int bt_tx_linearpositioncntr_high:32; } bits; unsigned int u32all; } acp_bt_tx_linearpositioncntr_high_t; typedef union acp_bt_tx_linearpositioncntr_low { struct { unsigned int bt_tx_linearpositioncntr_low:32; } bits; unsigned int u32all; } acp_bt_tx_linearpositioncntr_low_t; typedef union acp_bt_tx_intr_watermark_size { struct { unsigned int bt_tx_intr_watermark_size:26; unsigned int :6; } bits; unsigned int u32all; } acp_bt_tx_intr_watermark_size_t; typedef union acp_i2stdm_ier { struct { unsigned int i2stdm_ien:1; unsigned int :31; } bits; unsigned int u32all; } acp_i2stdm_ier_t; typedef union acp_i2stdm_irer { struct { unsigned int i2stdm_rx_en:1; unsigned int i2stdm_rx_protocol_mode:1; unsigned int i2stdm_rx_data_path_mode:1; unsigned int i2stdm_rx_samplen:3; unsigned int i2stdm_rx_status:1; unsigned int :25; } bits; unsigned int u32all; } acp_i2stdm_irer_t; typedef union acp_i2stdm_iter { struct { unsigned int i2stdm_txen:1; unsigned int i2stdm_tx_protocol_mode:1; unsigned int i2stdm_tx_data_path_mode:1; unsigned int i2stdm_tx_samp_len:3; unsigned int i2stdm_tx_status:1; unsigned int :25; } bits; unsigned int u32all; } acp_i2stdm_iter_t; typedef union acp_bttdm_ier { struct { unsigned int bttdm_ien:1; unsigned int :31; } bits; unsigned int u32all; } acp_bttdm_ier_t; typedef union acp_bttdm_irer { struct { unsigned int bttdm_rx_en:1; unsigned int bttdm_rx_protocol_mode:1; unsigned int bttdm_rx_data_path_mode:1; unsigned int bttdm_rx_samplen:3; unsigned int bttdm_rx_status:1; unsigned int :25; } bits; unsigned int u32all; } acp_bttdm_irer_t; typedef union acp_bttdm_iter { struct { unsigned int bttdm_txen:1; unsigned int bttdm_tx_protocol_mode:1; unsigned int bttdm_tx_data_path_mode:1; unsigned int bttdm_tx_samp_len:3; unsigned int bttdm_tx_status:1; unsigned int :25; } bits; unsigned int u32all; } acp_bttdm_iter_t; typedef union acp_wov_pdm_dma_enable { struct { unsigned int pdm_dma_en:1; unsigned int pdm_dma_en_status:1; unsigned int :30; } bits; unsigned int u32all; } acp_wov_pdm_dma_enable_t; typedef union acp_wov_rx_ringbufaddr { struct { unsigned int rx_ringbufaddr:27; unsigned int :5; } bits; unsigned int u32all; } acp_wov_rx_ringbufaddr_t; typedef union acp_wov_rx_ringbufsize { struct { unsigned int rx_ringbufsize:26; unsigned int :6; } bits; unsigned int u32all; } acp_wov_rx_ringbufsize_t; typedef union acp_wov_rx_intr_watermark_size { struct { unsigned int rx_intr_watermark_size:26; unsigned int :6; } bits; unsigned int u32all; } acp_wov_rx_intr_watermark_size_t; typedef union acp_wov_pdm_no_of_channels { struct { unsigned int pdm_no_of_channels:2; unsigned int :30; } bits; unsigned int u32all; } acp_wov_pdm_no_of_channels_t; typedef union acp_wov_pdm_decimation_factor { struct { unsigned int pdm_decimation_factor:2; unsigned int :30; } bits; unsigned int u32all; } acp_wov_pdm_decimation_factor_t; typedef union acp_wov_misc_ctrl { struct { unsigned int :3; unsigned int pcm_data_shift_ctrl:2; unsigned int :27; } bits; unsigned int u32all; } acp_wov_misc_ctrl_t; typedef union acp_wov_clk_ctrl { struct { unsigned int brm_clk_ctrl:4; unsigned int pdm_vad_clkdiv:2; unsigned int :26; } bits; unsigned int u32all; } acp_wov_clk_ctrl_t; typedef union acp_srbm_cycle_sts { struct { unsigned int srbm_clients_sts:1; unsigned int :7; } bits; unsigned int u32all; } acp_srbm_cycle_sts_t; #endif
5,377
402
<filename>allennlp_models/vision/dataset_readers/__init__.py from allennlp_models.vision.dataset_readers.vision_reader import VisionReader from allennlp_models.vision.dataset_readers.gqa import GQAReader from allennlp_models.vision.dataset_readers.nlvr2 import Nlvr2Reader from allennlp_models.vision.dataset_readers.vgqa import VGQAReader from allennlp_models.vision.dataset_readers.vqav2 import VQAv2Reader from allennlp_models.vision.dataset_readers.visual_entailment import VisualEntailmentReader from allennlp_models.vision.dataset_readers.flickr30k import Flickr30kReader
205
2,706
<gh_stars>1000+ /* Copyright (c) 2013-2016 <NAME> * * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ #include <mgba/core/rewind.h> #include <mgba/core/core.h> #include <mgba/core/serialize.h> #include <mgba-util/patch/fast.h> #include <mgba-util/vfs.h> DEFINE_VECTOR(mCoreRewindPatches, struct PatchFast); void _rewindDiff(struct mCoreRewindContext* context); #ifndef DISABLE_THREADING THREAD_ENTRY _rewindThread(void* context); #endif void mCoreRewindContextInit(struct mCoreRewindContext* context, size_t entries, bool onThread) { if (context->currentState) { return; } mCoreRewindPatchesInit(&context->patchMemory, entries); size_t e; for (e = 0; e < entries; ++e) { initPatchFast(mCoreRewindPatchesAppend(&context->patchMemory)); } context->previousState = VFileMemChunk(0, 0); context->currentState = VFileMemChunk(0, 0); context->size = 0; context->stateFlags = SAVESTATE_SAVEDATA; #ifndef DISABLE_THREADING context->onThread = onThread; context->ready = false; if (onThread) { MutexInit(&context->mutex); ConditionInit(&context->cond); ThreadCreate(&context->thread, _rewindThread, context); } #else UNUSED(onThread); #endif } void mCoreRewindContextDeinit(struct mCoreRewindContext* context) { if (!context->currentState) { return; } #ifndef DISABLE_THREADING if (context->onThread) { MutexLock(&context->mutex); context->onThread = false; MutexUnlock(&context->mutex); ConditionWake(&context->cond); ThreadJoin(context->thread); MutexDeinit(&context->mutex); ConditionDeinit(&context->cond); } #endif context->previousState->close(context->previousState); context->currentState->close(context->currentState); context->previousState = NULL; context->currentState = NULL; size_t s; for (s = 0; s < mCoreRewindPatchesSize(&context->patchMemory); ++s) { deinitPatchFast(mCoreRewindPatchesGetPointer(&context->patchMemory, s)); } mCoreRewindPatchesDeinit(&context->patchMemory); } void mCoreRewindAppend(struct mCoreRewindContext* context, struct mCore* core) { #ifndef DISABLE_THREADING if (context->onThread) { MutexLock(&context->mutex); } #endif struct VFile* nextState = context->previousState; mCoreSaveStateNamed(core, nextState, context->stateFlags); context->previousState = context->currentState; context->currentState = nextState; #ifndef DISABLE_THREADING if (context->onThread) { context->ready = true; ConditionWake(&context->cond); MutexUnlock(&context->mutex); return; } #endif _rewindDiff(context); } void _rewindDiff(struct mCoreRewindContext* context) { ++context->current; if (context->size < mCoreRewindPatchesSize(&context->patchMemory)) { ++context->size; } if (context->current >= mCoreRewindPatchesSize(&context->patchMemory)) { context->current = 0; } struct PatchFast* patch = mCoreRewindPatchesGetPointer(&context->patchMemory, context->current); size_t size2 = context->currentState->size(context->currentState); size_t size = context->previousState->size(context->previousState); if (size2 > size) { context->previousState->truncate(context->previousState, size2); size = size2; } else if (size > size2) { context->currentState->truncate(context->currentState, size); } void* current = context->previousState->map(context->previousState, size, MAP_READ); void* next = context->currentState->map(context->currentState, size, MAP_READ); diffPatchFast(patch, current, next, size); context->previousState->unmap(context->previousState, current, size); context->currentState->unmap(context->currentState, next, size); } bool mCoreRewindRestore(struct mCoreRewindContext* context, struct mCore* core) { #ifndef DISABLE_THREADING if (context->onThread) { MutexLock(&context->mutex); } #endif if (!context->size) { #ifndef DISABLE_THREADING if (context->onThread) { MutexUnlock(&context->mutex); } #endif return false; } --context->size; mCoreLoadStateNamed(core, context->previousState, context->stateFlags); if (context->current == 0) { context->current = mCoreRewindPatchesSize(&context->patchMemory); } --context->current; struct PatchFast* patch = mCoreRewindPatchesGetPointer(&context->patchMemory, context->current); size_t size2 = context->previousState->size(context->previousState); size_t size = context->currentState->size(context->currentState); if (size2 < size) { size = size2; } void* current = context->currentState->map(context->currentState, size, MAP_READ); void* previous = context->previousState->map(context->previousState, size, MAP_WRITE); patch->d.applyPatch(&patch->d, previous, size, current, size); context->currentState->unmap(context->currentState, current, size); context->previousState->unmap(context->previousState, previous, size); struct VFile* nextState = context->previousState; context->previousState = context->currentState; context->currentState = nextState; #ifndef DISABLE_THREADING if (context->onThread) { MutexUnlock(&context->mutex); } #endif return true; } #ifndef DISABLE_THREADING THREAD_ENTRY _rewindThread(void* context) { struct mCoreRewindContext* rewindContext = context; ThreadSetName("Rewind Diff Thread"); MutexLock(&rewindContext->mutex); while (rewindContext->onThread) { while (!rewindContext->ready && rewindContext->onThread) { ConditionWait(&rewindContext->cond, &rewindContext->mutex); } if (rewindContext->ready) { _rewindDiff(rewindContext); } rewindContext->ready = false; } MutexUnlock(&rewindContext->mutex); return 0; } #endif
1,973
1,601
void suppressDuplicatedByAllAndASingle() { // codechecker_suppress [all] Suppress all bug in the next C/C++ statement // codechecker_suppress [bugprone-sizeof-expression] Already suppressed by the previous "[all]" comment above sizeof(46); }
74
459
<filename>engine/assets/code/renderers/shadow_map_renderer.cpp #include <GL/glew.h> #include <GLFW/glfw3.h> #include "shadow_map_renderer.h" #include "../scene/light.h" #include "../programs/depth_program.h" #include "../programs/blur_program.h" #include "../../../scene/scene.h" #include "../../../scene/texture.h" #include "../../../scene/material.h" #include "../../../core/assets_manager.h" #include <oglplus/context.hpp> #include <oglplus/bound/texture.hpp> void ShadowMapRenderer::SetMaterialUniforms(const Material &material) const { auto &prog = CurrentProgram<DepthProgram>(); oglplus::Texture::Active(RawTexture::Diffuse); material.BindTexture(RawTexture::Diffuse); prog.diffuseMap.Set(RawTexture::Diffuse); } void ShadowMapRenderer::SetMatricesUniforms(const Node &node) const { auto &prog = CurrentProgram<DepthProgram>(); static auto &camera = Camera::Active(); prog.matrices.modelViewProjection.Set(camera->ViewProjectionMatrix() * node.transform.Matrix()); } void ShadowMapRenderer::Render() { using namespace oglplus; static Context gl; static Scene * scenePtr = nullptr; static auto &scene = Scene::Active(); auto camera = Camera::Active().get(); if (!camera || !scene || !scene->IsLoaded() || !shadowCaster) { return; } // initially assign invalid direction static auto &changes = Transform::TransformChangedMap(); static bool updateShadowMap = true; // any node transformation happened for (auto &c : changes) { auto &type = typeid(*c.first); if (type == typeid(Node)) { updateShadowMap = true; break; } } // shadow caster change if (shadowCaster->TransformChanged()) { updateShadowMap = true; } // scene change if (scenePtr != scene.get()) { scenePtr = scene.get(); updateShadowMap = true; } if (!updateShadowMap) { return; } updateShadowMap = false; // update shadow map SetAsActive(); lightView.SetAsActive(); shadowFramebuffer.Bind(FramebufferTarget::Draw); gl.Viewport(0, 0, shadowMapSize.x, shadowMapSize.y); gl.ClearColor(0.0f, 0.0f, 0.0f, 0.0f); gl.Clear().DepthBuffer().ColorBuffer(); // activate geometry pass shader program auto &prog = DepthShader(); CurrentProgram<DepthProgram>(prog); // rendering flags gl.Disable(Capability::Blend); gl.Enable(Capability::DepthTest); gl.Disable(Capability::CullFace); // unneded since directional light cover the whole scene // can be useful for view frustum aware light frustum later lightView.DoFrustumCulling(false); // scene spatial cues auto sceneBB = scene->rootNode->boundaries; auto &center = sceneBB.Center(); auto radius = distance(center, sceneBB.MaxPoint()); // fix light frustum to fit scene bounding sphere lightView.OrthoRect(glm::vec4(-radius, radius, -radius, radius)); lightView.ClipPlaneNear(-radius); lightView.ClipPlaneFar(2.0f * radius); lightView.Projection(Camera::ProjectionMode::Orthographic); lightView.transform.Position(center + shadowCaster->Direction() * radius); lightView.transform.Forward(-shadowCaster->Direction()); // update lightview matrix LightSpaceMatrix(); // uniforms prog.exponents.Set(exponents); // draw whole scene tree from root node scene->rootNode->DrawList(); // recover original render camera camera->SetAsActive(); // blur the result evsm map if(blurScale > 0) { BlurShadowMap(); } // recover DefaultFramebuffer().Bind(FramebufferTarget::Draw); // no trilinear filtering if (filtering < 2) return; // mip map shadow map shadowMap.Bind(TextureTarget::_2D); shadowMap.GenerateMipmap(TextureTarget::_2D); } void ShadowMapRenderer::Caster(const Light * caster) { shadowCaster = caster; } const Light * ShadowMapRenderer::Caster() const { return shadowCaster; } const glm::mat4x4 &ShadowMapRenderer::LightSpaceMatrix() { static glm::mat4 biasMatrix(0.5, 0.0, 0.0, 0.0, 0.0, 0.5, 0.0, 0.0, 0.0, 0.0, 0.5, 0.0, 0.5, 0.5, 0.5, 1.0); if(lightView.TransformChanged()) { return lightSpaceMatrix = biasMatrix * lightView.ViewProjectionMatrix(); } return lightSpaceMatrix; } void ShadowMapRenderer::BindReading(unsigned unit) const { shadowMap.Active(unit); shadowMap.Bind(oglplus::TextureTarget::_2D); } const Camera &ShadowMapRenderer::LightCamera() const { return lightView; } const oglplus::Texture &ShadowMapRenderer::ShadowMap() const { return shadowMap; } ShadowMapRenderer::ShadowMapRenderer(RenderWindow &window) : Renderer(window), shadowCaster(nullptr) { blurScale = 0.5f; blurQuality = 1; filtering = 1; exponents = glm::vec2(40.0f, 5.0f); lightBleedingReduction = 0.0f; SetupFramebuffers(1024, 1024); fsQuad.Load(); } ShadowMapRenderer::~ShadowMapRenderer() { } const glm::vec2 &ShadowMapRenderer::Exponents() const { return exponents; } void ShadowMapRenderer::Exponents(const glm::vec2 &val) { exponents = val; } const float &ShadowMapRenderer::LightBleedingReduction() const { return lightBleedingReduction; } void ShadowMapRenderer::LightBleedingReduction(const float &val) { lightBleedingReduction = val; } DepthProgram &ShadowMapRenderer::DepthShader() { static auto &assets = AssetsManager::Instance(); static auto &prog = *static_cast<DepthProgram *> (assets->programs["Depth"].get()); return prog; } BlurProgram &ShadowMapRenderer::BlurShader() { static auto &assets = AssetsManager::Instance(); static auto &prog = *static_cast<BlurProgram *> (assets->programs["Blur"].get()); return prog; } void ShadowMapRenderer::SetupFramebuffers(const unsigned &w, const unsigned &h) { using namespace oglplus; static Context gl; // save size shadowMapSize = glm::uvec2(w, h); // setup shadow framebuffer shadowFramebuffer.Bind(FramebufferTarget::Draw); // create render buffer for depth testing depthRender.Bind(RenderbufferTarget::Renderbuffer); depthRender.Storage(RenderbufferTarget::Renderbuffer, PixelDataInternalFormat::DepthComponent24, w, h); // create variance shadow mapping texture, z and z * z gl.Bound(TextureTarget::_2D, shadowMap) .Image2D(0, PixelDataInternalFormat::RGBA32F, w, h, 0, PixelDataFormat::RGBA, PixelDataType::Float, nullptr); Filtering(filtering); Anisotropy(8); shadowFramebuffer.AttachColorTexture(FramebufferTarget::Draw, 0, shadowMap, 0); shadowFramebuffer.AttachRenderbuffer(FramebufferTarget::Draw, FramebufferAttachment::Depth, depthRender); gl.DrawBuffer(FramebufferColorAttachment::_0); // check if success building frame buffer if (!Framebuffer::IsComplete(FramebufferTarget::Draw)) { auto status = Framebuffer::Status(FramebufferTarget::Draw); Framebuffer::HandleIncompleteError(FramebufferTarget::Draw, status); } Framebuffer::Bind(Framebuffer::Target::Draw, FramebufferName(0)); // setup shadow blur framebuffer blurFramebuffer.Bind(FramebufferTarget::Draw); // create variance shadow mapping texture, z and z * z gl.Bound(TextureTarget::_2D, blurShadow) .Image2D(0, PixelDataInternalFormat::RGBA32F, w, h, 0, PixelDataFormat::RGBA, PixelDataType::Float, nullptr) .MinFilter(TextureMinFilter::Linear).MagFilter(TextureMagFilter::Linear) .WrapS(TextureWrap::ClampToEdge).WrapT(TextureWrap::ClampToEdge).Anisotropy(8); blurFramebuffer.AttachColorTexture(FramebufferTarget::Draw, 0, blurShadow, 0); gl.DrawBuffer(FramebufferColorAttachment::_0); // check if success building frame buffer if (!Framebuffer::IsComplete(FramebufferTarget::Draw)) { auto status = Framebuffer::Status(FramebufferTarget::Draw); Framebuffer::HandleIncompleteError(FramebufferTarget::Draw, status); } Framebuffer::Bind(Framebuffer::Target::Draw, FramebufferName(0)); } void ShadowMapRenderer::BlurScale(const float &val) { blurScale = val; } void ShadowMapRenderer::BlurQuality(const int &val) { blurQuality = glm::clamp(val, 1, 3); } void ShadowMapRenderer::Anisotropy(const int &val) const { using namespace oglplus; static Context gl; gl.Bound(TextureTarget::_2D, shadowMap) .Anisotropy(static_cast<float>(val)); } void ShadowMapRenderer::Filtering(const int &val) { using namespace oglplus; static Context gl; filtering = glm::clamp(val, 0, 2); if(filtering == 0) gl.Bound(TextureTarget::_2D, shadowMap) .MagFilter(TextureMagFilter::Nearest) .MinFilter(TextureMinFilter::Nearest); if(filtering == 1) gl.Bound(TextureTarget::_2D, shadowMap) .MagFilter(TextureMagFilter::Linear) .MinFilter(TextureMinFilter::Linear); if(filtering == 2) gl.Bound(TextureTarget::_2D, shadowMap) .MagFilter(TextureMagFilter::Linear) .MinFilter(TextureMinFilter::LinearMipmapLinear) .GenerateMipmap(); } void ShadowMapRenderer::BlurShadowMap() { using namespace oglplus; static Context gl; auto &prog = BlurShader(); CurrentProgram<BlurProgram>(prog); // horizontal blur blurFramebuffer.Bind(FramebufferTarget::Draw); gl.Disable(Capability::DepthTest); // active shadow to be read shadowMap.Active(0); shadowMap.Bind(TextureTarget::_2D); // update uniform prog.source.Set(0); prog.blurDirection.Set(glm::vec2(1.0f / shadowMapSize.x * blurScale, 0.0f)); prog.blurType.Set(blurQuality); gl.Clear().DepthBuffer().ColorBuffer(); fsQuad.DrawElements(); // blur vertically shadowFramebuffer.Bind(FramebufferTarget::Draw); // active shadow to be read blurShadow.Bind(TextureTarget::_2D); // update uniform prog.source.Set(0); prog.blurDirection.Set(glm::vec2(0.0f, 1.0f / shadowMapSize.y * blurScale)); prog.blurType.Set(blurQuality); gl.Clear().DepthBuffer().ColorBuffer(); fsQuad.DrawElements(); gl.Enable(Capability::DepthTest); }
4,045
348
<gh_stars>100-1000 {"nom":"<NAME>","dpt":"Français établis hors de France","inscrits":770,"abs":433,"votants":337,"blancs":19,"nuls":5,"exp":313,"res":[{"panneau":"1","voix":288},{"panneau":"2","voix":25}]}
83
712
<reponame>TOT0RoKR/libmemcached<gh_stars>100-1000 /** * rijndael-alg-fst.h * * @version 3.0 (December 2000) * * Optimised ANSI C code for the Rijndael cipher (now AES) * * @author <NAME> <<EMAIL>> * @author <NAME> <<EMAIL>> * @author <NAME> <<EMAIL>> * * This code is hereby placed in the public domain. * * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ''AS IS'' AND ANY EXPRESS * OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #pragma once #define MAXKC (256/32) #define MAXKB (256/8) #define MAXNR 14 #define AES_MAXKC MAXKC #define AES_MAXKB MAXKB #define AES_MAXNR MAXNR typedef unsigned char u8; typedef unsigned short u16; typedef unsigned int u32; int rijndaelKeySetupEnc(u32 rk[/*4*(Nr + 1)*/], const u8 cipherKey[], int keyBits); int rijndaelKeySetupDec(u32 rk[/*4*(Nr + 1)*/], const u8 cipherKey[], int keyBits); void rijndaelEncrypt(const u32 rk[/*4*(Nr + 1)*/], int Nr, const u8 pt[16], u8 ct[16]); void rijndaelDecrypt(const u32 rk[/*4*(Nr + 1)*/], int Nr, const u8 ct[16], u8 pt[16]); #ifdef INTERMEDIATE_VALUE_KAT void rijndaelEncryptRound(const u32 rk[/*4*(Nr + 1)*/], int Nr, u8 block[16], int rounds); void rijndaelDecryptRound(const u32 rk[/*4*(Nr + 1)*/], int Nr, u8 block[16], int rounds); #endif /* INTERMEDIATE_VALUE_KAT */
737
4,335
<reponame>Intellinium/DAPLink /** ****************************************************************************** * @file stm32f1xx_hal_gpio.c * @author MCD Application Team * @version V1.0.4 * @date 29-April-2016 * @brief GPIO HAL module driver. * This file provides firmware functions to manage the following * functionalities of the General Purpose Input/Output (GPIO) peripheral: * + Initialization and de-initialization functions * + IO operation functions * @verbatim ============================================================================== ##### GPIO Peripheral features ##### ============================================================================== [..] Subject to the specific hardware characteristics of each I/O port listed in the datasheet, each port bit of the General Purpose IO (GPIO) Ports, can be individually configured by software in several modes: (+) Input mode (+) Analog mode (+) Output mode (+) Alternate function mode (+) External interrupt/event lines [..] During and just after reset, the alternate functions and external interrupt lines are not active and the I/O ports are configured in input floating mode. [..] All GPIO pins have weak internal pull-up and pull-down resistors, which can be activated or not. [..] In Output or Alternate mode, each IO can be configured on open-drain or push-pull type and the IO speed can be selected depending on the VDD value. [..] All ports have external interrupt/event capability. To use external interrupt lines, the port must be configured in input mode. All available GPIO pins are connected to the 16 external interrupt/event lines from EXTI0 to EXTI15. [..] The external interrupt/event controller consists of up to 20 edge detectors in connectivity line devices, or 19 edge detectors in other devices for generating event/interrupt requests. Each input line can be independently configured to select the type (event or interrupt) and the corresponding trigger event (rising or falling or both). Each line can also masked independently. A pending register maintains the status line of the interrupt requests ##### How to use this driver ##### ============================================================================== [..] (#) Enable the GPIO APB2 clock using the following function : __HAL_RCC_GPIOx_CLK_ENABLE(). (#) Configure the GPIO pin(s) using HAL_GPIO_Init(). (++) Configure the IO mode using "Mode" member from GPIO_InitTypeDef structure (++) Activate Pull-up, Pull-down resistor using "Pull" member from GPIO_InitTypeDef structure. (++) In case of Output or alternate function mode selection: the speed is configured through "Speed" member from GPIO_InitTypeDef structure (++) Analog mode is required when a pin is to be used as ADC channel or DAC output. (++) In case of external interrupt/event selection the "Mode" member from GPIO_InitTypeDef structure select the type (interrupt or event) and the corresponding trigger event (rising or falling or both). (#) In case of external interrupt/event mode selection, configure NVIC IRQ priority mapped to the EXTI line using HAL_NVIC_SetPriority() and enable it using HAL_NVIC_EnableIRQ(). (#) To get the level of a pin configured in input mode use HAL_GPIO_ReadPin(). (#) To set/reset the level of a pin configured in output mode use HAL_GPIO_WritePin()/HAL_GPIO_TogglePin(). (#) To lock pin configuration until next reset use HAL_GPIO_LockPin(). (#) During and just after reset, the alternate functions are not active and the GPIO pins are configured in input floating mode (except JTAG pins). (#) The LSE oscillator pins OSC32_IN and OSC32_OUT can be used as general purpose (PC14 and PC15, respectively) when the LSE oscillator is off. The LSE has priority over the GPIO function. (#) The HSE oscillator pins OSC_IN/OSC_OUT can be used as general purpose PD0 and PD1, respectively, when the HSE oscillator is off. The HSE has priority over the GPIO function. @endverbatim ****************************************************************************** * @attention * * <h2><center>&copy; COPYRIGHT(c) 2016 STMicroelectronics</center></h2> * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * 3. Neither the name of STMicroelectronics nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * ****************************************************************************** */ /* Includes ------------------------------------------------------------------*/ #include "stm32f1xx_hal.h" /** @addtogroup STM32F1xx_HAL_Driver * @{ */ /** @defgroup GPIO GPIO * @brief GPIO HAL module driver * @{ */ #ifdef HAL_GPIO_MODULE_ENABLED /* Private typedef -----------------------------------------------------------*/ /* Private define ------------------------------------------------------------*/ /** @defgroup GPIO_Private_Constants GPIO Private Constants * @{ */ #define GPIO_MODE ((uint32_t)0x00000003) #define EXTI_MODE ((uint32_t)0x10000000) #define GPIO_MODE_IT ((uint32_t)0x00010000) #define GPIO_MODE_EVT ((uint32_t)0x00020000) #define RISING_EDGE ((uint32_t)0x00100000) #define FALLING_EDGE ((uint32_t)0x00200000) #define GPIO_OUTPUT_TYPE ((uint32_t)0x00000010) #define GPIO_NUMBER ((uint32_t)16) /* Definitions for bit manipulation of CRL and CRH register */ #define GPIO_CR_MODE_INPUT ((uint32_t)0x00000000) /*!< 00: Input mode (reset state) */ #define GPIO_CR_CNF_ANALOG ((uint32_t)0x00000000) /*!< 00: Analog mode */ #define GPIO_CR_CNF_INPUT_FLOATING ((uint32_t)0x00000004) /*!< 01: Floating input (reset state) */ #define GPIO_CR_CNF_INPUT_PU_PD ((uint32_t)0x00000008) /*!< 10: Input with pull-up / pull-down */ #define GPIO_CR_CNF_GP_OUTPUT_PP ((uint32_t)0x00000000) /*!< 00: General purpose output push-pull */ #define GPIO_CR_CNF_GP_OUTPUT_OD ((uint32_t)0x00000004) /*!< 01: General purpose output Open-drain */ #define GPIO_CR_CNF_AF_OUTPUT_PP ((uint32_t)0x00000008) /*!< 10: Alternate function output Push-pull */ #define GPIO_CR_CNF_AF_OUTPUT_OD ((uint32_t)0x0000000C) /*!< 11: Alternate function output Open-drain */ /** * @} */ /* Private macro -------------------------------------------------------------*/ /* Private variables ---------------------------------------------------------*/ /* Private function prototypes -----------------------------------------------*/ /* Private functions ---------------------------------------------------------*/ /** @defgroup GPIO_Exported_Functions GPIO Exported Functions * @{ */ /** @defgroup GPIO_Exported_Functions_Group1 Initialization and deinitialization functions * @brief Initialization and Configuration functions * @verbatim =============================================================================== ##### Initialization and deinitialization functions ##### =============================================================================== [..] This section provides functions allowing to initialize and de-initialize the GPIOs to be ready for use. @endverbatim * @{ */ /** * @brief Initializes the GPIOx peripheral according to the specified parameters in the GPIO_Init. * @param GPIOx: where x can be (A..G depending on device used) to select the GPIO peripheral * @param GPIO_Init: pointer to a GPIO_InitTypeDef structure that contains * the configuration information for the specified GPIO peripheral. * @retval None */ void HAL_GPIO_Init(GPIO_TypeDef *GPIOx, GPIO_InitTypeDef *GPIO_Init) { uint32_t position; uint32_t ioposition = 0x00; uint32_t iocurrent = 0x00; uint32_t temp = 0x00; uint32_t config = 0x00; __IO uint32_t *configregister; /* Store the address of CRL or CRH register based on pin number */ uint32_t registeroffset = 0; /* offset used during computation of CNF and MODE bits placement inside CRL or CRH register */ /* Check the parameters */ assert_param(IS_GPIO_ALL_INSTANCE(GPIOx)); assert_param(IS_GPIO_PIN(GPIO_Init->Pin)); assert_param(IS_GPIO_MODE(GPIO_Init->Mode)); /* Configure the port pins */ for (position = 0; position < GPIO_NUMBER; position++) { /* Get the IO position */ ioposition = ((uint32_t)0x01) << position; /* Get the current IO position */ iocurrent = (uint32_t)(GPIO_Init->Pin) & ioposition; if (iocurrent == ioposition) { /* Check the Alternate function parameters */ assert_param(IS_GPIO_AF_INSTANCE(GPIOx)); /* Based on the required mode, filling config variable with MODEy[1:0] and CNFy[3:2] corresponding bits */ switch (GPIO_Init->Mode) { /* If we are configuring the pin in OUTPUT push-pull mode */ case GPIO_MODE_OUTPUT_PP: /* Check the GPIO speed parameter */ assert_param(IS_GPIO_SPEED(GPIO_Init->Speed)); config = GPIO_Init->Speed + GPIO_CR_CNF_GP_OUTPUT_PP; break; /* If we are configuring the pin in OUTPUT open-drain mode */ case GPIO_MODE_OUTPUT_OD: /* Check the GPIO speed parameter */ assert_param(IS_GPIO_SPEED(GPIO_Init->Speed)); config = GPIO_Init->Speed + GPIO_CR_CNF_GP_OUTPUT_OD; break; /* If we are configuring the pin in ALTERNATE FUNCTION push-pull mode */ case GPIO_MODE_AF_PP: /* Check the GPIO speed parameter */ assert_param(IS_GPIO_SPEED(GPIO_Init->Speed)); config = GPIO_Init->Speed + GPIO_CR_CNF_AF_OUTPUT_PP; break; /* If we are configuring the pin in ALTERNATE FUNCTION open-drain mode */ case GPIO_MODE_AF_OD: /* Check the GPIO speed parameter */ assert_param(IS_GPIO_SPEED(GPIO_Init->Speed)); config = GPIO_Init->Speed + GPIO_CR_CNF_AF_OUTPUT_OD; break; /* If we are configuring the pin in INPUT (also applicable to EVENT and IT mode) */ case GPIO_MODE_INPUT: case GPIO_MODE_IT_RISING: case GPIO_MODE_IT_FALLING: case GPIO_MODE_IT_RISING_FALLING: case GPIO_MODE_EVT_RISING: case GPIO_MODE_EVT_FALLING: case GPIO_MODE_EVT_RISING_FALLING: /* Check the GPIO pull parameter */ assert_param(IS_GPIO_PULL(GPIO_Init->Pull)); if(GPIO_Init->Pull == GPIO_NOPULL) { config = GPIO_CR_MODE_INPUT + GPIO_CR_CNF_INPUT_FLOATING; } else if(GPIO_Init->Pull == GPIO_PULLUP) { config = GPIO_CR_MODE_INPUT + GPIO_CR_CNF_INPUT_PU_PD; /* Set the corresponding ODR bit */ GPIOx->BSRR = ioposition; } else /* GPIO_PULLDOWN */ { config = GPIO_CR_MODE_INPUT + GPIO_CR_CNF_INPUT_PU_PD; /* Reset the corresponding ODR bit */ GPIOx->BRR = ioposition; } break; /* If we are configuring the pin in INPUT analog mode */ case GPIO_MODE_ANALOG: config = GPIO_CR_MODE_INPUT + GPIO_CR_CNF_ANALOG; break; /* Parameters are checked with assert_param */ default: break; } /* Check if the current bit belongs to first half or last half of the pin count number in order to address CRH or CRL register*/ configregister = (iocurrent < GPIO_PIN_8) ? &GPIOx->CRL : &GPIOx->CRH; registeroffset = (iocurrent < GPIO_PIN_8) ? (position << 2) : ((position - 8) << 2); /* Apply the new configuration of the pin to the register */ MODIFY_REG((*configregister), ((GPIO_CRL_MODE0 | GPIO_CRL_CNF0) << registeroffset ), (config << registeroffset)); /*--------------------- EXTI Mode Configuration ------------------------*/ /* Configure the External Interrupt or event for the current IO */ if((GPIO_Init->Mode & EXTI_MODE) == EXTI_MODE) { /* Enable AFIO Clock */ __HAL_RCC_AFIO_CLK_ENABLE(); temp = AFIO->EXTICR[position >> 2]; CLEAR_BIT(temp, ((uint32_t)0x0F) << (4 * (position & 0x03))); SET_BIT(temp, (GPIO_GET_INDEX(GPIOx)) << (4 * (position & 0x03))); AFIO->EXTICR[position >> 2] = temp; /* Configure the interrupt mask */ if((GPIO_Init->Mode & GPIO_MODE_IT) == GPIO_MODE_IT) { SET_BIT(EXTI->IMR, iocurrent); } else { CLEAR_BIT(EXTI->IMR, iocurrent); } /* Configure the event mask */ if((GPIO_Init->Mode & GPIO_MODE_EVT) == GPIO_MODE_EVT) { SET_BIT(EXTI->EMR, iocurrent); } else { CLEAR_BIT(EXTI->EMR, iocurrent); } /* Enable or disable the rising trigger */ if((GPIO_Init->Mode & RISING_EDGE) == RISING_EDGE) { SET_BIT(EXTI->RTSR, iocurrent); } else { CLEAR_BIT(EXTI->RTSR, iocurrent); } /* Enable or disable the falling trigger */ if((GPIO_Init->Mode & FALLING_EDGE) == FALLING_EDGE) { SET_BIT(EXTI->FTSR, iocurrent); } else { CLEAR_BIT(EXTI->FTSR, iocurrent); } } } } } /** * @brief De-initializes the GPIOx peripheral registers to their default reset values. * @param GPIOx: where x can be (A..G depending on device used) to select the GPIO peripheral * @param GPIO_Pin: specifies the port bit to be written. * This parameter can be one of GPIO_PIN_x where x can be (0..15). * @retval None */ void HAL_GPIO_DeInit(GPIO_TypeDef *GPIOx, uint32_t GPIO_Pin) { uint32_t position = 0x00; uint32_t iocurrent = 0x00; uint32_t tmp = 0x00; __IO uint32_t *configregister; /* Store the address of CRL or CRH register based on pin number */ uint32_t registeroffset = 0; /* Check the parameters */ assert_param(IS_GPIO_ALL_INSTANCE(GPIOx)); assert_param(IS_GPIO_PIN(GPIO_Pin)); /* Configure the port pins */ while ((GPIO_Pin >> position) != 0) { /* Get current io position */ iocurrent = (GPIO_Pin) & ((uint32_t)1 << position); if (iocurrent) { /*------------------------- GPIO Mode Configuration --------------------*/ /* Check if the current bit belongs to first half or last half of the pin count number in order to address CRH or CRL register */ configregister = (iocurrent < GPIO_PIN_8) ? &GPIOx->CRL : &GPIOx->CRH; registeroffset = (iocurrent < GPIO_PIN_8) ? (position << 2) : ((position - 8) << 2); /* CRL/CRH default value is floating input(0x04) shifted to correct position */ MODIFY_REG(*configregister, ((GPIO_CRL_MODE0 | GPIO_CRL_CNF0) << registeroffset ), GPIO_CRL_CNF0_0 << registeroffset); /* ODR default value is 0 */ CLEAR_BIT(GPIOx->ODR, iocurrent); /*------------------------- EXTI Mode Configuration --------------------*/ /* Clear the External Interrupt or Event for the current IO */ tmp = AFIO->EXTICR[position >> 2]; tmp &= (((uint32_t)0x0F) << (4 * (position & 0x03))); if(tmp == (GPIO_GET_INDEX(GPIOx) << (4 * (position & 0x03)))) { tmp = ((uint32_t)0x0F) << (4 * (position & 0x03)); CLEAR_BIT(AFIO->EXTICR[position >> 2], tmp); /* Clear EXTI line configuration */ CLEAR_BIT(EXTI->IMR, (uint32_t)iocurrent); CLEAR_BIT(EXTI->EMR, (uint32_t)iocurrent); /* Clear Rising Falling edge configuration */ CLEAR_BIT(EXTI->RTSR, (uint32_t)iocurrent); CLEAR_BIT(EXTI->FTSR, (uint32_t)iocurrent); } } position++; } } /** * @} */ /** @defgroup GPIO_Exported_Functions_Group2 IO operation functions * @brief GPIO Read and Write * @verbatim =============================================================================== ##### IO operation functions ##### =============================================================================== [..] This subsection provides a set of functions allowing to manage the GPIOs. @endverbatim * @{ */ /** * @brief Reads the specified input port pin. * @param GPIOx: where x can be (A..G depending on device used) to select the GPIO peripheral * @param GPIO_Pin: specifies the port bit to read. * This parameter can be GPIO_PIN_x where x can be (0..15). * @retval The input port pin value. */ GPIO_PinState HAL_GPIO_ReadPin(GPIO_TypeDef* GPIOx, uint16_t GPIO_Pin) { GPIO_PinState bitstatus; /* Check the parameters */ assert_param(IS_GPIO_PIN(GPIO_Pin)); if ((GPIOx->IDR & GPIO_Pin) != (uint32_t)GPIO_PIN_RESET) { bitstatus = GPIO_PIN_SET; } else { bitstatus = GPIO_PIN_RESET; } return bitstatus; } /** * @brief Sets or clears the selected data port bit. * * @note This function uses GPIOx_BSRR register to allow atomic read/modify * accesses. In this way, there is no risk of an IRQ occurring between * the read and the modify access. * * @param GPIOx: where x can be (A..G depending on device used) to select the GPIO peripheral * @param GPIO_Pin: specifies the port bit to be written. * This parameter can be one of GPIO_PIN_x where x can be (0..15). * @param PinState: specifies the value to be written to the selected bit. * This parameter can be one of the GPIO_PinState enum values: * @arg GPIO_BIT_RESET: to clear the port pin * @arg GPIO_BIT_SET: to set the port pin * @retval None */ void HAL_GPIO_WritePin(GPIO_TypeDef* GPIOx, uint16_t GPIO_Pin, GPIO_PinState PinState) { /* Check the parameters */ assert_param(IS_GPIO_PIN(GPIO_Pin)); assert_param(IS_GPIO_PIN_ACTION(PinState)); if(PinState != GPIO_PIN_RESET) { GPIOx->BSRR = GPIO_Pin; } else { GPIOx->BSRR = (uint32_t)GPIO_Pin << 16; } } /** * @brief Toggles the specified GPIO pin * @param GPIOx: where x can be (A..G depending on device used) to select the GPIO peripheral * @param GPIO_Pin: Specifies the pins to be toggled. * @retval None */ void HAL_GPIO_TogglePin(GPIO_TypeDef* GPIOx, uint16_t GPIO_Pin) { /* Check the parameters */ assert_param(IS_GPIO_PIN(GPIO_Pin)); GPIOx->ODR ^= GPIO_Pin; } /** * @brief Locks GPIO Pins configuration registers. * @note The locking mechanism allows the IO configuration to be frozen. When the LOCK sequence * has been applied on a port bit, it is no longer possible to modify the value of the port bit until * the next reset. * @param GPIOx: where x can be (A..G depending on device used) to select the GPIO peripheral * @param GPIO_Pin: specifies the port bit to be locked. * This parameter can be any combination of GPIO_Pin_x where x can be (0..15). * @retval None */ HAL_StatusTypeDef HAL_GPIO_LockPin(GPIO_TypeDef* GPIOx, uint16_t GPIO_Pin) { __IO uint32_t tmp = GPIO_LCKR_LCKK; /* Check the parameters */ assert_param(IS_GPIO_LOCK_INSTANCE(GPIOx)); assert_param(IS_GPIO_PIN(GPIO_Pin)); /* Apply lock key write sequence */ SET_BIT(tmp, GPIO_Pin); /* Set LCKx bit(s): LCKK='1' + LCK[15-0] */ GPIOx->LCKR = tmp; /* Reset LCKx bit(s): LCKK='0' + LCK[15-0] */ GPIOx->LCKR = GPIO_Pin; /* Set LCKx bit(s): LCKK='1' + LCK[15-0] */ GPIOx->LCKR = tmp; /* Read LCKK bit*/ tmp = GPIOx->LCKR; if((uint32_t)(GPIOx->LCKR & GPIO_LCKR_LCKK)) { return HAL_OK; } else { return HAL_ERROR; } } /** * @brief This function handles EXTI interrupt request. * @param GPIO_Pin: Specifies the pins connected EXTI line * @retval None */ void HAL_GPIO_EXTI_IRQHandler(uint16_t GPIO_Pin) { /* EXTI line interrupt detected */ if(__HAL_GPIO_EXTI_GET_IT(GPIO_Pin) != RESET) { __HAL_GPIO_EXTI_CLEAR_IT(GPIO_Pin); HAL_GPIO_EXTI_Callback(GPIO_Pin); } } /** * @brief EXTI line detection callback * @param GPIO_Pin: Specifies the pins connected EXTI line * @retval None */ __weak void HAL_GPIO_EXTI_Callback(uint16_t GPIO_Pin) { /* Prevent unused argument(s) compilation warning */ UNUSED(GPIO_Pin); /* NOTE : This function Should not be modified, when the callback is needed, the HAL_GPIO_EXTI_Callback could be implemented in the user file */ } /** * @} */ /** * @} */ #endif /* HAL_GPIO_MODULE_ENABLED */ /** * @} */ /** * @} */ /************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/
8,558
460
<filename>trunk/win/BumpTop Settings/include/wxWidgets/wx/mac/classic/control.h<gh_stars>100-1000 ///////////////////////////////////////////////////////////////////////////// // Name: control.h // Purpose: wxControl class // Author: <NAME> // Modified by: // Created: 1998-01-01 // RCS-ID: $Id: control.h 36891 2006-01-16 14:59:55Z MR $ // Copyright: (c) <NAME> // Licence: wxWindows licence ///////////////////////////////////////////////////////////////////////////// #ifndef _WX_CONTROL_H_ #define _WX_CONTROL_H_ WXDLLEXPORT_DATA(extern const wxChar) wxControlNameStr[]; // General item class class WXDLLEXPORT wxControl : public wxControlBase { DECLARE_ABSTRACT_CLASS(wxControl) public: wxControl(); wxControl(wxWindow *parent, wxWindowID id, const wxPoint& pos = wxDefaultPosition, const wxSize& size = wxDefaultSize, long style = 0, const wxValidator& validator = wxDefaultValidator, const wxString& name = wxControlNameStr) { Create(parent, id, pos, size, style, validator, name); } bool Create(wxWindow *parent, wxWindowID id, const wxPoint& pos = wxDefaultPosition, const wxSize& size = wxDefaultSize, long style = 0, const wxValidator& validator = wxDefaultValidator, const wxString& name = wxControlNameStr); virtual ~wxControl(); // Simulates an event virtual void Command(wxCommandEvent& event) { ProcessCommand(event); } // implementation from now on // -------------------------- // Calls the callback and appropriate event handlers bool ProcessCommand(wxCommandEvent& event); virtual void SetLabel(const wxString& title) ; wxList& GetSubcontrols() { return m_subControls; } void OnEraseBackground(wxEraseEvent& event); virtual bool Enable(bool enable = TRUE) ; virtual bool Show(bool show = TRUE) ; virtual void DoSetWindowVariant( wxWindowVariant variant ) ; virtual void MacRedrawControl () ; virtual void MacHandleControlClick( WXWidget control , wxInt16 controlpart , bool mouseStillDown ) ; virtual void MacPreControlCreate( wxWindow *parent, wxWindowID id, wxString label , const wxPoint& pos, const wxSize& size, long style, const wxValidator& validator, const wxString& name , WXRECTPTR outBounds , unsigned char* maclabel ) ; virtual void MacPostControlCreate() ; virtual void MacAdjustControlRect() ; virtual WXWidget MacGetContainerForEmbedding() ; virtual void MacSuperChangedPosition() ; virtual void MacSuperEnabled( bool enabled ) ; virtual void MacSuperShown( bool show ) ; virtual bool MacCanFocus() const ; virtual void MacUpdateDimensions() ; void* MacGetControlAction() { return m_macControlAction ; } virtual void DoSetSize(int x, int y,int width, int height,int sizeFlags = wxSIZE_AUTO ) ; void OnKeyDown( wxKeyEvent &event ) ; void OnMouseEvent( wxMouseEvent &event ) ; void OnPaint(wxPaintEvent& event) ; virtual void Refresh(bool eraseBack = TRUE, const wxRect *rect = NULL) ; WXWidget GetMacControl() { return m_macControl ;} protected: // For controls like radiobuttons which are really composite WXWidget m_macControl ; void* m_macControlAction ; bool m_macControlIsShown ; wxList m_subControls; int m_macHorizontalBorder ; int m_macVerticalBorder ; virtual wxSize DoGetBestSize() const; private: DECLARE_EVENT_TABLE() }; wxControl *wxFindControlFromMacControl(WXWidget inControl ) ; void wxAssociateControlWithMacControl(WXWidget inControl, wxControl *control) ; void wxRemoveMacControlAssociation(wxControl *control) ; #endif // _WX_CONTROL_H_
1,649
679
/************************************************************** * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. * *************************************************************/ package fvt.gui.sc.sheet; import static org.junit.Assert.*; import static testlib.gui.UIMap.*; import org.junit.After; import org.junit.Before; import org.junit.Rule; import org.junit.Test; import org.openoffice.test.common.Logger; import testlib.gui.AppTool; /** * Before running the testing class, you need specify the AOO location firstly * with system property openoffice.home. * * */ public class Sheets { @Rule public Logger log = Logger.getLogger(this); @Before public void setUp() throws Exception { app.start(true); AppTool.newSpreadsheet(); } @After public void tearDown() throws Exception { app.stop(); } /** * Insert one sheet in different place * * @throws Exception */ @Test public void testInsertMultipleSheet() { // Open Insert Sheet dialog via main menu Insert-> Sheet app.dispatch(".uno:Insert"); // Change new sheet number into 3 to insert 3 new sheet one time scSheetNumber.setText("3"); // Click OK button to create sheet with default setting scInsertSheetDlg.ok(); // Verify new sheets have been inserted before Sheet1 app.dispatch(".uno:SelectTables"); // To support multi-language, just verify the number in the sheet name assertTrue("The first sheet name not contain 4",scSheetsList.getItemsText()[0].contains("4")); assertTrue("The second sheet name not contain 5",scSheetsList.getItemsText()[1].contains("5")); assertTrue("The third sheet name not contain 6",scSheetsList.getItemsText()[2].contains("6")); assertTrue("The fourth sheet name not contain 1",scSheetsList.getItemsText()[3].contains("1")); assertTrue("The fifth sheet name not contain 2",scSheetsList.getItemsText()[4].contains("2")); assertTrue("The sixth sheet name not contain 3",scSheetsList.getItemsText()[5].contains("3")); scSelectSheetsDlg.ok(); } /** * Insert one sheet in different place * * @throws Exception */ @Test public void testInsertOneSheet() { // Open Insert Sheet dialog via main menu Insert-> Sheet app.dispatch(".uno:Insert"); // Click OK button to create sheet with default setting scInsertSheetDlg.ok(); // Verify new sheet has been inserted before Sheet1 app.dispatch(".uno:SelectTables"); // To support multi-language, just verify the number in the sheet name assertTrue(scSheetsList.getItemsText()[0].contains("4")); assertTrue(scSheetsList.getItemsText()[1].contains("1")); assertTrue(scSheetsList.getItemsText()[2].contains("2")); assertTrue(scSheetsList.getItemsText()[3].contains("3")); scSelectSheetsDlg.ok(); } }
1,175
777
<filename>components/ntp_tiles/webui/popular_sites_internals_message_handler_client.h // Copyright 2016 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef COMPONENTS_NTP_TILES_WEBUI_POPULAR_SITES_HANDLER_CLIENT_H_ #define COMPONENTS_NTP_TILES_WEBUI_POPULAR_SITES_HANDLER_CLIENT_H_ #include <memory> #include <string> #include <vector> #include "base/callback_forward.h" #include "base/macros.h" class PrefService; namespace base { class Value; class ListValue; class SequencedWorkerPool; } // namespace base namespace ntp_tiles { class PopularSites; // Implemented by embedders to hook up PopularSitesInternalsMessageHandler. class PopularSitesInternalsMessageHandlerClient { public: // Returns the blocking pool for hte embedder. virtual base::SequencedWorkerPool* GetBlockingPool() = 0; // Returns the PrefService for the embedder and containing WebUI page. virtual PrefService* GetPrefs() = 0; // Creates a new PopularSites based on the context pf the WebUI page. virtual std::unique_ptr<ntp_tiles::PopularSites> MakePopularSites() = 0; // Registers a callback in Javascript. See content::WebUI and web::WebUIIOS. virtual void RegisterMessageCallback( const std::string& message, const base::Callback<void(const base::ListValue*)>& callback) = 0; // Invokes a function in Javascript. See content::WebUI and web::WebUIIOS. virtual void CallJavascriptFunctionVector( const std::string& name, const std::vector<const base::Value*>& values) = 0; // Helper function for CallJavascriptFunctionVector(). template <typename... Arg> void CallJavascriptFunction(const std::string& name, const Arg&... arg) { CallJavascriptFunctionVector(name, {&arg...}); } protected: PopularSitesInternalsMessageHandlerClient(); virtual ~PopularSitesInternalsMessageHandlerClient(); private: DISALLOW_COPY_AND_ASSIGN(PopularSitesInternalsMessageHandlerClient); }; } // namespace ntp_tiles #endif // COMPONENTS_NTP_TILES_WEBUI_POPULAR_SITES_HANDLER_CLIENT_H_
697