file_name (string, 3–137 chars) | prefix (string, 0–918k chars) | suffix (string, 0–962k chars) | middle (string, 0–812k chars)
---|---|---|---|
content.tsx
|
import * as React from 'react';
const bs = require('@src/main.css');
export interface DialogContentProps {
children: React.ReactNode;
}
const DialogContent: React.FC<DialogContentProps> = ({ children }) => {
|
return <div className={bs.modalBody}>{children}</div>;
};
export default DialogContent;
| |
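A minimal usage sketch for the component above; the surrounding markup and import path are assumptions, only `DialogContent` comes from the sample:

```tsx
import * as React from 'react';
import DialogContent from './content'; // hypothetical path to the sample file

// Hypothetical caller: wraps arbitrary children in the modal-body styling.
const ExampleDialog: React.FC = () => (
  <DialogContent>
    <p>Are you sure you want to continue?</p>
  </DialogContent>
);

export default ExampleDialog;
```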
audio_output.rs
|
use crate::{Audio, AudioSource, Decodable};
use bevy_asset::{Asset, Assets};
use bevy_ecs::system::{NonSend, Res, ResMut};
use bevy_reflect::TypeUuid;
use bevy_utils::tracing::warn;
use rodio::{OutputStream, OutputStreamHandle, Sink, Source};
use std::marker::PhantomData;
/// Used internally to play audio on the current "audio device"
pub struct AudioOutput<Source = AudioSource>
where
Source: Decodable,
{
_stream: Option<OutputStream>,
stream_handle: Option<OutputStreamHandle>,
phantom: PhantomData<Source>,
}
impl<Source> Default for AudioOutput<Source>
where
Source: Decodable,
{
fn default() -> Self {
if let Ok((stream, stream_handle)) = OutputStream::try_default() {
Self {
_stream: Some(stream),
stream_handle: Some(stream_handle),
phantom: PhantomData,
}
} else {
warn!("No audio device found.");
Self {
_stream: None,
stream_handle: None,
phantom: PhantomData,
}
}
}
}
impl<Source> AudioOutput<Source>
where
Source: Asset + Decodable,
{
fn
|
(&self, audio_source: &Source, repeat: bool) -> Option<Sink> {
if let Some(stream_handle) = &self.stream_handle {
let sink = Sink::try_new(stream_handle).unwrap();
if repeat {
sink.append(audio_source.decoder().repeat_infinite());
} else {
sink.append(audio_source.decoder());
}
Some(sink)
} else {
None
}
}
fn try_play_queued(
&self,
audio_sources: &Assets<Source>,
audio: &mut Audio<Source>,
sinks: &mut Assets<AudioSink>,
) {
let mut queue = audio.queue.write();
let len = queue.len();
let mut i = 0;
while i < len {
let config = queue.pop_front().unwrap();
if let Some(audio_source) = audio_sources.get(&config.source_handle) {
if let Some(sink) = self.play_source(audio_source, config.settings.repeat) {
sink.set_speed(config.settings.speed);
sink.set_volume(config.settings.volume);
// don't keep the strong handle. there is no way to return it to the user here as it is async
let _ = sinks.set(config.sink_handle, AudioSink { sink: Some(sink) });
}
} else {
// audio source hasn't loaded yet. add it back to the queue
queue.push_back(config);
}
i += 1;
}
}
}
/// Plays audio currently queued in the [`Audio`] resource through the [`AudioOutput`] resource
pub fn play_queued_audio_system<Source: Asset + Decodable>(
audio_output: NonSend<AudioOutput<Source>>,
audio_sources: Option<Res<Assets<Source>>>,
mut audio: ResMut<Audio<Source>>,
mut sinks: ResMut<Assets<AudioSink>>,
) {
if let Some(audio_sources) = audio_sources {
audio_output.try_play_queued(&*audio_sources, &mut *audio, &mut *sinks);
};
}
/// Asset controlling the playback of a sound
///
/// ```
/// # use bevy_ecs::system::{Local, Res};
/// # use bevy_asset::{Assets, Handle};
/// # use bevy_audio::AudioSink;
/// // Execution of this system should be controlled by a state or input,
/// // otherwise it would just toggle between play and pause every frame.
/// fn pause(
/// audio_sinks: Res<Assets<AudioSink>>,
/// music_controller: Local<Handle<AudioSink>>,
/// ) {
/// if let Some(sink) = audio_sinks.get(&*music_controller) {
/// if sink.is_paused() {
/// sink.play()
/// } else {
/// sink.pause()
/// }
/// }
/// }
/// ```
///
#[derive(TypeUuid)]
#[uuid = "8BEE570C-57C2-4FC0-8CFB-983A22F7D981"]
pub struct AudioSink {
// This field is an `Option` so that `Drop` can take the sink and detach it safely.
// It is never `None` during the sink's lifetime.
sink: Option<Sink>,
}
impl Drop for AudioSink {
fn drop(&mut self) {
self.sink.take().unwrap().detach();
}
}
impl AudioSink {
/// Gets the volume of the sound.
///
/// The value `1.0` is the "normal" volume (unfiltered input). Any value other than `1.0`
/// will multiply each sample by this value.
pub fn volume(&self) -> f32 {
self.sink.as_ref().unwrap().volume()
}
/// Changes the volume of the sound.
///
/// The value `1.0` is the "normal" volume (unfiltered input). Any value other than `1.0`
/// will multiply each sample by this value.
pub fn set_volume(&self, volume: f32) {
self.sink.as_ref().unwrap().set_volume(volume);
}
/// Gets the speed of the sound.
///
/// The value `1.0` is the "normal" speed (unfiltered input). Any value other than `1.0`
/// will change the play speed of the sound.
pub fn speed(&self) -> f32 {
self.sink.as_ref().unwrap().speed()
}
/// Changes the speed of the sound.
///
/// The value `1.0` is the "normal" speed (unfiltered input). Any value other than `1.0`
/// will change the play speed of the sound.
pub fn set_speed(&self, speed: f32) {
self.sink.as_ref().unwrap().set_speed(speed);
}
/// Resumes playback of a paused sink.
///
/// No effect if not paused.
pub fn play(&self) {
self.sink.as_ref().unwrap().play();
}
/// Pauses playback of this sink.
///
/// No effect if already paused.
/// A paused sink can be resumed with [`play`](Self::play).
pub fn pause(&self) {
self.sink.as_ref().unwrap().pause();
}
/// Is this sink paused?
///
/// Sinks can be paused and resumed using [`pause`](Self::pause) and [`play`](Self::play).
pub fn is_paused(&self) -> bool {
self.sink.as_ref().unwrap().is_paused()
}
/// Stops the sink.
///
/// It won't be possible to restart it afterwards.
pub fn stop(&self) {
self.sink.as_ref().unwrap().stop();
}
}
|
play_source
|
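A minimal sketch of how the queued-audio flow above is typically driven from user code, assuming an `Audio::play` method that enqueues a source and returns the weak `Handle<AudioSink>` mentioned in the comment inside `try_play_queued`; the asset path is illustrative:

```rust
use bevy::prelude::*;

// Queue a sound: `play_queued_audio_system` drains the queue once the asset
// has loaded, builds the rodio `Sink`, and stores an `AudioSink` under the
// returned (weak) handle.
fn play_music(asset_server: Res<AssetServer>, audio: Res<Audio>) {
    let weak_sink: Handle<AudioSink> = audio.play(asset_server.load("music.ogg"));
    // To pause or resume later, keep a strong handle and look the sink up in
    // `Assets<AudioSink>`, as in the doc example on `AudioSink`.
    let _music_controller = weak_sink;
}
```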
data_source_notification_topic.go
|
package hsdp
import (
"context"
"net/http"
"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
"github.com/philips-software/go-hsdp-api/notification"
)
func
|
() *schema.Resource {
return &schema.Resource{
ReadContext: dataSourceNotificationTopicRead,
Schema: map[string]*schema.Schema{
"topic_id": {
Type: schema.TypeString,
Required: true,
},
"name": {
Type: schema.TypeString,
Computed: true,
},
"producer_id": {
Type: schema.TypeString,
Computed: true,
},
"scope": {
Type: schema.TypeString,
Computed: true,
},
"allowed_scopes": {
Type: schema.TypeSet,
Computed: true,
Elem: &schema.Schema{Type: schema.TypeString},
},
"is_auditable": {
Type: schema.TypeBool,
Computed: true,
},
"description": {
Type: schema.TypeString,
Computed: true,
},
},
}
}
func dataSourceNotificationTopicRead(_ context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
config := meta.(*Config)
var diags diag.Diagnostics
client, err := config.NotificationClient()
if err != nil {
return diag.FromErr(err)
}
defer client.Close()
topicID := d.Get("topic_id").(string)
topic, resp, err := client.Topic.GetTopic(topicID) // Fetch the topic
if err != nil {
if resp == nil || resp.StatusCode != http.StatusForbidden { // Do not error on permission issues
return diag.FromErr(err)
}
topic = &notification.Topic{}
}
d.SetId(topicID)
_ = d.Set("name", topic.Name)
_ = d.Set("producer_id", topic.ProducerID)
_ = d.Set("scope", topic.Scope)
_ = d.Set("allowed_scopes", topic.AllowedScopes)
_ = d.Set("is_auditable", topic.IsAuditable)
_ = d.Set("description", topic.Description)
return diags
}
|
dataSourceNotificationTopic
|
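For context, a sketch of how a data source like the one above is usually registered with the Terraform plugin SDK; the `hsdp_notification_topic` name is an assumption inferred from the package and function names:

```go
package hsdp

import "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"

// Provider exposes the data source to Terraform configurations; only the
// DataSourcesMap entry matters here, the rest of the provider is elided.
func Provider() *schema.Provider {
	return &schema.Provider{
		DataSourcesMap: map[string]*schema.Resource{
			"hsdp_notification_topic": dataSourceNotificationTopic(),
		},
	}
}
```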
test_changeprefixcommand.py
|
import unittest
from unittest.mock import patch, Mock
import discord
import datetime
from commands import ChangePrefixCommand
from serverobjects.server import DiscordServer
class TestChangePrefixCommand(unittest.TestCase):
def setUp(self):
self.command = ChangePrefixCommand()
self.time = datetime.datetime.now()
self.server_json = {
'server_id' : 1,
'awake' : True,
'timeout_duration_seconds': 1800,
'prefix': '!vt',
'banned_words': [{
'rowid': 1,
'server_id': 1,
'banned_word': 'vore',
'infracted_at': (self.time - datetime.timedelta(minutes=20)).strftime("%Y-%m-%d %H:%M:%S"),
'calledout_at': (self.time - datetime.timedelta(minutes=20)).strftime("%Y-%m-%d %H:%M:%S"),
'record': {
'record_seconds': 2400,
'infraction_count': 0
}
}]
}
def test_is_command_authorized__no_permissions_disallowed(self):
result = self.command.is_command_authorized()
self.assertFalse(result)
def
|
(self):
permissions = discord.Permissions()
result = self.command.is_command_authorized(permissions)
self.assertFalse(result)
def test_is_command_authorized__admin_allowed(self):
permissions = discord.Permissions.all()
result = self.command.is_command_authorized(permissions)
self.assertTrue(result)
@patch('serverobjects.server.DiscordServer.update_server_settings')
def test_execute__change_full_time_valid(self, prefix_patch):
message = Mock(**{
'server': Mock(**{
'id': 1
}),
'content': "!vtprefix !testin",
'author': Mock(**{
'id': 2,
'mention': "@test",
'bot': False
}),
})
server = DiscordServer(self.server_json, self.time, None)
retval = self.command.execute(server, self.time, message.content, message.author)
prefix_patch.assert_called_with({ 'prefix': '!testin' })
self.assertEqual(
retval,
"Cool, from now on you'll need to start a message with '!testin' for me to treat it as a command."
)
self.assertTrue(prefix_patch.called)
@patch('serverobjects.server.DiscordServer.update_server_settings')
def test_execute__change_prefix_too_long(self, prefix_patch):
prefix_patch.return_value = False
message = Mock(**{
'server': Mock(**{
'id': 1
}),
'content': "!vtprefix asdfasdfasdf",
'author': Mock(**{
'id': 2,
'mention': "@test",
'bot': False
}),
})
server = DiscordServer(self.server_json, self.time, None)
retval = self.command.execute(server, self.time, message.content, message.author)
self.assertEqual(
retval,
"Sorry, I don't understand that formatting. I was expecting a new prefix between 1 and 10 characters long."
)
@patch('serverobjects.server.DiscordServer.update_server_settings')
def test_execute__change_no_time_invalid(self, prefix_patch):
message = Mock(**{
'server': Mock(**{
'id': 1
}),
'content': "!vtprefix",
'author': Mock(**{
'id': 2,
'mention': "@test",
'bot': False
}),
})
server = DiscordServer(self.server_json, self.time, None)
self.command.execute(server, self.time, message.content, message.author)
self.assertFalse(prefix_patch.called)
|
test_is_command_authorized__non_admin_disallowed
|
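For readability, a hypothetical reconstruction of the command object these tests exercise, inferred purely from the calls above (the real class lives in the `commands` module):

```python
# Hypothetical sketch; method names and behavior are inferred from the tests.
class ChangePrefixCommand:
    def is_command_authorized(self, permissions=None):
        # Only administrators may change the prefix.
        return bool(permissions and permissions.administrator)

    def execute(self, server, current_time, content, author):
        parts = content.split()
        if len(parts) != 2 or not 1 <= len(parts[1]) <= 10:
            return ("Sorry, I don't understand that formatting. I was expecting "
                    "a new prefix between 1 and 10 characters long.")
        server.update_server_settings({'prefix': parts[1]})
        return ("Cool, from now on you'll need to start a message with "
                "'{}' for me to treat it as a command.".format(parts[1]))
```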
test_requester.py
|
import syncconnect
import responses
import unittest
|
def queue(self, status_code, **kwargs):
""" queue fake responses with passed status code """
if not kwargs:
json = {'message': self.EXPECTED}
else:
json = kwargs
responses.add('GET', self.URL, status=status_code, json=json)
def check(self, exception):
self.assertRaisesRegex(
exception,
self.EXPECTED,
syncconnect.requester.call,
'GET',
self.URL)
@responses.activate
def test_user_agent(self):
self.queue(200)
syncconnect.requester.call('GET', self.URL)
self.assertEqual(
responses.calls[0].request.headers['User-Agent'],
'fordpass-na/353 CFNetwork/1121.2.2 Darwin/19.3.0',
)
@responses.activate
def test_oauth_error(self):
self.queue(401, error_description='unauthorized')
with self.assertRaises(syncconnect.AuthenticationException) as ctx:
syncconnect.requester.call('GET', self.URL)
self.assertEqual(ctx.exception.message, 'unauthorized')
@responses.activate
def test_unknown_error(self):
self.queue(401, error_description='unknown error')
with self.assertRaises(syncconnect.AuthenticationException) as ctx:
syncconnect.requester.call('GET', self.URL)
self.assertEqual(ctx.exception.message, 'unknown error')
@responses.activate
def test_400(self):
self.queue(400)
self.check(syncconnect.ValidationException)
@responses.activate
def test_401(self):
self.queue(401)
self.check(syncconnect.AuthenticationException)
@responses.activate
def test_403(self):
self.queue(403)
self.check(syncconnect.PermissionException)
@responses.activate
def test_404(self):
self.queue(404)
self.check(syncconnect.ResourceNotFoundException)
@responses.activate
def test_429(self):
self.queue(429)
self.check(syncconnect.RateLimitingException)
@responses.activate
def test_500(self):
self.queue(500)
self.check(syncconnect.ServerException)
@responses.activate
def test_504(self):
responses.add('GET', self.URL, status=504, json={
'error': 'some error', 'message': self.EXPECTED})
self.check(syncconnect.GatewayTimeoutException)
@responses.activate
def test_other(self):
self.queue(503)
with self.assertRaises(syncconnect.SyncException) as se:
syncconnect.requester.call('GET', self.URL)
self.assertEqual(se.exception.message, 'Unexpected error')
|
class TestRequester(unittest.TestCase):
EXPECTED = 'expected'
URL = 'http://ford.url'
|
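Taken together, these tests pin down a status-code-to-exception contract for `requester.call`; a sketch of how a caller might rely on it (exception names are taken from the tests, the URL is illustrative):

```python
import syncconnect

# Each non-2xx status maps to a typed exception carrying the response message.
try:
    data = syncconnect.requester.call('GET', 'http://ford.url/vehicles')
except syncconnect.AuthenticationException:
    pass  # 401: refresh credentials and retry
except syncconnect.RateLimitingException:
    pass  # 429: back off before retrying
except syncconnect.SyncException as err:
    raise RuntimeError(err.message)  # assumed catch-all base, per test_other
```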
bitcoin_el_GR.ts
|
<?xml version="1.0" ?><!DOCTYPE TS><TS language="el_GR" version="2.0">
<defaultcodec>UTF-8</defaultcodec>
<context>
<name>AboutDialog</name>
<message>
<location filename="../forms/aboutdialog.ui" line="+14"/>
<source>About Parrotcoin</source>
<translation>Σχετικά με το Parrotcoin</translation>
</message>
<message>
<location line="+39"/>
<source><b>Parrotcoin</b> version</source>
<translation>Έκδοση Parrotcoin</translation>
</message>
<message>
<location line="+57"/>
<source>
This is experimental software.
Distributed under the MIT/X11 software license, see the accompanying file COPYING or http://www.opensource.org/licenses/mit-license.php.
This product includes software developed by the OpenSSL Project for use in the OpenSSL Toolkit (http://www.openssl.org/) and cryptographic software written by Eric Young ([email protected]) and UPnP software written by Thomas Bernard.</source>
<translation>
This is experimental software.
Distributed under the MIT/X11 software license, see the accompanying file COPYING or http://www.opensource.org/licenses/mit-license.php.
This product includes software developed by the OpenSSL Project for use in the OpenSSL Toolkit (http://www.openssl.org/) and cryptographic software written by Eric Young ([email protected]) and UPnP software written by Thomas Bernard.</translation>
</message>
<message>
<location filename="../aboutdialog.cpp" line="+14"/>
<source>Copyright</source>
<translation>Πνευματική ιδιοκτησία </translation>
</message>
<message>
<location line="+0"/>
<source>The Parrotcoin developers</source>
<translation>Οι Parrotcoin προγραμματιστές </translation>
</message>
</context>
<context>
<name>AddressBookPage</name>
<message>
<location filename="../forms/addressbookpage.ui" line="+14"/>
<source>Address Book</source>
<translation>Βιβλίο Διευθύνσεων</translation>
</message>
<message>
<location line="+19"/>
<source>Double-click to edit address or label</source>
<translation>Διπλό-κλικ για επεξεργασία της διεύθυνσης ή της ετικέτας</translation>
</message>
<message>
<location line="+27"/>
<source>Create a new address</source>
<translation>Δημιούργησε νέα διεύθυνση</translation>
</message>
<message>
<location line="+14"/>
<source>Copy the currently selected address to the system clipboard</source>
<translation>Αντέγραψε την επιλεγμένη διεύθυνση στο πρόχειρο του συστήματος</translation>
</message>
<message>
<location line="-11"/>
<source>&New Address</source>
<translation>&Νέα διεύθυνση</translation>
</message>
<message>
<location filename="../addressbookpage.cpp" line="+63"/>
<source>These are your Parrotcoin addresses for receiving payments. You may want to give a different one to each sender so you can keep track of who is paying you.</source>
<translation>Αυτές είναι οι Parrotcoin διευθύνσεις σας για να λαμβάνετε πληρωμές. Δίνοντας μία ξεχωριστή διεύθυνση σε κάθε αποστολέα, θα μπορείτε να ελέγχετε ποιος σας πληρώνει.</translation>
</message>
<message>
<location filename="../forms/addressbookpage.ui" line="+14"/>
<source>&Copy Address</source>
<translation>&Αντιγραφή διεύθυνσης</translation>
</message>
<message>
<location line="+11"/>
<source>Show &QR Code</source>
<translation>Δείξε &QR κωδικα</translation>
</message>
<message>
<location line="+11"/>
<source>Sign a message to prove you own a Parrotcoin address</source>
<translation>Υπογράψτε ένα μήνυμα για ν' αποδείξετε πως σας ανήκει μια συγκεκριμένη διεύθυνση Parrotcoin</translation>
</message>
<message>
<location line="+3"/>
<source>Sign &Message</source>
<translation>&Υπέγραψε το μήνυμα</translation>
</message>
<message>
<location line="+25"/>
<source>Delete the currently selected address from the list</source>
<translation>Διαγραφή της επιλεγμένης διεύθυνσης από τη λίστα</translation>
</message>
<message>
<location line="+27"/>
<source>Export the data in the current tab to a file</source>
<translation>Εξαγωγή δεδομένων καρτέλας σε αρχείο</translation>
</message>
<message>
<location line="+3"/>
<source>&Export</source>
<translation>&Εξαγωγή</translation>
</message>
<message>
<location line="-44"/>
<source>Verify a message to ensure it was signed with a specified Parrotcoin address</source>
<translation>Επαληθεύστε ένα μήνυμα για να βεβαιωθείτε ότι υπογράφηκε με μια συγκεκριμένη διεύθυνση Parrotcoin</translation>
</message>
<message>
<location line="+3"/>
<source>&Verify Message</source>
<translation>&Επιβεβαίωση μηνύματος</translation>
</message>
<message>
<location line="+14"/>
<source>&Delete</source>
<translation>&Διαγραφή</translation>
</message>
<message>
<location filename="../addressbookpage.cpp" line="-5"/>
<source>These are your Parrotcoin addresses for sending payments. Always check the amount and the receiving address before sending coins.</source>
<translation>Αυτές είναι οι Parrotcoin διευθύνσεις σας για να στέλνετε πληρωμές. Ελέγχετε πάντα το ποσό και τη διεύθυνση του παραλήπτη πριν στείλετε νομίσματα.</translation>
</message>
<message>
<location line="+13"/>
<source>Copy &Label</source>
<translation>Αντιγραφή &επιγραφής</translation>
</message>
<message>
<location line="+1"/>
<source>&Edit</source>
<translation>&Επεξεργασία</translation>
</message>
<message>
<location line="+1"/>
<source>Send &Coins</source>
<translation>Αποστολή νομισμάτων</translation>
</message>
<message>
<location line="+260"/>
<source>Export Address Book Data</source>
<translation>Εξαγωγή Δεδομενων Βιβλίου Διευθύνσεων</translation>
</message>
<message>
<location line="+1"/>
<source>Comma separated file (*.csv)</source>
<translation>Αρχείο οριοθετημένο με κόμματα (*.csv)</translation>
</message>
<message>
<location line="+13"/>
<source>Error exporting</source>
<translation>Σφάλμα κατά την εξαγωγή</translation>
</message>
<message>
<location line="+0"/>
<source>Could not write to file %1.</source>
<translation>Αδυναμία εγγραφής στο αρχείο %1.</translation>
</message>
</context>
<context>
<name>AddressTableModel</name>
<message>
<location filename="../addresstablemodel.cpp" line="+144"/>
<source>Label</source>
<translation>Ετικέτα</translation>
</message>
<message>
<location line="+0"/>
<source>Address</source>
<translation>Διεύθυνση</translation>
</message>
<message>
<location line="+36"/>
<source>(no label)</source>
<translation>(χωρίς ετικέτα)</translation>
</message>
</context>
<context>
<name>AskPassphraseDialog</name>
<message>
<location filename="../forms/askpassphrasedialog.ui" line="+26"/>
<source>Passphrase Dialog</source>
<translation>Φράση πρόσβασης </translation>
</message>
<message>
<location line="+21"/>
<source>Enter passphrase</source>
<translation>Βάλτε κωδικό πρόσβασης</translation>
</message>
<message>
<location line="+14"/>
<source>New passphrase</source>
<translation>Νέος κωδικός πρόσβασης</translation>
</message>
<message>
<location line="+14"/>
<source>Repeat new passphrase</source>
<translation>Επανέλαβε τον νέο κωδικό πρόσβασης</translation>
</message>
<message>
<location filename="../askpassphrasedialog.cpp" line="+33"/>
<source>Enter the new passphrase to the wallet.<br/>Please use a passphrase of <b>10 or more random characters</b>, or <b>eight or more words</b>.</source>
<translation>Εισάγετε τον νέο κωδικό πρόσβασης στον πορτοφόλι <br/> Παρακαλώ χρησιμοποιείστε ένα κωδικό με <b> 10 ή περισσότερους τυχαίους χαρακτήρες</b> ή <b> οχτώ ή παραπάνω λέξεις</b>.</translation>
</message>
<message>
<location line="+1"/>
<source>Encrypt wallet</source>
<translation>Κρυπτογράφησε το πορτοφόλι</translation>
</message>
<message>
<location line="+3"/>
<source>This operation needs your wallet passphrase to unlock the wallet.</source>
<translation>Αυτη η ενεργεία χρειάζεται τον κωδικό του πορτοφολιού για να ξεκλειδώσει το πορτοφόλι.</translation>
</message>
<message>
<location line="+5"/>
<source>Unlock wallet</source>
<translation>Ξεκλειδωσε το πορτοφολι</translation>
</message>
<message>
<location line="+3"/>
<source>This operation needs your wallet passphrase to decrypt the wallet.</source>
<translation>Αυτή η ενέργεια χρειάζεται τον κωδικό του πορτοφολιού για να αποκρυπτογραφήσει το πορτοφόλι.</translation>
</message>
<message>
<location line="+5"/>
<source>Decrypt wallet</source>
<translation>Αποκρυπτογράφησε το πορτοφολι</translation>
</message>
<message>
<location line="+3"/>
<source>Change passphrase</source>
<translation>Άλλαξε κωδικο πρόσβασης</translation>
</message>
<message>
<location line="+1"/>
<source>Enter the old and new passphrase to the wallet.</source>
<translation>Εισάγετε τον παλιό και τον νεο κωδικο στο πορτοφολι.</translation>
</message>
<message>
<location line="+46"/>
<source>Confirm wallet encryption</source>
<translation>Επιβεβαίωσε την κρυπτογραφηση του πορτοφολιού</translation>
</message>
<message>
<location line="+1"/>
<source>Warning: If you encrypt your wallet and lose your passphrase, you will <b>LOSE ALL OF YOUR LITECOINS</b>!</source>
<translation>Προσοχή: Εάν κρυπτογραφήσεις το πορτοφόλι σου και χάσεις τον κωδικό σου, θα χάσεις <b>ΟΛΑ ΣΟΥ ΤΑ LITECOINS</b>!</translation>
</message>
<message>
<location line="+0"/>
<source>Are you sure you wish to encrypt your wallet?</source>
<translation>Είστε σίγουροι ότι θέλετε να κρυπτογραφήσετε το πορτοφόλι σας;</translation>
</message>
<message>
<location line="+15"/>
<source>IMPORTANT: Any previous backups you have made of your wallet file should be replaced with the newly generated, encrypted wallet file. For security reasons, previous backups of the unencrypted wallet file will become useless as soon as you start using the new, encrypted wallet.</source>
<translation>ΣΗΜΑΝΤΙΚΟ: Τα προηγούμενα αντίγραφα ασφαλείας που έχετε κάνει από το αρχείο του πορτοφόλιου σας θα πρέπει να αντικατασταθουν με το νέο που δημιουργείται, κρυπτογραφημένο αρχείο πορτοφόλιου. Για λόγους ασφαλείας, τα προηγούμενα αντίγραφα ασφαλείας του μη κρυπτογραφημένου αρχείου πορτοφόλιου θα καταστουν άχρηστα μόλις αρχίσετε να χρησιμοποιείτε το νέο κρυπτογραφημένο πορτοφόλι. </translation>
</message>
<message>
<location line="+100"/>
<location line="+24"/>
<source>Warning: The Caps Lock key is on!</source>
<translation>Προσοχη: το πλήκτρο Caps Lock είναι ενεργο.</translation>
</message>
<message>
<location line="-130"/>
<location line="+58"/>
<source>Wallet encrypted</source>
<translation>Κρυπτογραφημενο πορτοφολι</translation>
</message>
<message>
<location line="-56"/>
<source>Parrotcoin will close now to finish the encryption process. Remember that encrypting your wallet cannot fully protect your parrotcoins from being stolen by malware infecting your computer.</source>
<translation>Το Parrotcoin θα κλεισει τώρα για να τελειώσει την διαδικασία κρυπτογραφησης. Θυμησου ότι κρυπτογραφώντας το πορτοφολι σου δεν μπορείς να προστατέψεις πλήρως τα parrotcoins σου από κλοπή στην περίπτωση όπου μολυνθεί ο υπολογιστής σου με κακόβουλο λογισμικο.</translation>
</message>
<message>
<location line="+13"/>
<location line="+7"/>
<location line="+42"/>
<location line="+6"/>
<source>Wallet encryption failed</source>
<translation>Η κρυπτογραφηση του πορτοφολιού απέτυχε</translation>
</message>
<message>
<location line="-54"/>
<source>Wallet encryption failed due to an internal error. Your wallet was not encrypted.</source>
<translation>Η κρυπτογράφηση του πορτοφολιού απέτυχε λογω εσωτερικού σφάλματος. Το πορτοφολι δεν κρυπτογραφηθηκε.</translation>
</message>
<message>
<location line="+7"/>
<location line="+48"/>
<source>The supplied passphrases do not match.</source>
<translation>Οι εισαχθέντες κωδικοί δεν ταιριάζουν.</translation>
</message>
<message>
<location line="-37"/>
<source>Wallet unlock failed</source>
<translation>το ξεκλείδωμα του πορτοφολιού απέτυχε</translation>
</message>
<message>
<location line="+1"/>
<location line="+11"/>
<location line="+19"/>
<source>The passphrase entered for the wallet decryption was incorrect.</source>
<translation>Ο κωδικος που εισήχθη για την αποκρυπτογραφηση του πορτοφολιού ήταν λαθος.</translation>
</message>
<message>
<location line="-20"/>
<source>Wallet decryption failed</source>
<translation>Η αποκρυπτογραφηση του πορτοφολιού απέτυχε</translation>
</message>
<message>
<location line="+14"/>
<source>Wallet passphrase was successfully changed.</source>
<translation>Ο κωδικος του πορτοφολιού άλλαξε με επιτυχία.</translation>
</message>
</context>
<context>
<name>BitcoinGUI</name>
<message>
<location filename="../bitcoingui.cpp" line="+233"/>
<source>Sign &message...</source>
<translation>Υπογραφή &Μηνύματος...</translation>
</message>
<message>
<location line="+280"/>
<source>Synchronizing with network...</source>
<translation>Συγχρονισμός με το δίκτυο...</translation>
</message>
<message>
<location line="-349"/>
<source>&Overview</source>
<translation>&Επισκόπηση</translation>
</message>
<message>
<location line="+1"/>
<source>Show general overview of wallet</source>
<translation>Εμφάνισε γενική εικονα του πορτοφολιού</translation>
</message>
<message>
<location line="+20"/>
<source>&Transactions</source>
<translation>&Συναλλαγές</translation>
</message>
<message>
<location line="+1"/>
<source>Browse transaction history</source>
<translation>Περιήγηση στο ιστορικο συνναλαγων</translation>
</message>
<message>
<location line="+7"/>
<source>Edit the list of stored addresses and labels</source>
<translation>Εξεργασια της λιστας των αποθηκευμενων διευθύνσεων και ετικετων</translation>
</message>
<message>
<location line="-14"/>
<source>Show the list of addresses for receiving payments</source>
<translation>Εμφάνισε την λίστα των διευθύνσεων για την παραλαβή πληρωμων</translation>
</message>
<message>
<location line="+31"/>
<source>E&xit</source>
<translation>Έ&ξοδος</translation>
</message>
<message>
<location line="+1"/>
<source>Quit application</source>
<translation>Εξοδος από την εφαρμογή</translation>
</message>
<message>
<location line="+4"/>
<source>Show information about Parrotcoin</source>
<translation>Εμφάνισε πληροφορίες σχετικά με το Parrotcoin</translation>
</message>
<message>
<location line="+2"/>
<source>About &Qt</source>
<translation>Σχετικά με &Qt</translation>
</message>
<message>
<location line="+1"/>
<source>Show information about Qt</source>
<translation>Εμφάνισε πληροφορίες σχετικά με Qt</translation>
</message>
<message>
<location line="+2"/>
<source>&Options...</source>
<translation>&Επιλογές...</translation>
</message>
<message>
<location line="+6"/>
<source>&Encrypt Wallet...</source>
<translation>&Κρυπτογράφησε το πορτοφόλι</translation>
</message>
<message>
<location line="+3"/>
<source>&Backup Wallet...</source>
<translation>&Αντίγραφο ασφαλείας του πορτοφολιού</translation>
</message>
<message>
<location line="+2"/>
<source>&Change Passphrase...</source>
<translation>&Άλλαξε κωδικο πρόσβασης</translation>
</message>
<message>
<location line="+285"/>
<source>Importing blocks from disk...</source>
<translation>Εισαγωγή μπλοκ από τον σκληρο δίσκο ... </translation>
</message>
<message>
<location line="+3"/>
<source>Reindexing blocks on disk...</source>
<translation>Φόρτωση ευρετηρίου μπλοκ στον σκληρο δισκο...</translation>
</message>
<message>
<location line="-347"/>
<source>Send coins to a Parrotcoin address</source>
<translation>Στείλε νομισματα σε μια διεύθυνση parrotcoin</translation>
</message>
<message>
<location line="+49"/>
<source>Modify configuration options for Parrotcoin</source>
<translation>Επεργασία ρυθμισεων επιλογών για το Parrotcoin</translation>
</message>
<message>
<location line="+9"/>
<source>Backup wallet to another location</source>
<translation>Δημιουργία αντιγράφου ασφαλείας πορτοφολιού σε άλλη τοποθεσία</translation>
</message>
<message>
<location line="+2"/>
<source>Change the passphrase used for wallet encryption</source>
<translation>Αλλαγή του κωδικού κρυπτογράφησης του πορτοφολιού</translation>
</message>
<message>
<location line="+6"/>
<source>&Debug window</source>
<translation>&Παράθυρο αποσφαλμάτωσης</translation>
</message>
<message>
<location line="+1"/>
<source>Open debugging and diagnostic console</source>
<translation>Άνοιγμα κονσόλας αποσφαλμάτωσης και διαγνωστικών</translation>
</message>
<message>
<location line="-4"/>
<source>&Verify message...</source>
<translation>&Επιβεβαίωση μηνύματος</translation>
</message>
<message>
<location line="-165"/>
<location line="+530"/>
<source>Parrotcoin</source>
<translation>Parrotcoin</translation>
</message>
<message>
<location line="-530"/>
<source>Wallet</source>
<translation>Πορτοφόλι</translation>
</message>
<message>
<location line="+101"/>
<source>&Send</source>
<translation>&Αποστολή</translation>
</message>
<message>
<location line="+7"/>
<source>&Receive</source>
<translation>&Παραλαβή </translation>
</message>
<message>
<location line="+14"/>
<source>&Addresses</source>
<translation>&Διεύθυνσεις</translation>
</message>
<message>
<location line="+22"/>
<source>&About Parrotcoin</source>
<translation>&Σχετικά με το Parrotcoin</translation>
</message>
<message>
<location line="+9"/>
<source>&Show / Hide</source>
<translation>&Εμφάνισε/Κρύψε</translation>
</message>
<message>
<location line="+1"/>
<source>Show or hide the main Window</source>
<translation>Εμφάνιση ή αποκρύψη του κεντρικου παράθυρου </translation>
</message>
<message>
<location line="+3"/>
<source>Encrypt the private keys that belong to your wallet</source>
<translation>Κρυπτογραφήστε τα ιδιωτικά κλειδιά που ανήκουν στο πορτοφόλι σας </translation>
</message>
<message>
<location line="+7"/>
<source>Sign messages with your Parrotcoin addresses to prove you own them</source>
<translation>Υπογράψτε ένα μήνυμα για να βεβαιώσετε πως είστε ο κάτοχος αυτής της διεύθυνσης</translation>
</message>
<message>
<location line="+2"/>
<source>Verify messages to ensure they were signed with specified Parrotcoin addresses</source>
<translation>Επαληθεύστε μηνύματα για να βεβαιωθείτε ότι υπογράφηκαν με συγκεκριμένες διευθύνσεις Parrotcoin</translation>
</message>
<message>
<location line="+28"/>
<source>&File</source>
<translation>&Αρχείο</translation>
</message>
<message>
<location line="+7"/>
<source>&Settings</source>
<translation>&Ρυθμίσεις</translation>
</message>
<message>
<location line="+6"/>
<source>&Help</source>
<translation>&Βοήθεια</translation>
</message>
<message>
<location line="+9"/>
<source>Tabs toolbar</source>
<translation>Εργαλειοθήκη καρτελών</translation>
</message>
<message>
<location line="+17"/>
<location line="+10"/>
<source>[testnet]</source>
<translation>[testnet]</translation>
</message>
<message>
<location line="+47"/>
<source>Parrotcoin client</source>
<translation>Πελάτης Parrotcoin</translation>
</message>
<message numerus="yes">
<location line="+141"/>
<source>%n active connection(s) to Parrotcoin network</source>
<translation><numerusform>%n ενεργή σύνδεση στο δίκτυο Parrotcoin</numerusform><numerusform>%n ενεργές συνδέσεις στο δίκτυο Parrotcoin</numerusform></translation>
</message>
<message>
<location line="+22"/>
<source>No block source available...</source>
<translation>Η πηγή του μπλοκ δεν ειναι διαθέσιμη... </translation>
</message>
<message>
<location line="+12"/>
<source>Processed %1 of %2 (estimated) blocks of transaction history.</source>
<translation>Επεξεργάστηκαν %1 από %2 (κατ' εκτίμηση) μπλοκ του ιστορικού συναλλαγών.</translation>
</message>
<message>
<location line="+4"/>
<source>Processed %1 blocks of transaction history.</source>
<translation>Έγινε επεξεργασία %1 μπλοκ του ιστορικού συναλλαγών.</translation>
</message>
<message numerus="yes">
<location line="+20"/>
<source>%n hour(s)</source>
<translation><numerusform>%n ώρα</numerusform><numerusform>%n ώρες</numerusform></translation>
</message>
<message numerus="yes">
<location line="+4"/>
<source>%n day(s)</source>
<translation><numerusform>%n ημέρα</numerusform><numerusform>%n ημέρες</numerusform></translation>
</message>
<message numerus="yes">
<location line="+4"/>
<source>%n week(s)</source>
<translation><numerusform>%n εβδομάδα</numerusform><numerusform>%n εβδομάδες</numerusform></translation>
</message>
<message>
<location line="+4"/>
<source>%1 behind</source>
<translation>%1 πίσω</translation>
</message>
<message>
<location line="+14"/>
<source>Last received block was generated %1 ago.</source>
<translation>Το τελευταίο μπλοκ που ελήφθη δημιουργήθηκε %1 πριν.</translation>
</message>
<message>
<location line="+2"/>
<source>Transactions after this will not yet be visible.</source>
<translation>Οι συναλλαγές μετά από αυτό δεν θα είναι ακόμη ορατες.</translation>
</message>
<message>
<location line="+22"/>
<source>Error</source>
<translation>Σφάλμα</translation>
</message>
<message>
<location line="+3"/>
<source>Warning</source>
<translation>Προειδοποίηση</translation>
</message>
<message>
<location line="+3"/>
<source>Information</source>
<translation>Πληροφορία</translation>
</message>
<message>
<location line="+70"/>
<source>This transaction is over the size limit. You can still send it for a fee of %1, which goes to the nodes that process your transaction and helps to support the network. Do you want to pay the fee?</source>
<translation>Η συναλλαγή ξεπερνάει το όριο.
Μπορεί να ολοκληρωθεί με μια αμοιβή των %1, η οποία αποδίδεται στους κόμβους που επεξεργάζονται τις συναλλαγές και βοηθούν στην υποστήριξη του δικτύου.
Θέλετε να συνεχίσετε;</translation>
</message>
<message>
<location line="-140"/>
<source>Up to date</source>
<translation>Ενημερωμένο</translation>
</message>
<message>
<location line="+31"/>
<source>Catching up...</source>
<translation>Ενημέρωση...</translation>
</message>
<message>
<location line="+113"/>
<source>Confirm transaction fee</source>
<translation>Επιβεβαίωση αμοιβής συναλλαγής</translation>
</message>
<message>
<location line="+8"/>
<source>Sent transaction</source>
<translation>Η συναλλαγή απεστάλη</translation>
</message>
<message>
<location line="+0"/>
<source>Incoming transaction</source>
<translation>Εισερχόμενη συναλλαγή</translation>
</message>
<message>
<location line="+1"/>
<source>Date: %1
Amount: %2
Type: %3
Address: %4
</source>
<translation>Ημερομηνία: %1
Ποσό: %2
Τύπος: %3
Διεύθυνση: %4
</translation>
</message>
<message>
<location line="+33"/>
<location line="+23"/>
<source>URI handling</source>
<translation>Χειρισμός URI</translation>
</message>
<message>
<location line="-23"/>
<location line="+23"/>
<source>URI can not be parsed! This can be caused by an invalid Parrotcoin address or malformed URI parameters.</source>
<translation>Το URI δεν μπορεί να αναλυθεί! Αυτό μπορεί να προκληθεί από μια μη έγκυρη διεύθυνση Parrotcoin ή ακατάλληλη παραμέτρο URI.</translation>
</message>
<message>
<location line="+17"/>
<source>Wallet is <b>encrypted</b> and currently <b>unlocked</b></source>
<translation>Το πορτοφόλι είναι <b>κρυπτογραφημένο</b> και <b>ξεκλείδωτο</b></translation>
</message>
<message>
<location line="+8"/>
<source>Wallet is <b>encrypted</b> and currently <b>locked</b></source>
<translation>Το πορτοφόλι είναι <b>κρυπτογραφημένο</b> και <b>κλειδωμένο</b></translation>
</message>
<message>
<location filename="../bitcoin.cpp" line="+111"/>
<source>A fatal error occurred. Parrotcoin can no longer continue safely and will quit.</source>
<translation>Παρουσιάστηκε ανεπανόρθωτο σφάλμα. Το Parrotcoin δεν μπορεί πλέον να συνεχίσει με ασφάλεια και θα τερματισθει.</translation>
</message>
</context>
<context>
<name>ClientModel</name>
<message>
<location filename="../clientmodel.cpp" line="+104"/>
<source>Network Alert</source>
<translation>Ειδοποίηση Δικτύου</translation>
</message>
</context>
<context>
<name>EditAddressDialog</name>
<message>
<location filename="../forms/editaddressdialog.ui" line="+14"/>
<source>Edit Address</source>
<translation>Επεξεργασία Διεύθυνσης</translation>
</message>
<message>
<location line="+11"/>
<source>&Label</source>
<translation>&Επιγραφή</translation>
</message>
<message>
<location line="+10"/>
<source>The label associated with this address book entry</source>
<translation>Η επιγραφή που σχετίζεται με αυτή την καταχώρηση του βιβλίου διευθύνσεων</translation>
</message>
<message>
<location line="+7"/>
<source>&Address</source>
<translation>&Διεύθυνση</translation>
</message>
<message>
<location line="+10"/>
<source>The address associated with this address book entry. This can only be modified for sending addresses.</source>
<translation>Η διεύθυνση που σχετίζεται με αυτή την καταχώρηση του βιβλίου διευθύνσεων. Μπορεί να τροποποιηθεί μόνο για τις διευθύνσεις αποστολής.</translation>
</message>
<message>
<location filename="../editaddressdialog.cpp" line="+21"/>
<source>New receiving address</source>
<translation>Νέα διεύθυνση λήψης</translation>
</message>
<message>
<location line="+4"/>
<source>New sending address</source>
<translation>Νέα διεύθυνση αποστολής</translation>
</message>
<message>
<location line="+3"/>
<source>Edit receiving address</source>
<translation>Επεξεργασία διεύθυνσης λήψης</translation>
</message>
<message>
<location line="+4"/>
<source>Edit sending address</source>
<translation>Επεξεργασία διεύθυνσης αποστολής</translation>
</message>
<message>
<location line="+76"/>
<source>The entered address "%1" is already in the address book.</source>
<translation>Η διεύθυνση "%1" βρίσκεται ήδη στο βιβλίο διευθύνσεων.</translation>
</message>
<message>
<location line="-5"/>
<source>The entered address "%1" is not a valid Parrotcoin address.</source>
<translation>Η διεύθυνση "%1" δεν είναι έγκυρη Parrotcoin διεύθυνση.</translation>
</message>
<message>
<location line="+10"/>
<source>Could not unlock wallet.</source>
<translation>Δεν είναι δυνατό το ξεκλείδωμα του πορτοφολιού.</translation>
</message>
<message>
<location line="+5"/>
<source>New key generation failed.</source>
<translation>Η δημιουργία νέου κλειδιού απέτυχε.</translation>
</message>
</context>
<context>
<name>GUIUtil::HelpMessageBox</name>
<message>
<location filename="../guiutil.cpp" line="+424"/>
<location line="+12"/>
<source>Parrotcoin-Qt</source>
<translation>parrotcoin-qt</translation>
</message>
<message>
<location line="-12"/>
<source>version</source>
<translation>έκδοση</translation>
</message>
<message>
<location line="+2"/>
<source>Usage:</source>
<translation>Χρήση:</translation>
</message>
<message>
<location line="+1"/>
<source>command-line options</source>
<translation>επιλογής γραμμής εντολών</translation>
</message>
<message>
<location line="+4"/>
<source>UI options</source>
<translation>επιλογές UI</translation>
</message>
<message>
<location line="+1"/>
<source>Set language, for example "de_DE" (default: system locale)</source>
<translation>Όρισε γλώσσα, για παράδειγμα "de_DE"(προεπιλογή:τοπικές ρυθμίσεις)</translation>
</message>
<message>
<location line="+1"/>
<source>Start minimized</source>
<translation>Έναρξη ελαχιστοποιημένο</translation>
</message>
<message>
<location line="+1"/>
<source>Show splash screen on startup (default: 1)</source>
<translation>Εμφάνισε την οθόνη εκκίνησης κατά την εκκίνηση(προεπιλογή:1)</translation>
</message>
</context>
<context>
<name>OptionsDialog</name>
<message>
<location filename="../forms/optionsdialog.ui" line="+14"/>
<source>Options</source>
<translation>Ρυθμίσεις</translation>
</message>
<message>
<location line="+16"/>
<source>&Main</source>
<translation>&Κύριο</translation>
</message>
<message>
<location line="+6"/>
<source>Optional transaction fee per kB that helps make sure your transactions are processed quickly. Most transactions are 1 kB.</source>
<translation>Η προαιρετική αμοιβή για κάθε kB επισπεύδει την επεξεργασία των συναλλαγών σας. Οι περισσότερες συναλλαγές είναι 1 kB. </translation>
</message>
<message>
<location line="+15"/>
<source>Pay transaction &fee</source>
<translation>Αμοιβή &συναλλαγής</translation>
</message>
<message>
<location line="+31"/>
<source>Automatically start Parrotcoin after logging in to the system.</source>
<translation>Αυτόματη εκκίνηση του Parrotcoin μετά την εισαγωγή στο σύστημα</translation>
</message>
<message>
<location line="+3"/>
<source>&Start Parrotcoin on system login</source>
<translation>&Έναρξη του Parrotcoin κατά την εκκίνηση του συστήματος</translation>
</message>
<message>
<location line="+35"/>
<source>Reset all client options to default.</source>
<translation>Επαναφορά όλων των επιλογών του πελάτη στις προεπιλογές.</translation>
</message>
<message>
<location line="+3"/>
<source>&Reset Options</source>
<translation>Επαναφορα ρυθμίσεων</translation>
</message>
<message>
<location line="+13"/>
<source>&Network</source>
<translation>&Δίκτυο</translation>
</message>
<message>
<location line="+6"/>
<source>Automatically open the Parrotcoin client port on the router. This only works when your router supports UPnP and it is enabled.</source>
<translation>Αυτόματο άνοιγμα των θυρών Parrotcoin στον δρομολογητή. Λειτουργεί μόνο αν ο δρομολογητής σας υποστηρίζει τη λειτουργία UPnP.</translation>
</message>
<message>
<location line="+3"/>
<source>Map port using &UPnP</source>
<translation>Απόδοση θυρών με χρήση &UPnP</translation>
</message>
<message>
<location line="+7"/>
<source>Connect to the Parrotcoin network through a SOCKS proxy (e.g. when connecting through Tor).</source>
<translation>Σύνδεση στο δίκτυο Parrotcoin μέσω διαμεσολαβητή SOCKS (π.χ. για σύνδεση μέσω Tor)</translation>
</message>
<message>
<location line="+3"/>
<source>&Connect through SOCKS proxy:</source>
<translation>&Σύνδεση μέσω διαμεσολαβητή SOCKS</translation>
</message>
<message>
<location line="+9"/>
<source>Proxy &IP:</source>
<translation>&IP διαμεσολαβητή:</translation>
</message>
<message>
<location line="+19"/>
<source>IP address of the proxy (e.g. 127.0.0.1)</source>
<translation>Διεύθυνση IP του διαμεσολαβητή (π.χ. 127.0.0.1)</translation>
</message>
<message>
<location line="+7"/>
<source>&Port:</source>
<translation>&Θύρα:</translation>
</message>
<message>
<location line="+19"/>
<source>Port of the proxy (e.g. 9050)</source>
<translation>Θύρα διαμεσολαβητή (π.χ. 9050)</translation>
</message>
<message>
<location line="+7"/>
<source>SOCKS &Version:</source>
<translation>SOCKS &Έκδοση:</translation>
</message>
<message>
<location line="+13"/>
<source>SOCKS version of the proxy (e.g. 5)</source>
<translation>SOCKS εκδοση του διαμεσολαβητη (e.g. 5)</translation>
</message>
<message>
<location line="+36"/>
<source>&Window</source>
<translation>&Παράθυρο</translation>
</message>
<message>
<location line="+6"/>
<source>Show only a tray icon after minimizing the window.</source>
<translation>Εμφάνιση μόνο εικονιδίου στην περιοχή ειδοποιήσεων κατά την ελαχιστοποίηση</translation>
</message>
<message>
<location line="+3"/>
<source>&Minimize to the tray instead of the taskbar</source>
<translation>&Ελαχιστοποίηση στην περιοχή ειδοποιήσεων αντί της γραμμής εργασιών</translation>
</message>
<message>
<location line="+7"/>
<source>Minimize instead of exit the application when the window is closed. When this option is enabled, the application will be closed only after selecting Quit in the menu.</source>
<translation>Ελαχιστοποίηση αντί για έξοδο κατά το κλείσιμο του παραθύρου</translation>
</message>
<message>
<location line="+3"/>
<source>M&inimize on close</source>
<translation>Ε&λαχιστοποίηση κατά το κλείσιμο</translation>
</message>
<message>
<location line="+21"/>
<source>&Display</source>
<translation>&Απεικόνιση</translation>
</message>
<message>
<location line="+8"/>
<source>User Interface &language:</source>
<translation>Γλώσσα περιβάλλοντος εργασίας: </translation>
</message>
<message>
<location line="+13"/>
<source>The user interface language can be set here. This setting will take effect after restarting Parrotcoin.</source>
<translation>Εδώ μπορεί να ρυθμιστεί η γλώσσα διεπαφής χρήστη. Αυτή η ρύθμιση θα ισχύσει μετά την επανεκκίνηση του Parrotcoin.</translation>
</message>
<message>
<location line="+11"/>
<source>&Unit to show amounts in:</source>
<translation>&Μονάδα μέτρησης:</translation>
</message>
<message>
<location line="+13"/>
<source>Choose the default subdivision unit to show in the interface and when sending coins.</source>
<translation>Διαλέξτε την προεπιλεγμένη υποδιαίρεση που θα εμφανίζεται όταν στέλνετε νομίσματα.</translation>
</message>
<message>
<location line="+9"/>
<source>Whether to show Parrotcoin addresses in the transaction list or not.</source>
<translation>Επιλέξτε αν θέλετε να εμφανίζονται οι διευθύνσεις Parrotcoin στη λίστα συναλλαγών.</translation>
</message>
<message>
<location line="+3"/>
<source>&Display addresses in transaction list</source>
<translation>Εμφάνιση διευθύνσεων στη λίστα συναλλαγών</translation>
</message>
<message>
<location line="+71"/>
<source>&OK</source>
<translation>&ΟΚ</translation>
</message>
<message>
<location line="+7"/>
<source>&Cancel</source>
<translation>&Ακύρωση</translation>
</message>
<message>
<location line="+10"/>
<source>&Apply</source>
<translation>&Εφαρμογή</translation>
</message>
<message>
<location filename="../optionsdialog.cpp" line="+53"/>
<source>default</source>
<translation>προεπιλογή</translation>
</message>
<message>
<location line="+130"/>
<source>Confirm options reset</source>
<translation>Επιβεβαιώση των επιλογων επαναφοράς </translation>
</message>
<message>
<location line="+1"/>
<source>Some settings may require a client restart to take effect.</source>
<translation>Ορισμένες ρυθμίσεις ενδέχεται να απαιτούν επανεκκίνηση του πελάτη για να τεθούν σε ισχύ.</translation>
</message>
<message>
<location line="+0"/>
<source>Do you want to proceed?</source>
<translation>Θέλετε να προχωρήσετε;</translation>
</message>
<message>
<location line="+42"/>
<location line="+9"/>
<source>Warning</source>
<translation>Προειδοποίηση</translation>
</message>
<message>
<location line="-9"/>
<location line="+9"/>
<source>This setting will take effect after restarting Parrotcoin.</source>
<translation>Αυτή η ρύθμιση θα ισχύσει μετά την επανεκκίνηση του Parrotcoin.</translation>
</message>
<message>
<location line="+29"/>
<source>The supplied proxy address is invalid.</source>
<translation>Δεν είναι έγκυρη η διεύθυνση διαμεσολαβητή</translation>
</message>
</context>
<context>
<name>OverviewPage</name>
<message>
<location filename="../forms/overviewpage.ui" line="+14"/>
<source>Form</source>
<translation>Φόρμα</translation>
</message>
<message>
<location line="+50"/>
<location line="+166"/>
<source>The displayed information may be out of date. Your wallet automatically synchronizes with the Parrotcoin network after a connection is established, but this process has not completed yet.</source>
<translation>Οι πληροφορίες που εμφανίζονται μπορεί να είναι ξεπερασμένες. Το πορτοφόλι σας συγχρονίζεται αυτόματα με το δίκτυο Parrotcoin μετά από μια σύνδεση, αλλά αυτή η διαδικασία δεν έχει ακόμη ολοκληρωθεί. </translation>
</message>
<message>
<location line="-124"/>
<source>Balance:</source>
<translation>Υπόλοιπο</translation>
</message>
<message>
<location line="+29"/>
<source>Unconfirmed:</source>
<translation>Ανεπιβεβαίωτες</translation>
</message>
<message>
<location line="-78"/>
<source>Wallet</source>
<translation>Πορτοφόλι</translation>
</message>
<message>
<location line="+107"/>
<source>Immature:</source>
<translation>Ανώριμος</translation>
</message>
<message>
<location line="+13"/>
<source>Mined balance that has not yet matured</source>
<translation>Εξορυγμενο υπόλοιπο που δεν έχει ακόμα ωριμάσει </translation>
</message>
<message>
<location line="+46"/>
<source><b>Recent transactions</b></source>
<translation><b>Πρόσφατες συναλλαγές</b></translation>
</message>
<message>
<location line="-101"/>
<source>Your current balance</source>
<translation>Το τρέχον υπόλοιπο</translation>
</message>
<message>
<location line="+29"/>
<source>Total of transactions that have yet to be confirmed, and do not yet count toward the current balance</source>
<translation>Το άθροισμα των συναλλαγών που δεν έχουν ακόμα επιβεβαιωθεί και δεν προσμετρώνται στο τρέχον υπόλοιπό σας</translation>
</message>
<message>
<location filename="../overviewpage.cpp" line="+116"/>
<location line="+1"/>
<source>out of sync</source>
<translation>εκτός συγχρονισμού</translation>
</message>
</context>
<context>
<name>PaymentServer</name>
<message>
<location filename="../paymentserver.cpp" line="+107"/>
<source>Cannot start parrotcoin: click-to-pay handler</source>
<translation>Δεν είναι δυνατή η εκκίνηση του Parrotcoin: click-to-pay handler</translation>
</message>
</context>
<context>
<name>QRCodeDialog</name>
<message>
<location filename="../forms/qrcodedialog.ui" line="+14"/>
<source>QR Code Dialog</source>
<translation>Κώδικας QR</translation>
</message>
<message>
<location line="+59"/>
<source>Request Payment</source>
<translation>Αίτηση πληρωμής</translation>
</message>
<message>
<location line="+56"/>
<source>Amount:</source>
<translation>Ποσό:</translation>
</message>
<message>
<location line="-44"/>
<source>Label:</source>
<translation>Επιγραφή:</translation>
</message>
<message>
<location line="+19"/>
<source>Message:</source>
<translation>Μήνυμα:</translation>
</message>
<message>
<location line="+71"/>
<source>&Save As...</source>
<translation>&Αποθήκευση ως...</translation>
</message>
<message>
<location filename="../qrcodedialog.cpp" line="+62"/>
<source>Error encoding URI into QR Code.</source>
<translation>Σφάλμα κατά την κωδικοποίηση του URI σε κώδικα QR</translation>
</message>
<message>
<location line="+40"/>
<source>The entered amount is invalid, please check.</source>
<translation>Το αναγραφόμενο ποσό δεν είναι έγκυρο, παρακαλούμε να το ελέγξετε.</translation>
</message>
<message>
<location line="+23"/>
<source>Resulting URI too long, try to reduce the text for label / message.</source>
<translation>Το αποτέλεσμα της διεύθυνσης είναι πολύ μεγάλο. Μειώστε το μέγεθος για το κείμενο της ετικέτας/ μηνύματος.</translation>
</message>
<message>
<location line="+25"/>
<source>Save QR Code</source>
<translation>Αποθήκευση κώδικα QR</translation>
</message>
<message>
<location line="+0"/>
<source>PNG Images (*.png)</source>
<translation>Εικόνες PNG (*.png)</translation>
</message>
</context>
<context>
<name>RPCConsole</name>
<message>
<location filename="../forms/rpcconsole.ui" line="+46"/>
<source>Client name</source>
<translation>Όνομα Πελάτη</translation>
</message>
<message>
<location line="+10"/>
<location line="+23"/>
<location line="+26"/>
<location line="+23"/>
<location line="+23"/>
<location line="+36"/>
<location line="+53"/>
<location line="+23"/>
<location line="+23"/>
<location filename="../rpcconsole.cpp" line="+339"/>
<source>N/A</source>
<translation>Μη διαθέσιμο</translation>
</message>
<message>
<location line="-217"/>
<source>Client version</source>
<translation>Έκδοση Πελάτη</translation>
</message>
<message>
<location line="-45"/>
<source>&Information</source>
<translation>&Πληροφορία</translation>
</message>
<message>
<location line="+68"/>
<source>Using OpenSSL version</source>
<translation>Χρησιμοποιηση της OpenSSL εκδοσης</translation>
</message>
<message>
<location line="+49"/>
<source>Startup time</source>
<translation>Χρόνος εκκίνησης</translation>
</message>
<message>
<location line="+29"/>
<source>Network</source>
<translation>Δίκτυο</translation>
</message>
<message>
<location line="+7"/>
<source>Number of connections</source>
<translation>Αριθμός συνδέσεων</translation>
</message>
<message>
<location line="+23"/>
<source>On testnet</source>
<translation>Στο testnet</translation>
</message>
<message>
<location line="+23"/>
<source>Block chain</source>
<translation>Αλυσίδα μπλοκ</translation>
</message>
<message>
<location line="+7"/>
<source>Current number of blocks</source>
<translation>Τρέχον αριθμός μπλοκ</translation>
</message>
<message>
<location line="+23"/>
<source>Estimated total blocks</source>
<translation>Κατ' εκτίμηση συνολικά μπλοκς</translation>
</message>
<message>
<location line="+23"/>
<source>Last block time</source>
<translation>Χρόνος τελευταίου μπλοκ</translation>
</message>
<message>
<location line="+52"/>
<source>&Open</source>
<translation>&Άνοιγμα</translation>
</message>
<message>
<location line="+16"/>
<source>Command-line options</source>
<translation>επιλογής γραμμής εντολών</translation>
</message>
<message>
<location line="+7"/>
<source>Show the Parrotcoin-Qt help message to get a list with possible Parrotcoin command-line options.</source>
<translation>Εμφανιση του Parrotcoin-Qt μήνυματος βοήθειας για να πάρετε μια λίστα με τις πιθανές επιλογές Parrotcoin γραμμής εντολών.</translation>
</message>
<message>
<location line="+3"/>
<source>&Show</source>
<translation>&Εμφάνιση</translation>
</message>
<message>
<location line="+24"/>
<source>&Console</source>
<translation>&Κονσόλα</translation>
</message>
<message>
<location line="-260"/>
<source>Build date</source>
<translation>Ημερομηνία κατασκευής</translation>
</message>
<message>
<location line="-104"/>
<source>Parrotcoin - Debug window</source>
<translation>Parrotcoin - Παράθυρο αποσφαλμάτωσης</translation>
</message>
<message>
<location line="+25"/>
<source>Parrotcoin Core</source>
<translation>Parrotcoin Core</translation>
</message>
<message>
<location line="+279"/>
<source>Debug log file</source>
<translation>Αρχείο καταγραφής εντοπισμού σφαλμάτων </translation>
</message>
<message>
<location line="+7"/>
<source>Open the Parrotcoin debug log file from the current data directory. This can take a few seconds for large log files.</source>
<translation>Ανοίξτε το αρχείο καταγραφής εντοπισμού σφαλμάτων από τον τρέχοντα κατάλογο δεδομένων. Αυτό μπορεί να πάρει μερικά δευτερόλεπτα για τα μεγάλα αρχεία καταγραφής. </translation>
</message>
<message>
<location line="+102"/>
<source>Clear console</source>
<translation>Καθαρισμός κονσόλας</translation>
</message>
<message>
<location filename="../rpcconsole.cpp" line="-30"/>
<source>Welcome to the Parrotcoin RPC console.</source>
<translation>Καλώς ήρθατε στην Parrotcoin RPC κονσόλα.</translation>
</message>
<message>
<location line="+1"/>
<source>Use up and down arrows to navigate history, and <b>Ctrl-L</b> to clear screen.</source>
<translation>Χρησιμοποιήστε το πάνω και κάτω βέλος για να περιηγηθείτε στο ιστορικο, και <b>Ctrl-L</b> για εκκαθαριση οθονης.</translation>
</message>
<message>
<location line="+1"/>
<source>Type <b>help</b> for an overview of available commands.</source>
<translation>Γράψτε <b>βοήθεια</b> για μια επισκόπηση των διαθέσιμων εντολών</translation>
</message>
</context>
<context>
<name>SendCoinsDialog</name>
<message>
<location filename="../forms/sendcoinsdialog.ui" line="+14"/>
<location filename="../sendcoinsdialog.cpp" line="+124"/>
<location line="+5"/>
<location line="+5"/>
<location line="+5"/>
<location line="+6"/>
<location line="+5"/>
<location line="+5"/>
<source>Send Coins</source>
<translation>Αποστολή νομισμάτων</translation>
</message>
<message>
<location line="+50"/>
<source>Send to multiple recipients at once</source>
<translation>Αποστολή σε πολλούς αποδέκτες ταυτόχρονα</translation>
</message>
<message>
<location line="+3"/>
<source>Add &Recipient</source>
<translation>&Προσθήκη αποδέκτη</translation>
</message>
<message>
<location line="+20"/>
<source>Remove all transaction fields</source>
<translation>Διαγραφή όλων των πεδίων συναλλαγής</translation>
</message>
<message>
<location line="+3"/>
<source>Clear &All</source>
<translation>Καθαρισμός &Όλων</translation>
</message>
<message>
<location line="+22"/>
<source>Balance:</source>
<translation>Υπόλοιπο:</translation>
</message>
<message>
<location line="+10"/>
<source>123.456 BTC</source>
<translation>123,456 BTC</translation>
</message>
<message>
<location line="+31"/>
<source>Confirm the send action</source>
<translation>Επιβεβαίωση αποστολής</translation>
</message>
<message>
<location line="+3"/>
<source>S&end</source>
<translation>Αποστολη</translation>
</message>
<message>
<location filename="../sendcoinsdialog.cpp" line="-59"/>
<source><b>%1</b> to %2 (%3)</source>
<translation><b>%1</b> σε %2 (%3)</translation>
</message>
<message>
<location line="+5"/>
<source>Confirm send coins</source>
<translation>Επιβεβαίωση αποστολής νομισμάτων</translation>
</message>
<message>
<location line="+1"/>
<source>Are you sure you want to send %1?</source>
<translation>Είστε βέβαιοι για την αποστολή %1;</translation>
</message>
<message>
<location line="+0"/>
<source> and </source>
<translation> και </translation>
</message>
<message>
<location line="+23"/>
<source>The recipient address is not valid, please recheck.</source>
<translation>Η διεύθυνση του αποδέκτη δεν είναι σωστή. Παρακαλώ ελέγξτε ξανά.</translation>
</message>
<message>
<location line="+5"/>
<source>The amount to pay must be larger than 0.</source>
<translation>Το ποσό πληρωμής πρέπει να είναι μεγαλύτερο από 0.</translation>
</message>
<message>
<location line="+5"/>
<source>The amount exceeds your balance.</source>
<translation>Το ποσό ξεπερνάει το διαθέσιμο υπόλοιπο</translation>
</message>
<message>
<location line="+5"/>
<source>The total exceeds your balance when the %1 transaction fee is included.</source>
<translation>Το σύνολο υπερβαίνει το υπόλοιπό σας όταν συμπεριληφθεί και η αμοιβή %1</translation>
</message>
<message>
<location line="+6"/>
<source>Duplicate address found, can only send to each address once per send operation.</source>
<translation>Βρέθηκε η ίδια διεύθυνση δύο φορές. Επιτρέπεται μία μόνο εγγραφή για κάθε διεύθυνση, σε κάθε διαδικασία αποστολής.</translation>
</message>
<message>
<location line="+5"/>
<source>Error: Transaction creation failed!</source>
<translation>Σφάλμα: Η δημιουργία της συναλλαγής απέτυχε</translation>
</message>
<message>
<location line="+5"/>
<source>Error: The transaction was rejected. This might happen if some of the coins in your wallet were already spent, such as if you used a copy of wallet.dat and coins were spent in the copy but not marked as spent here.</source>
<translation>Σφάλμα: Η συναλλαγή απερρίφθη. Αυτό ενδέχεται να συμβαίνει αν κάποια από τα νομίσματα έχουν ήδη ξοδευθεί, όπως αν χρησιμοποιήσατε αντίγραφο του wallet.dat και τα νομίσματα ξοδεύθηκαν εκεί.</translation>
</message>
</context>
<context>
<name>SendCoinsEntry</name>
<message>
<location filename="../forms/sendcoinsentry.ui" line="+14"/>
<source>Form</source>
<translation>Φόρμα</translation>
</message>
<message>
<location line="+15"/>
<source>A&mount:</source>
<translation>&Ποσό:</translation>
</message>
<message>
<location line="+13"/>
<source>Pay &To:</source>
<translation>Πληρωμή &σε:</translation>
</message>
<message>
<location line="+34"/>
<source>The address to send the payment to (e.g. Ler4HNAEfwYhBmGXcFP2Po1NpRUEiK8km2)</source>
<translation>Διεύθυνση αποστολής της πληρωμής (π.χ. Ler4HNAEfwYhBmGXcFP2Po1NpRUEiK8km2)</translation>
</message>
<message>
<location line="+60"/>
<location filename="../sendcoinsentry.cpp" line="+26"/>
<source>Enter a label for this address to add it to your address book</source>
<translation>Εισάγετε μια επιγραφή για αυτή τη διεύθυνση ώστε να καταχωρηθεί στο βιβλίο διευθύνσεων</translation>
</message>
<message>
<location line="-78"/>
<source>&Label:</source>
<translation>&Επιγραφή:</translation>
</message>
<message>
<location line="+28"/>
<source>Choose address from address book</source>
<translation>Επιλογή διεύθυνσης από το βιβλίο διευθύνσεων</translation>
</message>
<message>
<location line="+10"/>
<source>Alt+A</source>
<translation>Alt+A</translation>
</message>
<message>
<location line="+7"/>
<source>Paste address from clipboard</source>
<translation>Επικόλληση διεύθυνσης από το πρόχειρο</translation>
</message>
<message>
<location line="+10"/>
<source>Alt+P</source>
<translation>Alt+P</translation>
</message>
<message>
<location line="+7"/>
<source>Remove this recipient</source>
<translation>Αφαίρεση αποδέκτη</translation>
</message>
<message>
<location filename="../sendcoinsentry.cpp" line="+1"/>
<source>Enter a Parrotcoin address (e.g. Ler4HNAEfwYhBmGXcFP2Po1NpRUEiK8km2)</source>
<translation>Εισάγετε μια διεύθυνση Parrotcoin (π.χ. Ler4HNAEfwYhBmGXcFP2Po1NpRUEiK8km2)</translation>
</message>
</context>
<context>
<name>SignVerifyMessageDialog</name>
<message>
<location filename="../forms/signverifymessagedialog.ui" line="+14"/>
<source>Signatures - Sign / Verify a Message</source>
<translation>Υπογραφές - Υπογραφή / Επαλήθευση μηνύματος</translation>
</message>
<message>
<location line="+13"/>
<source>&Sign Message</source>
<translation>&Υπογραφή Μηνύματος</translation>
</message>
<message>
<location line="+6"/>
<source>You can sign messages with your addresses to prove you own them. Be careful not to sign anything vague, as phishing attacks may try to trick you into signing your identity over to them. Only sign fully-detailed statements you agree to.</source>
<translation>Μπορείτε να υπογράφετε μηνύματα με τις διευθύνσεις σας, ώστε ν' αποδεικνύετε πως αυτές σας ανήκουν. Αποφεύγετε να υπογράφετε κάτι αόριστο καθώς ενδέχεται να εξαπατηθείτε. Υπογράφετε μόνο πλήρεις δηλώσεις με τις οποίες συμφωνείτε.</translation>
</message>
<message>
<location line="+18"/>
<source>The address to sign the message with (e.g. Ler4HNAEfwYhBmGXcFP2Po1NpRUEiK8km2)</source>
<translation>Η διεύθυνση με την οποία θα υπογραφεί το μήνυμα (π.χ. Ler4HNAEfwYhBmGXcFP2Po1NpRUEiK8km2)</translation>
</message>
<message>
<location line="+10"/>
<location line="+213"/>
<source>Choose an address from the address book</source>
<translation>Επιλογή διεύθυνσης από το βιβλίο διευθύνσεων</translation>
</message>
<message>
<location line="-203"/>
<location line="+213"/>
<source>Alt+A</source>
<translation>Alt+A</translation>
</message>
<message>
<location line="-203"/>
<source>Paste address from clipboard</source>
<translation>Επικόλληση διεύθυνσης από το πρόχειρο</translation>
</message>
<message>
<location line="+10"/>
<source>Alt+P</source>
<translation>Alt+P</translation>
</message>
<message>
<location line="+12"/>
<source>Enter the message you want to sign here</source>
<translation>Εισάγετε εδώ το μήνυμα που θέλετε να υπογράψετε</translation>
</message>
<message>
<location line="+7"/>
<source>Signature</source>
<translation>Υπογραφή</translation>
</message>
<message>
<location line="+27"/>
<source>Copy the current signature to the system clipboard</source>
<translation>Αντιγραφή της τρέχουσας υπογραφής στο πρόχειρο του συστήματος</translation>
</message>
<message>
<location line="+21"/>
<source>Sign the message to prove you own this Parrotcoin address</source>
<translation>Υπογράψτε το μήνυμα για ν' αποδείξετε πως σας ανήκει αυτή η διεύθυνση Parrotcoin</translation>
</message>
<message>
<location line="+3"/>
<source>Sign &Message</source>
<translation>Υπογραφή &Μηνύματος</translation>
</message>
<message>
<location line="+14"/>
<source>Reset all sign message fields</source>
<translation>Επαναφορά όλων των πεδίων υπογραφής μηνύματος</translation>
</message>
<message>
<location line="+3"/>
<location line="+146"/>
<source>Clear &All</source>
<translation>Καθαρισμός &Όλων</translation>
</message>
<message>
<location line="-87"/>
<source>&Verify Message</source>
<translation>&Επιβεβαίωση μηνύματος</translation>
</message>
<message>
<location line="+6"/>
<source>Enter the signing address, message (ensure you copy line breaks, spaces, tabs, etc. exactly) and signature below to verify the message. Be careful not to read more into the signature than what is in the signed message itself, to avoid being tricked by a man-in-the-middle attack.</source>
<translation>Εισάγετε παρακάτω τη διεύθυνση υπογραφής, το μήνυμα (βεβαιωθείτε ότι αντιγράφετε ακριβώς αλλαγές γραμμής, κενά, στηλοθέτες κ.λπ.) και την υπογραφή, για να επαληθεύσετε το μήνυμα. Προσέξτε να μη συμπεράνετε από την υπογραφή περισσότερα από όσα περιέχει το ίδιο το υπογεγραμμένο μήνυμα, ώστε να μην εξαπατηθείτε από επίθεση man-in-the-middle.</translation>
</message>
<message>
<location line="+21"/>
<source>The address the message was signed with (e.g. Ler4HNAEfwYhBmGXcFP2Po1NpRUEiK8km2)</source>
<translation>Η διεύθυνση με την οποία υπογράφηκε το μήνυμα (π.χ. Ler4HNAEfwYhBmGXcFP2Po1NpRUEiK8km2)</translation>
</message>
<message>
<location line="+40"/>
<source>Verify the message to ensure it was signed with the specified Parrotcoin address</source>
<translation>Επαληθεύστε το μήνυμα για να βεβαιωθείτε ότι υπογράφηκε με τη συγκεκριμένη διεύθυνση Parrotcoin</translation>
</message>
<message>
<location line="+3"/>
<source>Verify &Message</source>
<translation>Επιβεβαίωση &Μηνύματος</translation>
</message>
<message>
<location line="+14"/>
<source>Reset all verify message fields</source>
<translation>Επαναφορά όλων των πεδίων επαλήθευσης μηνύματος</translation>
</message>
<message>
<location filename="../signverifymessagedialog.cpp" line="+27"/>
<location line="+3"/>
<source>Enter a Parrotcoin address (e.g. Ler4HNAEfwYhBmGXcFP2Po1NpRUEiK8km2)</source>
<translation>Εισάγετε μια διεύθυνση Parrotcoin (π.χ. Ler4HNAEfwYhBmGXcFP2Po1NpRUEiK8km2)</translation>
</message>
<message>
<location line="-2"/>
<source>Click "Sign Message" to generate signature</source>
<translation>Κάντε κλικ στο "Υπογραφή Μηνύματος" για να λάβετε την υπογραφή</translation>
|
<location line="+3"/>
<source>Enter Parrotcoin signature</source>
<translation>Εισαγωγή υπογραφής Parrotcoin</translation>
</message>
<message>
<location line="+82"/>
<location line="+81"/>
<source>The entered address is invalid.</source>
<translation>Η διεύθυνση που εισήχθη είναι λάθος.</translation>
</message>
<message>
<location line="-81"/>
<location line="+8"/>
<location line="+73"/>
<location line="+8"/>
<source>Please check the address and try again.</source>
<translation>Παρακαλούμε ελέγξτε την διεύθυνση και δοκιμάστε ξανά.</translation>
</message>
<message>
<location line="-81"/>
<location line="+81"/>
<source>The entered address does not refer to a key.</source>
<translation>Η διεύθυνση που έχει εισαχθεί δεν αντιστοιχεί σε κλειδί.</translation>
</message>
<message>
<location line="-73"/>
<source>Wallet unlock was cancelled.</source>
<translation>Το ξεκλείδωμα του πορτοφολιού ακυρώθηκε.</translation>
</message>
<message>
<location line="+8"/>
<source>Private key for the entered address is not available.</source>
<translation>Το ιδιωτικό κλειδί για τη διεύθυνση που εισήχθη δεν είναι διαθέσιμο.</translation>
</message>
<message>
<location line="+12"/>
<source>Message signing failed.</source>
<translation>Η υπογραφή του μηνύματος απέτυχε.</translation>
</message>
<message>
<location line="+5"/>
<source>Message signed.</source>
<translation>Μήνυμα υπεγράφη.</translation>
</message>
<message>
<location line="+59"/>
<source>The signature could not be decoded.</source>
<translation>Η υπογραφή δεν μπόρεσε να αποκωδικοποιηθεί.</translation>
</message>
<message>
<location line="+0"/>
<location line="+13"/>
<source>Please check the signature and try again.</source>
<translation>Παρακαλούμε ελέγξτε την υπογραφή και δοκιμάστε ξανά.</translation>
</message>
<message>
<location line="+0"/>
<source>The signature did not match the message digest.</source>
<translation>Η υπογραφή δεν ταιριάζει με τη σύνοψη του μηνύματος.</translation>
</message>
<message>
<location line="+7"/>
<source>Message verification failed.</source>
<translation>Η επιβεβαίωση του μηνύματος απέτυχε</translation>
</message>
<message>
<location line="+5"/>
<source>Message verified.</source>
<translation>Μήνυμα επιβεβαιώθηκε.</translation>
</message>
</context>
<context>
<name>SplashScreen</name>
<message>
<location filename="../splashscreen.cpp" line="+22"/>
<source>The Parrotcoin developers</source>
<translation>Οι προγραμματιστές του Parrotcoin</translation>
</message>
<message>
<location line="+1"/>
<source>[testnet]</source>
<translation>[testnet]</translation>
</message>
</context>
<context>
<name>TransactionDesc</name>
<message>
<location filename="../transactiondesc.cpp" line="+20"/>
<source>Open until %1</source>
<translation>Ανοιχτό μέχρι %1</translation>
</message>
<message>
<location line="+6"/>
<source>%1/offline</source>
<translation>%1/χωρίς σύνδεση</translation>
</message>
<message>
<location line="+2"/>
<source>%1/unconfirmed</source>
<translation>%1/χωρίς επιβεβαίωση</translation>
</message>
<message>
<location line="+2"/>
<source>%1 confirmations</source>
<translation>%1 επιβεβαιώσεις</translation>
</message>
<message>
<location line="+18"/>
<source>Status</source>
<translation>Κατάσταση</translation>
</message>
<message numerus="yes">
<location line="+7"/>
<source>, broadcast through %n node(s)</source>
<translation><numerusform>, έχει μεταδοθεί μέσω %n κόμβου</numerusform><numerusform>, έχει μεταδοθεί μέσω %n κόμβων</numerusform></translation>
</message>
<message>
<location line="+4"/>
<source>Date</source>
<translation>Ημερομηνία</translation>
</message>
<message>
<location line="+7"/>
<source>Source</source>
<translation>Πηγή</translation>
</message>
<message>
<location line="+0"/>
<source>Generated</source>
<translation>Δημιουργήθηκε</translation>
</message>
<message>
<location line="+5"/>
<location line="+17"/>
<source>From</source>
<translation>Από</translation>
</message>
<message>
<location line="+1"/>
<location line="+22"/>
<location line="+58"/>
<source>To</source>
<translation>Προς</translation>
</message>
<message>
<location line="-77"/>
<location line="+2"/>
<source>own address</source>
<translation> δική σας διεύθυνση </translation>
</message>
<message>
<location line="-2"/>
<source>label</source>
<translation>επιγραφή</translation>
</message>
<message>
<location line="+37"/>
<location line="+12"/>
<location line="+45"/>
<location line="+17"/>
<location line="+30"/>
<source>Credit</source>
<translation>Πίστωση </translation>
</message>
<message numerus="yes">
<location line="-102"/>
<source>matures in %n more block(s)</source>
<translation><numerusform>ωρίμανση σε %n επιπλέον μπλοκ</numerusform><numerusform>ωρίμανση σε %n επιπλέον μπλοκ</numerusform></translation>
</message>
<message>
<location line="+2"/>
<source>not accepted</source>
<translation>μη αποδεκτό</translation>
</message>
<message>
<location line="+44"/>
<location line="+8"/>
<location line="+15"/>
<location line="+30"/>
<source>Debit</source>
<translation>Χρέωση</translation>
</message>
<message>
<location line="-39"/>
<source>Transaction fee</source>
<translation>Τέλος συναλλαγής </translation>
</message>
<message>
<location line="+16"/>
<source>Net amount</source>
<translation>Καθαρό ποσό</translation>
</message>
<message>
<location line="+6"/>
<source>Message</source>
<translation>Μήνυμα</translation>
</message>
<message>
<location line="+2"/>
<source>Comment</source>
<translation>Σχόλιο</translation>
</message>
<message>
<location line="+2"/>
<source>Transaction ID</source>
<translation>ID Συναλλαγής</translation>
</message>
<message>
<location line="+3"/>
<source>Generated coins must mature 120 blocks before they can be spent. When you generated this block, it was broadcast to the network to be added to the block chain. If it fails to get into the chain, its state will change to "not accepted" and it won't be spendable. This may occasionally happen if another node generates a block within a few seconds of yours.</source>
<translation>Πρέπει να περιμένετε 120 μπλοκ πριν μπορέσετε να χρησιμοποιήσετε τα νομίσματα που έχετε δημιουργήσει. Το μπλοκ που δημιουργήσατε μεταδόθηκε στο δίκτυο για να συμπεριληφθεί στην αλυσίδα των μπλοκ. Αν δεν μπει σε αυτή θα μετατραπεί σε "μη αποδεκτό" και δε θα μπορεί να καταναλωθεί. Αυτό συμβαίνει σπάνια όταν κάποιος άλλος κόμβος δημιουργήσει ένα μπλοκ λίγα δευτερόλεπτα πριν από εσάς.</translation>
</message>
<message>
<location line="+7"/>
<source>Debug information</source>
<translation>Πληροφορίες αποσφαλμάτωσης</translation>
</message>
<message>
<location line="+8"/>
<source>Transaction</source>
<translation>Συναλλαγή</translation>
</message>
<message>
<location line="+3"/>
<source>Inputs</source>
<translation>Εισροές</translation>
</message>
<message>
<location line="+23"/>
<source>Amount</source>
<translation>Ποσό</translation>
</message>
<message>
<location line="+1"/>
<source>true</source>
<translation>αληθής</translation>
</message>
<message>
<location line="+0"/>
<source>false</source>
<translation>αναληθής </translation>
</message>
<message>
<location line="-209"/>
<source>, has not been successfully broadcast yet</source>
<translation>, δεν έχει ακόμα μεταδοθεί μ' επιτυχία</translation>
</message>
<message numerus="yes">
<location line="-35"/>
<source>Open for %n more block(s)</source>
<translation><numerusform>Ανοιχτό για %n ακόμη μπλοκ</numerusform><numerusform>Ανοιχτό για %n ακόμη μπλοκ</numerusform></translation>
</message>
<message>
<location line="+70"/>
<source>unknown</source>
<translation>άγνωστο</translation>
</message>
</context>
<context>
<name>TransactionDescDialog</name>
<message>
<location filename="../forms/transactiondescdialog.ui" line="+14"/>
<source>Transaction details</source>
<translation>Λεπτομέρειες συναλλαγής</translation>
</message>
<message>
<location line="+6"/>
<source>This pane shows a detailed description of the transaction</source>
<translation>Αυτό το παράθυρο δείχνει μια λεπτομερή περιγραφή της συναλλαγής</translation>
</message>
</context>
<context>
<name>TransactionTableModel</name>
<message>
<location filename="../transactiontablemodel.cpp" line="+225"/>
<source>Date</source>
<translation>Ημερομηνία</translation>
</message>
<message>
<location line="+0"/>
<source>Type</source>
<translation>Τύπος</translation>
</message>
<message>
<location line="+0"/>
<source>Address</source>
<translation>Διεύθυνση</translation>
</message>
<message>
<location line="+0"/>
<source>Amount</source>
<translation>Ποσό</translation>
</message>
<message numerus="yes">
<location line="+57"/>
<source>Open for %n more block(s)</source>
<translation><numerusform>Ανοιχτό για %n ακόμη μπλοκ</numerusform><numerusform>Ανοιχτό για %n ακόμη μπλοκ</numerusform></translation>
</message>
<message>
<location line="+3"/>
<source>Open until %1</source>
<translation>Ανοιχτό μέχρι %1</translation>
</message>
<message>
<location line="+3"/>
<source>Offline (%1 confirmations)</source>
<translation>Χωρίς σύνδεση (%1 επικυρώσεις)</translation>
</message>
<message>
<location line="+3"/>
<source>Unconfirmed (%1 of %2 confirmations)</source>
<translation>Χωρίς επιβεβαίωση (%1 από %2 επικυρώσεις)</translation>
</message>
<message>
<location line="+3"/>
<source>Confirmed (%1 confirmations)</source>
<translation>Επικυρωμένη (%1 επικυρώσεις)</translation>
</message>
<message numerus="yes">
<location line="+8"/>
<source>Mined balance will be available when it matures in %n more block(s)</source>
<translation><numerusform>Το υπόλοιπο από την εξόρυξη θα είναι διαθέσιμο μετά από %n μπλοκ</numerusform><numerusform>Το υπόλοιπο από την εξόρυξη θα είναι διαθέσιμο μετά από %n μπλοκ</numerusform></translation>
</message>
<message>
<location line="+5"/>
<source>This block was not received by any other nodes and will probably not be accepted!</source>
<translation>Αυτό το μπλοκ δεν έχει παραληφθεί από κανέναν άλλο κόμβο και κατά πάσα πιθανότητα θα απορριφθεί!</translation>
</message>
<message>
<location line="+3"/>
<source>Generated but not accepted</source>
<translation>Δημιουργήθηκε αλλά απορρίφθηκε</translation>
</message>
<message>
<location line="+43"/>
<source>Received with</source>
<translation>Παραλαβή με</translation>
</message>
<message>
<location line="+2"/>
<source>Received from</source>
<translation>Ελήφθη από</translation>
</message>
<message>
<location line="+3"/>
<source>Sent to</source>
<translation>Αποστολή προς</translation>
</message>
<message>
<location line="+2"/>
<source>Payment to yourself</source>
<translation>Πληρωμή προς εσάς</translation>
</message>
<message>
<location line="+2"/>
<source>Mined</source>
<translation>Εξόρυξη</translation>
</message>
<message>
<location line="+38"/>
<source>(n/a)</source>
<translation>(δ/α)</translation>
</message>
<message>
<location line="+199"/>
<source>Transaction status. Hover over this field to show number of confirmations.</source>
<translation>Κατάσταση συναλλαγής. Περάστε το ποντίκι πάνω από αυτό το πεδίο για να δείτε τον αριθμό των επικυρώσεων.</translation>
</message>
<message>
<location line="+2"/>
<source>Date and time that the transaction was received.</source>
<translation>Ημερομηνία κι ώρα λήψης της συναλλαγής.</translation>
</message>
<message>
<location line="+2"/>
<source>Type of transaction.</source>
<translation>Είδος συναλλαγής.</translation>
</message>
<message>
<location line="+2"/>
<source>Destination address of transaction.</source>
<translation>Διεύθυνση αποστολής της συναλλαγής.</translation>
</message>
<message>
<location line="+2"/>
<source>Amount removed from or added to balance.</source>
<translation>Ποσό που αφαιρέθηκε ή προστέθηκε στο υπόλοιπο.</translation>
</message>
</context>
<context>
<name>TransactionView</name>
<message>
<location filename="../transactionview.cpp" line="+52"/>
<location line="+16"/>
<source>All</source>
<translation>Όλα</translation>
</message>
<message>
<location line="-15"/>
<source>Today</source>
<translation>Σήμερα</translation>
</message>
<message>
<location line="+1"/>
<source>This week</source>
<translation>Αυτή την εβδομάδα</translation>
</message>
<message>
<location line="+1"/>
<source>This month</source>
<translation>Αυτόν τον μήνα</translation>
</message>
<message>
<location line="+1"/>
<source>Last month</source>
<translation>Τον προηγούμενο μήνα</translation>
</message>
<message>
<location line="+1"/>
<source>This year</source>
<translation>Αυτό το έτος</translation>
</message>
<message>
<location line="+1"/>
<source>Range...</source>
<translation>Έκταση...</translation>
</message>
<message>
<location line="+11"/>
<source>Received with</source>
<translation>Ελήφθη με</translation>
</message>
<message>
<location line="+2"/>
<source>Sent to</source>
<translation>Απεστάλη προς</translation>
</message>
<message>
<location line="+2"/>
<source>To yourself</source>
<translation>Προς εσάς</translation>
</message>
<message>
<location line="+1"/>
<source>Mined</source>
<translation>Εξόρυξη</translation>
</message>
<message>
<location line="+1"/>
<source>Other</source>
<translation>Άλλο</translation>
</message>
<message>
<location line="+7"/>
<source>Enter address or label to search</source>
<translation>Αναζήτηση με βάση τη διεύθυνση ή την επιγραφή</translation>
</message>
<message>
<location line="+7"/>
<source>Min amount</source>
<translation>Ελάχιστο ποσό</translation>
</message>
<message>
<location line="+34"/>
<source>Copy address</source>
<translation>Αντιγραφή διεύθυνσης</translation>
</message>
<message>
<location line="+1"/>
<source>Copy label</source>
<translation>Αντιγραφή επιγραφής</translation>
</message>
<message>
<location line="+1"/>
<source>Copy amount</source>
<translation>Αντιγραφή ποσού</translation>
</message>
<message>
<location line="+1"/>
<source>Copy transaction ID</source>
<translation>Αντιγραφή του ID συναλλαγής</translation>
</message>
<message>
<location line="+1"/>
<source>Edit label</source>
<translation>Επεξεργασία επιγραφής</translation>
</message>
<message>
<location line="+1"/>
<source>Show transaction details</source>
<translation>Εμφάνιση λεπτομερειών συναλλαγής</translation>
</message>
<message>
<location line="+139"/>
<source>Export Transaction Data</source>
<translation>Εξαγωγή Στοιχείων Συναλλαγών</translation>
</message>
<message>
<location line="+1"/>
<source>Comma separated file (*.csv)</source>
<translation>Αρχείο οριοθετημένο με κόμματα (*.csv)</translation>
</message>
<message>
<location line="+8"/>
<source>Confirmed</source>
<translation>Επικυρωμένες</translation>
</message>
<message>
<location line="+1"/>
<source>Date</source>
<translation>Ημερομηνία</translation>
</message>
<message>
<location line="+1"/>
<source>Type</source>
<translation>Τύπος</translation>
</message>
<message>
<location line="+1"/>
<source>Label</source>
<translation>Επιγραφή</translation>
</message>
<message>
<location line="+1"/>
<source>Address</source>
<translation>Διεύθυνση</translation>
</message>
<message>
<location line="+1"/>
<source>Amount</source>
<translation>Ποσό</translation>
</message>
<message>
<location line="+1"/>
<source>ID</source>
<translation>ID</translation>
</message>
<message>
<location line="+4"/>
<source>Error exporting</source>
<translation>Σφάλμα εξαγωγής</translation>
</message>
<message>
<location line="+0"/>
<source>Could not write to file %1.</source>
<translation>Αδυναμία εγγραφής στο αρχείο %1.</translation>
</message>
<message>
<location line="+100"/>
<source>Range:</source>
<translation>Έκταση:</translation>
</message>
<message>
<location line="+8"/>
<source>to</source>
<translation>έως</translation>
</message>
</context>
<context>
<name>WalletModel</name>
<message>
<location filename="../walletmodel.cpp" line="+193"/>
<source>Send Coins</source>
<translation>Αποστολή νομισμάτων</translation>
</message>
</context>
<context>
<name>WalletView</name>
<message>
<location filename="../walletview.cpp" line="+42"/>
<source>&Export</source>
<translation>&Εξαγωγή</translation>
</message>
<message>
<location line="+1"/>
<source>Export the data in the current tab to a file</source>
<translation>Εξαγωγή δεδομένων καρτέλας σε αρχείο</translation>
</message>
<message>
<location line="+193"/>
<source>Backup Wallet</source>
<translation>Αντίγραφο ασφαλείας του πορτοφολιού</translation>
</message>
<message>
<location line="+0"/>
<source>Wallet Data (*.dat)</source>
<translation>Αρχεία δεδομένων πορτοφολιού (*.dat)</translation>
</message>
<message>
<location line="+3"/>
<source>Backup Failed</source>
<translation>Αποτυχία κατά τη δημιουργία αντιγράφου</translation>
</message>
<message>
<location line="+0"/>
<source>There was an error trying to save the wallet data to the new location.</source>
<translation>Παρουσιάστηκε σφάλμα κατά την αποθήκευση των δεδομένων πορτοφολιού στη νέα τοποθεσία.</translation>
</message>
<message>
<location line="+4"/>
<source>Backup Successful</source>
<translation>Η δημιουργία αντιγράφου ασφαλείας πέτυχε</translation>
</message>
<message>
<location line="+0"/>
<source>The wallet data was successfully saved to the new location.</source>
<translation>Τα δεδομένα πορτοφόλιου αποθηκεύτηκαν με επιτυχία στη νέα θέση. </translation>
</message>
</context>
<context>
<name>bitcoin-core</name>
<message>
<location filename="../bitcoinstrings.cpp" line="+94"/>
<source>Parrotcoin version</source>
<translation>Έκδοση Parrotcoin</translation>
</message>
<message>
<location line="+102"/>
<source>Usage:</source>
<translation>Χρήση:</translation>
</message>
<message>
<location line="-29"/>
<source>Send command to -server or parrotcoind</source>
<translation>Αποστολή εντολής στο -server ή στο parrotcoind</translation>
</message>
<message>
<location line="-23"/>
<source>List commands</source>
<translation>Λίστα εντολών</translation>
</message>
<message>
<location line="-12"/>
<source>Get help for a command</source>
<translation>Επεξήγηση εντολής</translation>
</message>
<message>
<location line="+24"/>
<source>Options:</source>
<translation>Επιλογές:</translation>
</message>
<message>
<location line="+24"/>
<source>Specify configuration file (default: parrotcoin.conf)</source>
<translation>Ορίστε αρχείο ρυθμίσεων (προεπιλογή: parrotcoin.conf)</translation>
</message>
<message>
<location line="+3"/>
<source>Specify pid file (default: parrotcoind.pid)</source>
<translation>Ορίστε αρχείο pid (προεπιλογή: parrotcoind.pid)</translation>
</message>
<message>
<location line="-1"/>
<source>Specify data directory</source>
<translation>Ορισμός φακέλου δεδομένων</translation>
</message>
<message>
<location line="-9"/>
<source>Set database cache size in megabytes (default: 25)</source>
<translation>Ορισμός μεγέθους προσωρινής μνήμης της βάσης δεδομένων σε megabytes (προεπιλογή: 25)</translation>
</message>
<message>
<location line="-28"/>
<source>Listen for connections on <port> (default: 9333 or testnet: 19333)</source>
<translation>Εισερχόμενες συνδέσεις στη θύρα <port> (προεπιλογή: 9333 ή στο testnet: 19333)</translation>
</message>
<message>
<location line="+5"/>
<source>Maintain at most <n> connections to peers (default: 125)</source>
<translation>Διατήρηση το πολύ <n> συνδέσεων με ομοτίμους (προεπιλογή: 125)</translation>
</message>
<message>
<location line="-48"/>
<source>Connect to a node to retrieve peer addresses, and disconnect</source>
<translation>Σύνδεση σε έναν κόμβο για ανάκτηση διευθύνσεων ομοτίμων και αποσύνδεση</translation>
</message>
<message>
<location line="+82"/>
<source>Specify your own public address</source>
<translation>Καθορίστε τη δική σας δημόσια διεύθυνση</translation>
</message>
<message>
<location line="+3"/>
<source>Threshold for disconnecting misbehaving peers (default: 100)</source>
<translation>Όριο αποσύνδεσης προβληματικών peers (προεπιλογή: 100)</translation>
</message>
<message>
<location line="-134"/>
<source>Number of seconds to keep misbehaving peers from reconnecting (default: 86400)</source>
<translation>Δευτερόλεπτα πριν επιτραπεί ξανά η σύνδεση των προβληματικών peers (προεπιλογή: 86400)</translation>
</message>
<message>
<location line="-29"/>
<source>An error occurred while setting up the RPC port %u for listening on IPv4: %s</source>
<translation>Παρουσιάστηκε σφάλμα κατά τη ρύθμιση της θύρας RPC %u για αναμονή σε IPv4: %s</translation>
</message>
<message>
<location line="+27"/>
<source>Listen for JSON-RPC connections on <port> (default: 9332 or testnet: 19332)</source>
<translation>Εισερχόμενες συνδέσεις JSON-RPC στη θύρα <port> (προεπιλογή: 9332 ή testnet: 19332)</translation>
</message>
<message>
<location line="+37"/>
<source>Accept command line and JSON-RPC commands</source>
<translation>Αποδοχή εντολών κονσόλας και JSON-RPC</translation>
</message>
<message>
<location line="+76"/>
<source>Run in the background as a daemon and accept commands</source>
<translation>Εκτέλεση στο παρασκήνιο ως δαίμονας και αποδοχή εντολών</translation>
</message>
<message>
<location line="+37"/>
<source>Use the test network</source>
<translation>Χρήση του δοκιμαστικού δικτύου</translation>
</message>
<message>
<location line="-112"/>
<source>Accept connections from outside (default: 1 if no -proxy or -connect)</source>
<translation>Αποδοχή εξωτερικών συνδέσεων (προεπιλογή: 1 χωρίς -proxy ή -connect)</translation>
</message>
<message>
<location line="-80"/>
<source>%s, you must set a rpcpassword in the configuration file:
%s
It is recommended you use the following random password:
rpcuser=parrotcoinrpc
rpcpassword=%s
(you do not need to remember this password)
The username and password MUST NOT be the same.
If the file does not exist, create it with owner-readable-only file permissions.
It is also recommended to set alertnotify so you are notified of problems;
for example: alertnotify=echo %%s | mail -s "Parrotcoin Alert" [email protected]
</source>
<translation>%s, you must set a rpcpassword in the configuration file:
%s
It is recommended you use the following random password:
rpcuser=parrotcoinrpc
rpcpassword=%s
(you do not need to remember this password)
The username and password MUST NOT be the same.
If the file does not exist, create it with owner-readable-only file permissions.
It is also recommended to set alertnotify so you are notified of problems;
for example: alertnotify=echo %%s | mail -s "Parrotcoin Alert" [email protected]
</translation>
</message>
<message>
<location line="+17"/>
<source>An error occurred while setting up the RPC port %u for listening on IPv6, falling back to IPv4: %s</source>
<translation>Παρουσιάστηκε σφάλμα κατά τη ρύθμιση της θύρας RPC %u για αναμονή σε IPv6, επιστροφή στο IPv4: %s</translation>
</message>
<message>
<location line="+3"/>
<source>Bind to given address and always listen on it. Use [host]:port notation for IPv6</source>
<translation>Δέσμευση στη δεδομένη διεύθυνση και μόνιμη αναμονή σε αυτή. Χρησιμοποιήστε τη μορφή [host]:port για IPv6</translation>
</message>
<message>
<location line="+3"/>
<source>Cannot obtain a lock on data directory %s. Parrotcoin is probably already running.</source>
<translation>Αδυναμία κλειδώματος του φακέλου δεδομένων %s. Πιθανώς το Parrotcoin να είναι ήδη ενεργό.</translation>
</message>
<message>
<location line="+3"/>
<source>Error: The transaction was rejected! This might happen if some of the coins in your wallet were already spent, such as if you used a copy of wallet.dat and coins were spent in the copy but not marked as spent here.</source>
<translation>Σφάλμα: Η συναλλαγή απορρίφθηκε! Αυτό ίσως οφείλεται στο ότι τα νομίσματά σας έχουν ήδη ξοδευτεί, π.χ. με την αντιγραφή του wallet.dat σε άλλο σύστημα και τη χρήση τους εκεί, χωρίς η συναλλαγή να έχει καταγραφεί στο παρόν σύστημα.</translation>
</message>
<message>
<location line="+4"/>
<source>Error: This transaction requires a transaction fee of at least %s because of its amount, complexity, or use of recently received funds!</source>
<translation>Σφάλμα: Αυτή η συναλλαγή απαιτεί αμοιβή συναλλαγής τουλάχιστον %s λόγω του ποσού, της πολυπλοκότητας ή της χρήσης πρόσφατα ληφθέντων κεφαλαίων!</translation>
</message>
<message>
<location line="+3"/>
<source>Execute command when a relevant alert is received (%s in cmd is replaced by message)</source>
<translation>Εκτέλεση εντολής όταν ληφθεί σχετική ειδοποίηση (%s στην εντολή αντικαθίσταται από το μήνυμα)</translation>
</message>
<message>
<location line="+3"/>
<source>Execute command when a wallet transaction changes (%s in cmd is replaced by TxID)</source>
<translation>Εκτέλεση εντολής όταν αλλάξει μια συναλλαγή του πορτοφολιού (%s στην εντολή αντικαθίσταται από το TxID)</translation>
</message>
<message>
<location line="+11"/>
<source>Set maximum size of high-priority/low-fee transactions in bytes (default: 27000)</source>
<translation>Ορισμός μέγιστου μεγέθους συναλλαγών υψηλής προτεραιότητας/χαμηλής αμοιβής σε bytes (προεπιλογή: 27000)</translation>
</message>
<message>
<location line="+6"/>
<source>This is a pre-release test build - use at your own risk - do not use for mining or merchant applications</source>
<translation>Αυτή είναι μια δοκιμαστική έκδοση προ κυκλοφορίας - χρησιμοποιήστε την με δική σας ευθύνη - μην τη χρησιμοποιείτε για εξόρυξη ή εμπορικές εφαρμογές</translation>
</message>
<message>
<location line="+5"/>
<source>Warning: -paytxfee is set very high! This is the transaction fee you will pay if you send a transaction.</source>
<translation>Προειδοποίηση: Η παράμετρος -paytxfee είναι πολύ υψηλή. Πρόκειται για την αμοιβή που θα πληρώνετε για κάθε συναλλαγή που θα στέλνετε.</translation>
</message>
<message>
<location line="+3"/>
<source>Warning: Displayed transactions may not be correct! You may need to upgrade, or other nodes may need to upgrade.</source>
<translation>Προειδοποίηση: Οι εμφανιζόμενες συναλλαγές ενδέχεται να μην είναι σωστές! Ίσως χρειάζεται να αναβαθμίσετε εσείς ή άλλοι κόμβοι.</translation>
</message>
<message>
<location line="+3"/>
<source>Warning: Please check that your computer's date and time are correct! If your clock is wrong Parrotcoin will not work properly.</source>
<translation>Προειδοποίηση: Παρακαλώ βεβαιωθείτε πως η ημερομηνία κι ώρα του συστήματός σας είναι σωστές. Αν το ρολόι του υπολογιστή σας πάει λάθος, ενδέχεται να μη λειτουργεί σωστά το Parrotcoin.</translation>
</message>
<message>
<location line="+3"/>
<source>Warning: error reading wallet.dat! All keys read correctly, but transaction data or address book entries might be missing or incorrect.</source>
<translation>Προειδοποίηση: σφάλμα κατά την ανάγνωση του wallet.dat! Όλα τα κλειδιά διαβάστηκαν σωστά, αλλά δεδομένα συναλλαγών ή καταχωρήσεις του βιβλίου διευθύνσεων ενδέχεται να λείπουν ή να είναι λανθασμένες.</translation>
</message>
<message>
<location line="+3"/>
<source>Warning: wallet.dat corrupt, data salvaged! Original wallet.dat saved as wallet.{timestamp}.bak in %s; if your balance or transactions are incorrect you should restore from a backup.</source>
<translation>Προειδοποίηση: το wallet.dat είναι κατεστραμμένο, τα δεδομένα διασώθηκαν! Το αρχικό wallet.dat αποθηκεύτηκε ως wallet.{timestamp}.bak στο %s· αν το υπόλοιπο ή οι συναλλαγές σας είναι λανθασμένες, θα πρέπει να κάνετε επαναφορά από αντίγραφο ασφαλείας.</translation>
</message>
<message>
<location line="+14"/>
<source>Attempt to recover private keys from a corrupt wallet.dat</source>
<translation>Προσπάθεια ανάκτησης ιδιωτικών κλειδιών από κατεστραμμένο wallet.dat</translation>
</message>
<message>
<location line="+2"/>
<source>Block creation options:</source>
<translation>Επιλογές δημιουργίας μπλοκ:</translation>
</message>
<message>
<location line="+5"/>
<source>Connect only to the specified node(s)</source>
<translation>Σύνδεση μόνο με ορισμένους κόμβους</translation>
</message>
<message>
<location line="+3"/>
<source>Corrupted block database detected</source>
<translation>Εντοπίστηκε κατεστραμμένη βάση δεδομένων μπλοκ</translation>
</message>
<message>
<location line="+1"/>
<source>Discover own IP address (default: 1 when listening and no -externalip)</source>
<translation>Ανακάλυψη της δικής σας διεύθυνσης IP (προεπιλογή: 1 όταν γίνεται αναμονή και χωρίς -externalip)</translation>
</message>
<message>
<location line="+1"/>
<source>Do you want to rebuild the block database now?</source>
<translation>Θέλετε να αναδημιουργηθεί τώρα η βάση δεδομένων των μπλοκ;</translation>
</message>
<message>
<location line="+2"/>
<source>Error initializing block database</source>
<translation>Σφάλμα κατά την ενεργοποίηση της βάσης δεδομένων μπλοκ</translation>
</message>
<message>
<location line="+1"/>
<source>Error initializing wallet database environment %s!</source>
<translation>Σφάλμα κατά την ενεργοποίηση της βάσης δεδομένων πορτοφόλιου %s!</translation>
</message>
<message>
<location line="+1"/>
<source>Error loading block database</source>
<translation>Σφάλμα φόρτωσης της βάσης δεδομένων των μπλοκ</translation>
</message>
<message>
<location line="+4"/>
<source>Error opening block database</source>
<translation>Σφάλμα ανοίγματος της βάσης δεδομένων των μπλοκ</translation>
</message>
<message>
<location line="+2"/>
<source>Error: Disk space is low!</source>
<translation>Σφάλμα: Ο χώρος στο δίσκο είναι χαμηλός!</translation>
</message>
<message>
<location line="+1"/>
<source>Error: Wallet locked, unable to create transaction!</source>
<translation>Σφάλμα: το πορτοφόλι είναι κλειδωμένο, δεν είναι δυνατή η δημιουργία συναλλαγής!</translation>
</message>
<message>
<location line="+1"/>
<source>Error: system error: </source>
<translation>Σφάλμα: σφάλμα συστήματος: </translation>
</message>
<message>
<location line="+1"/>
<source>Failed to listen on any port. Use -listen=0 if you want this.</source>
<translation>Αποτυχία αναμονής σε οποιαδήποτε θύρα. Χρησιμοποιήστε -listen=0 αν το επιθυμείτε.</translation>
</message>
<message>
<location line="+1"/>
<source>Failed to read block info</source>
<translation>Αποτυχία ανάγνωσης πληροφοριών μπλοκ</translation>
</message>
<message>
<location line="+1"/>
<source>Failed to read block</source>
<translation>Η ανάγνωση του μπλοκ απέτυχε</translation>
</message>
<message>
<location line="+1"/>
<source>Failed to sync block index</source>
<translation>Ο συγχρονισμός του ευρετηρίου μπλοκ απέτυχε</translation>
</message>
<message>
<location line="+1"/>
<source>Failed to write block index</source>
<translation>Η εγγραφή του ευρετηρίου μπλοκ απέτυχε</translation>
</message>
<message>
<location line="+1"/>
<source>Failed to write block info</source>
<translation>Η εγγραφή των πληροφοριών μπλοκ απέτυχε</translation>
</message>
<message>
<location line="+1"/>
<source>Failed to write block</source>
<translation>Η εγγραφή του μπλοκ απέτυχε</translation>
</message>
<message>
<location line="+1"/>
<source>Failed to write file info</source>
<translation>Αδυναμία εγγραφής πληροφοριών αρχείου</translation>
</message>
<message>
<location line="+1"/>
<source>Failed to write to coin database</source>
<translation>Αποτυχία εγγραφής στη βάση δεδομένων νομίσματος</translation>
</message>
<message>
<location line="+1"/>
<source>Failed to write transaction index</source>
<translation>Αποτυχία εγγραφής δείκτη συναλλαγών </translation>
</message>
<message>
<location line="+1"/>
<source>Failed to write undo data</source>
<translation>Αποτυχία εγγραφής αναίρεσης δεδομένων </translation>
</message>
<message>
<location line="+2"/>
<source>Find peers using DNS lookup (default: 1 unless -connect)</source>
<translation>Εύρεση ομοτίμων μέσω αναζήτησης DNS (προεπιλογή: 1 εκτός αν χρησιμοποιείται το -connect)</translation>
</message>
<message>
<location line="+1"/>
<source>Generate coins (default: 0)</source>
<translation>Δημιουργία νομισμάτων (προεπιλογή: 0)</translation>
</message>
<message>
<location line="+2"/>
<source>How many blocks to check at startup (default: 288, 0 = all)</source>
<translation>Πόσα μπλοκ να ελεγχθούν κατά την εκκίνηση (προεπιλογή: 288, 0 = όλα)</translation>
</message>
<message>
<location line="+1"/>
<source>How thorough the block verification is (0-4, default: 3)</source>
<translation>Πόσο εξονυχιστική να είναι η επαλήθευση των μπλοκ (0-4, προεπιλογή: 3)</translation>
</message>
<message>
<location line="+19"/>
<source>Not enough file descriptors available.</source>
<translation>Δεν υπάρχουν αρκετοί διαθέσιμοι περιγραφείς αρχείων.</translation>
</message>
<message>
<location line="+8"/>
<source>Rebuild block chain index from current blk000??.dat files</source>
<translation>Αναδημιουργία του ευρετηρίου της αλυσίδας μπλοκ από τα τρέχοντα αρχεία blk000??.dat</translation>
</message>
<message>
<location line="+16"/>
<source>Set the number of threads to service RPC calls (default: 4)</source>
<translation>Ορισμός αριθμού νημάτων εξυπηρέτησης κλήσεων RPC (προεπιλογή: 4)</translation>
</message>
<message>
<location line="+26"/>
<source>Verifying blocks...</source>
<translation>Επαλήθευση των μπλοκ... </translation>
</message>
<message>
<location line="+1"/>
<source>Verifying wallet...</source>
<translation>Επαλήθευση πορτοφολιού...</translation>
</message>
<message>
<location line="-69"/>
<source>Imports blocks from external blk000??.dat file</source>
<translation>Εισαγωγή μπλοκ από εξωτερικό αρχείο blk000??.dat</translation>
</message>
<message>
<location line="-76"/>
<source>Set the number of script verification threads (up to 16, 0 = auto, <0 = leave that many cores free, default: 0)</source>
<translation>Ορισμός αριθμού νημάτων επαλήθευσης σεναρίων (έως 16, 0 = αυτόματα, <0 = άφησε τόσους πυρήνες ελεύθερους, προεπιλογή: 0)</translation>
</message>
<message>
<location line="+77"/>
<source>Information</source>
<translation>Πληροφορία</translation>
</message>
<message>
<location line="+3"/>
<source>Invalid -tor address: '%s'</source>
<translation>Μη έγκυρη διεύθυνση -tor: '%s'</translation>
</message>
<message>
<location line="+1"/>
<source>Invalid amount for -minrelaytxfee=<amount>: '%s'</source>
<translation>Μη έγκυρο ποσό για την παράμετρο -minrelaytxfee=<amount>: '%s'</translation>
</message>
<message>
<location line="+1"/>
<source>Invalid amount for -mintxfee=<amount>: '%s'</source>
<translation>Μη έγκυρο ποσό για την παράμετρο -mintxfee=<amount>: '%s'</translation>
</message>
<message>
<location line="+8"/>
<source>Maintain a full transaction index (default: 0)</source>
<translation>Διατήρηση πλήρους ευρετηρίου συναλλαγών (προεπιλογή: 0)</translation>
</message>
<message>
<location line="+2"/>
<source>Maximum per-connection receive buffer, <n>*1000 bytes (default: 5000)</source>
<translation>Μέγιστος buffer λήψης ανά σύνδεση, <n>*1000 bytes (προεπιλογή: 5000)</translation>
</message>
<message>
<location line="+1"/>
<source>Maximum per-connection send buffer, <n>*1000 bytes (default: 1000)</source>
<translation>Μέγιστος buffer αποστολής ανά σύνδεση, <n>*1000 bytes (προεπιλογή: 1000)</translation>
</message>
<message>
<location line="+2"/>
<source>Only accept block chain matching built-in checkpoints (default: 1)</source>
<translation>Αποδοχή μόνο αλυσίδας μπλοκ που ταιριάζει με τα ενσωματωμένα σημεία ελέγχου (προεπιλογή: 1)</translation>
</message>
<message>
<location line="+1"/>
<source>Only connect to nodes in network <net> (IPv4, IPv6 or Tor)</source>
<translation>Σύνδεση μόνο σε κόμβους του δικτύου <net> (IPv4, IPv6 ή Tor)</translation>
</message>
<message>
<location line="+2"/>
<source>Output extra debugging information. Implies all other -debug* options</source>
<translation>Έξοδος επιπλέον πληροφοριών αποσφαλμάτωσης. Συνεπάγεται όλες τις άλλες επιλογές -debug*</translation>
</message>
<message>
<location line="+1"/>
<source>Output extra network debugging information</source>
<translation>Έξοδος επιπλέον πληροφοριών αποσφαλμάτωσης δικτύου</translation>
</message>
<message>
<location line="+2"/>
<source>Prepend debug output with timestamp</source>
<translation>Προσθήκη χρονοσφραγίδας στην έξοδο αποσφαλμάτωσης</translation>
</message>
<message>
<location line="+5"/>
<source>SSL options: (see the Parrotcoin Wiki for SSL setup instructions)</source>
<translation>Ρυθμίσεις SSL: (ανατρέξτε στο Parrotcoin Wiki για οδηγίες ρυθμίσεων SSL)</translation>
</message>
<message>
<location line="+1"/>
<source>Select the version of socks proxy to use (4-5, default: 5)</source>
<translation>Επιλέξτε την έκδοση του διαμεσολαβητή socks που θα χρησιμοποιηθεί (4-5, προεπιλογή: 5)</translation>
</message>
<message>
<location line="+3"/>
<source>Send trace/debug info to console instead of debug.log file</source>
<translation>Αποστολή πληροφοριών εντοπισμού σφαλμάτων στην κονσόλα αντί του αρχείου debug.log</translation>
</message>
<message>
<location line="+1"/>
<source>Send trace/debug info to debugger</source>
<translation>Αποστολή πληροφοριών εντοπισμού σφαλμάτων στον debugger</translation>
</message>
<message>
<location line="+5"/>
<source>Set maximum block size in bytes (default: 250000)</source>
<translation>Ορισμός μέγιστου μεγέθους μπλοκ σε bytes (προεπιλογή: 250000)</translation>
</message>
<message>
<location line="+1"/>
<source>Set minimum block size in bytes (default: 0)</source>
<translation>Ορισμός ελάχιστου μεγέθους μπλοκ σε bytes (προεπιλογή: 0)</translation>
</message>
<message>
<location line="+2"/>
<source>Shrink debug.log file on client startup (default: 1 when no -debug)</source>
<translation>Συρρίκνωση του αρχείου debug.log κατά την εκκίνηση του πελάτη (προεπιλογή: 1 όταν δεν υπάρχει -debug)</translation>
</message>
<message>
<location line="+1"/>
<source>Signing transaction failed</source>
<translation>Η υπογραφή συναλλαγής απέτυχε </translation>
</message>
<message>
<location line="+2"/>
<source>Specify connection timeout in milliseconds (default: 5000)</source>
<translation>Ορισμός χρονικού ορίου σύνδεσης σε χιλιοστά του δευτερολέπτου (προεπιλογή: 5000)</translation>
</message>
<message>
<location line="+4"/>
<source>System error: </source>
<translation>Σφάλμα συστήματος: </translation>
</message>
<message>
<location line="+4"/>
<source>Transaction amount too small</source>
<translation>Το ποσό της συναλλαγής είναι πολύ μικρό</translation>
</message>
<message>
<location line="+1"/>
<source>Transaction amounts must be positive</source>
<translation>Τα ποσά των συναλλαγών πρέπει να είναι θετικά</translation>
</message>
<message>
<location line="+1"/>
<source>Transaction too large</source>
<translation>Η συναλλαγή είναι πολύ μεγάλη</translation>
</message>
<message>
<location line="+7"/>
<source>Use UPnP to map the listening port (default: 0)</source>
<translation>Χρήση UPnP για τη χαρτογράφηση της θύρας αναμονής (προεπιλογή: 0)</translation>
</message>
<message>
<location line="+1"/>
<source>Use UPnP to map the listening port (default: 1 when listening)</source>
<translation>Χρήση UPnP για τη χαρτογράφηση της θύρας αναμονής (προεπιλογή: 1 όταν γίνεται αναμονή)</translation>
</message>
<message>
<location line="+1"/>
<source>Use proxy to reach tor hidden services (default: same as -proxy)</source>
<translation>Χρήση διαμεσολαβητή για πρόσβαση σε κρυφές υπηρεσίες Tor (προεπιλογή: ίδιο με το -proxy)</translation>
</message>
<message>
<location line="+2"/>
<source>Username for JSON-RPC connections</source>
<translation>Όνομα χρήστη για τις συνδέσεις JSON-RPC</translation>
</message>
<message>
<location line="+4"/>
<source>Warning</source>
<translation>Προειδοποίηση</translation>
</message>
<message>
<location line="+1"/>
<source>Warning: This version is obsolete, upgrade required!</source>
<translation>Προειδοποίηση: Αυτή η έκδοση είναι ξεπερασμένη, απαιτείται αναβάθμιση!</translation>
</message>
<message>
<location line="+1"/>
<source>You need to rebuild the databases using -reindex to change -txindex</source>
<translation>Πρέπει να αναδημιουργήσετε τις βάσεις δεδομένων χρησιμοποιώντας το -reindex για να αλλάξετε το -txindex</translation>
</message>
<message>
<location line="+1"/>
<source>wallet.dat corrupt, salvage failed</source>
<translation>Το wallet.dat είναι κατεστραμμένο, η διάσωση απέτυχε</translation>
</message>
<message>
<location line="-50"/>
<source>Password for JSON-RPC connections</source>
<translation>Κωδικός για τις συνδέσεις JSON-RPC</translation>
</message>
<message>
<location line="-67"/>
<source>Allow JSON-RPC connections from specified IP address</source>
<translation>Αποδοχή συνδέσεων JSON-RPC από συγκεκριμένη διεύθυνση IP</translation>
</message>
<message>
<location line="+76"/>
<source>Send commands to node running on <ip> (default: 127.0.0.1)</source>
<translation>Αποστολή εντολών στον κόμβο <ip> (προεπιλογή: 127.0.0.1)</translation>
</message>
<message>
<location line="-120"/>
<source>Execute command when the best block changes (%s in cmd is replaced by block hash)</source>
<translation>Εκτέλεσε την εντολή όταν το καλύτερο μπλοκ αλλάξει(%s στην εντολή αντικαθίσταται από το hash του μπλοκ)</translation>
</message>
<message>
<location line="+147"/>
<source>Upgrade wallet to latest format</source>
<translation>Αναβάθμισε το πορτοφόλι στην τελευταία έκδοση</translation>
</message>
<message>
<location line="-21"/>
<source>Set key pool size to <n> (default: 100)</source>
<translation>Όριο πλήθους κλειδιών pool <n> (προεπιλογή: 100)</translation>
</message>
<message>
<location line="-12"/>
<source>Rescan the block chain for missing wallet transactions</source>
<translation>Επανέλεγχος της αλυσίδας μπλοκ για απούσες συναλλαγές</translation>
</message>
<message>
<location line="+35"/>
<source>Use OpenSSL (https) for JSON-RPC connections</source>
<translation>Χρήση του OpenSSL (https) για συνδέσεις JSON-RPC</translation>
</message>
<message>
<location line="-26"/>
<source>Server certificate file (default: server.cert)</source>
<translation>Αρχείο πιστοποιητικού του διακομιστή (προεπιλογή: server.cert)</translation>
</message>
<message>
<location line="+1"/>
<source>Server private key (default: server.pem)</source>
<translation>Προσωπικό κλειδί του διακομιστή (προεπιλογή: server.pem)</translation>
</message>
<message>
<location line="-151"/>
<source>Acceptable ciphers (default: TLSv1+HIGH:!SSLv2:!aNULL:!eNULL:!AH:!3DES:@STRENGTH)</source>
<translation>Αποδεκτά κρυπτογραφήματα (προεπιλογή: TLSv1+HIGH:!SSLv2:!aNULL:!eNULL:!AH:!3DES:@STRENGTH)</translation>
</message>
<message>
<location line="+165"/>
<source>This help message</source>
<translation>Αυτό το κείμενο βοήθειας</translation>
</message>
<message>
<location line="+6"/>
<source>Unable to bind to %s on this computer (bind returned error %d, %s)</source>
<translation>Αδύνατη η σύνδεση με τη θύρα %s αυτού του υπολογιστή (bind returned error %d, %s) </translation>
</message>
<message>
<location line="-91"/>
<source>Connect through socks proxy</source>
<translation>Σύνδεση μέσω διαμεσολαβητή socks</translation>
</message>
<message>
<location line="-10"/>
<source>Allow DNS lookups for -addnode, -seednode and -connect</source>
<translation>Να επιτρέπονται αναζητήσεις DNS για τα -addnode, -seednode και -connect</translation>
</message>
<message>
<location line="+55"/>
<source>Loading addresses...</source>
<translation>Φόρτωση διευθύνσεων...</translation>
</message>
<message>
<location line="-35"/>
<source>Error loading wallet.dat: Wallet corrupted</source>
<translation>Σφάλμα φόρτωσης wallet.dat: Κατεστραμμένο Πορτοφόλι</translation>
</message>
<message>
<location line="+1"/>
<source>Error loading wallet.dat: Wallet requires newer version of Parrotcoin</source>
<translation>Σφάλμα φόρτωσης wallet.dat: Το Πορτοφόλι απαιτεί μια νεότερη έκδοση του Parrotcoin</translation>
</message>
<message>
<location line="+93"/>
<source>Wallet needed to be rewritten: restart Parrotcoin to complete</source>
<translation>Απαιτείται η επανεγγραφή του Πορτοφολιού, η οποία θα ολοκληρωθεί στην επανεκκίνηση του Parrotcoin</translation>
</message>
<message>
<location line="-95"/>
<source>Error loading wallet.dat</source>
<translation>Σφάλμα φόρτωσης αρχείου wallet.dat</translation>
</message>
<message>
<location line="+28"/>
<source>Invalid -proxy address: '%s'</source>
<translation>Μη έγκυρη διεύθυνση -proxy: '%s'</translation>
</message>
<message>
<location line="+56"/>
<source>Unknown network specified in -onlynet: '%s'</source>
<translation>Άγνωστο δίκτυο ορίστηκε στο -onlynet: '%s'</translation>
</message>
<message>
<location line="-1"/>
<source>Unknown -socks proxy version requested: %i</source>
<translation>Ζητήθηκε άγνωστη έκδοση διαμεσολαβητή -socks: %i</translation>
</message>
<message>
<location line="-96"/>
<source>Cannot resolve -bind address: '%s'</source>
<translation>Αδυναμία επίλυσης της διεύθυνσης -bind: '%s'</translation>
</message>
<message>
<location line="+1"/>
<source>Cannot resolve -externalip address: '%s'</source>
<translation>Αδυναμία επίλυσης της διεύθυνσης -externalip: '%s'</translation>
</message>
<message>
<location line="+44"/>
<source>Invalid amount for -paytxfee=<amount>: '%s'</source>
<translation>Μη έγκυρο ποσό για την παράμετρο -paytxfee=<amount>: '%s'</translation>
</message>
<message>
<location line="+1"/>
<source>Invalid amount</source>
<translation>Μη έγκυρο ποσό</translation>
</message>
<message>
<location line="-6"/>
<source>Insufficient funds</source>
<translation>Ανεπαρκές κεφάλαιο</translation>
</message>
<message>
<location line="+10"/>
<source>Loading block index...</source>
<translation>Φόρτωση ευρετηρίου μπλοκ...</translation>
</message>
<message>
<location line="-57"/>
<source>Add a node to connect to and attempt to keep the connection open</source>
<translation>Προσέθεσε ένα κόμβο για σύνδεση και προσπάθησε να κρατήσεις την σύνδεση ανοιχτή</translation>
</message>
<message>
<location line="-25"/>
<source>Unable to bind to %s on this computer. Parrotcoin is probably already running.</source>
<translation>Αδύνατη η σύνδεση με τη θύρα %s αυτού του υπολογιστή. Το Parrotcoin είναι πιθανώς ήδη ενεργό.</translation>
</message>
<message>
<location line="+64"/>
<source>Fee per KB to add to transactions you send</source>
<translation>Αμοιβή ανά KB που θα προστίθεται στις συναλλαγές που στέλνεις</translation>
</message>
<message>
<location line="+19"/>
<source>Loading wallet...</source>
<translation>Φόρτωση πορτοφολιού...</translation>
</message>
<message>
<location line="-52"/>
<source>Cannot downgrade wallet</source>
<translation>Δεν μπορώ να υποβαθμίσω το πορτοφόλι</translation>
</message>
<message>
<location line="+3"/>
<source>Cannot write default address</source>
<translation>Δεν μπορώ να γράψω την προεπιλεγμένη διεύθυνση</translation>
</message>
<message>
<location line="+64"/>
<source>Rescanning...</source>
<translation>Επανέλεγχος...</translation>
</message>
<message>
<location line="-57"/>
<source>Done loading</source>
<translation>Η φόρτωση ολοκληρώθηκε</translation>
</message>
<message>
<location line="+82"/>
<source>To use the %s option</source>
<translation>Για να χρησιμοποιήσετε την επιλογή %s</translation>
</message>
<message>
<location line="-74"/>
<source>Error</source>
<translation>Σφάλμα</translation>
</message>
<message>
<location line="-31"/>
<source>You must set rpcpassword=<password> in the configuration file:
%s
If the file does not exist, create it with owner-readable-only file permissions.</source>
<translation>Πρέπει να ορίσετε rpcpassword=<password> στο αρχείο ρυθμίσεων:
%s
Εάν το αρχείο δεν υπάρχει, δημιουργήστε το με δικαιώματα ανάγνωσης μόνο από τον ιδιοκτήτη.</translation>
</message>
</context>
</TS>
|
</message>
<message>
|
typecoders_test.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Unit tests for the typecoders module."""
# pytype: skip-file
import unittest
from apache_beam.coders import coders
from apache_beam.coders import typecoders
from apache_beam.internal import pickler
from apache_beam.tools import utils
from apache_beam.typehints import typehints
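# A simple value type with value-based equality and hashing, so decoded
# instances compare equal to the originals in the round-trip checks below.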
class CustomClass(object):
def __init__(self, n):
self.number = n
def __eq__(self, other):
return self.number == other.number
def __hash__(self):
return self.number
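# A minimal custom Coder that round-trips a CustomClass through the decimal
# text form of its wrapped number.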
class CustomCoder(coders.Coder):
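  """Coder that round-trips a CustomClass via the decimal string of its number."""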
def encode(self, value):
return str(value.number)
def decode(self, encoded):
return CustomClass(int(encoded))
def is_deterministic(self):
    # This coder is deterministic. Though we don't need this coder to be
# deterministic for this test, we annotate this as such to follow best
# practices.
return True
class TypeCodersTest(unittest.TestCase):
def setUp(self):
try:
utils.check_compiled('apache_beam.coders')
except RuntimeError:
self.skipTest('Cython is not installed')
def test_register_non_type_coder(self):
coder = CustomCoder()
with self.assertRaises(TypeError) as e:
# When registering a coder the coder class must be specified.
typecoders.registry.register_coder(CustomClass, coder)
self.assertEqual(
        str(e.exception),
'Coder registration requires a coder class object. '
'Received %r instead.' % coder)
def test_get_coder_with_custom_coder(self):
typecoders.registry.register_coder(CustomClass, CustomCoder)
self.assertEqual(
CustomCoder, typecoders.registry.get_coder(CustomClass).__class__)
def test_get_coder_with_composite_custom_coder(self):
typecoders.registry.register_coder(CustomClass, CustomCoder)
coder = typecoders.registry.get_coder(typehints.KV[CustomClass, str])
revived_coder = pickler.loads(pickler.dumps(coder))
self.assertEqual(
(CustomClass(123), 'abc'),
revived_coder.decode(revived_coder.encode((CustomClass(123), 'abc'))))
def test_get_coder_with_standard_coder(self):
self.assertEqual(
coders.BytesCoder, typecoders.registry.get_coder(bytes).__class__)
def test_fallbackcoder(self):
coder = typecoders.registry.get_coder(typehints.Any)
self.assertEqual(('abc', 123), coder.decode(coder.encode(('abc', 123))))
def test_get_coder_can_be_pickled(self):
coder = typecoders.registry.get_coder(typehints.Tuple[str, int])
revived_coder = pickler.loads(pickler.dumps(coder))
self.assertEqual(('abc', 123),
revived_coder.decode(revived_coder.encode(('abc', 123))))
def test_standard_int_coder(self):
real_coder = typecoders.registry.get_coder(int)
expected_coder = coders.VarIntCoder()
self.assertEqual(real_coder.encode(0x0404), expected_coder.encode(0x0404))
self.assertEqual(0x0404, real_coder.decode(real_coder.encode(0x0404)))
self.assertEqual(
real_coder.encode(0x040404040404),
expected_coder.encode(0x040404040404))
self.assertEqual(
|
def test_standard_str_coder(self):
real_coder = typecoders.registry.get_coder(bytes)
expected_coder = coders.BytesCoder()
self.assertEqual(real_coder.encode(b'abc'), expected_coder.encode(b'abc'))
self.assertEqual(b'abc', real_coder.decode(real_coder.encode(b'abc')))
def test_standard_bool_coder(self):
real_coder = typecoders.registry.get_coder(bool)
expected_coder = coders.BooleanCoder()
self.assertEqual(real_coder.encode(True), expected_coder.encode(True))
self.assertEqual(True, real_coder.decode(real_coder.encode(True)))
self.assertEqual(real_coder.encode(False), expected_coder.encode(False))
self.assertEqual(False, real_coder.decode(real_coder.encode(False)))
def test_iterable_coder(self):
real_coder = typecoders.registry.get_coder(typehints.Iterable[bytes])
expected_coder = coders.IterableCoder(coders.BytesCoder())
values = [b'abc', b'xyz']
self.assertEqual(expected_coder, real_coder)
self.assertEqual(real_coder.encode(values), expected_coder.encode(values))
def test_list_coder(self):
real_coder = typecoders.registry.get_coder(typehints.List[bytes])
expected_coder = coders.IterableCoder(coders.BytesCoder())
values = [b'abc', b'xyz']
self.assertEqual(expected_coder, real_coder)
self.assertEqual(real_coder.encode(values), expected_coder.encode(values))
# IterableCoder.decode() always returns a list. Its implementation,
# IterableCoderImpl, *can* return a non-list if it is provided a read_state
# object, but this is not possible using the atomic IterableCoder interface.
self.assertIs(
list, type(expected_coder.decode(expected_coder.encode(values))))
def test_nullable_coder(self):
expected_coder = coders.NullableCoder(coders.BytesCoder())
real_coder = typecoders.registry.get_coder(typehints.Optional(bytes))
self.assertEqual(expected_coder, real_coder)
self.assertEqual(expected_coder.encode(None), real_coder.encode(None))
self.assertEqual(expected_coder.encode(b'abc'), real_coder.encode(b'abc'))
if __name__ == '__main__':
unittest.main()
|
0x040404040404, real_coder.decode(real_coder.encode(0x040404040404)))
|
test.rs
|
use anyhow::{Error, Result};
use std::{
cmp,
sync::{Arc, Mutex},
task::{Poll, Waker},
};
pub struct HostShellStdin {
state: Arc<Mutex<MockStdin>>,
}
struct MockStdin {
data: Vec<u8>,
closed: bool,
wakers: Vec<Waker>,
}
pub struct HostShellStdout {
state: Arc<Mutex<Vec<u8>>>,
}
pub struct HostShellResizeWatcher {
state: Arc<Mutex<MockResizeWatcher>>,
}
struct MockResizeWatcher {
data: Vec<(u16, u16)>,
closed: bool,
wakers: Vec<Waker>,
}
pub struct HostShell {
stdin: Arc<Mutex<MockStdin>>,
stdout: Arc<Mutex<Vec<u8>>>,
resize: Arc<Mutex<MockResizeWatcher>>,
}
impl HostShellStdin {
pub async fn read(&mut self, buff: &mut [u8]) -> Result<usize>
|
}
impl HostShellStdout {
pub async fn write(&mut self, buff: &[u8]) -> Result<()> {
let mut stdout = self.state.lock().unwrap();
stdout.extend_from_slice(buff);
Ok(())
}
}
impl HostShellResizeWatcher {
pub async fn next(&mut self) -> Result<(u16, u16)> {
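        // Poll until a resize event is queued; when none is available, park the
        // task by storing its waker, which send_resize() wakes later.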
futures::future::poll_fn(|cx| {
let mut resize = self.state.lock().unwrap();
            if resize.data.is_empty() {
if resize.closed {
return Poll::Ready(Err(Error::msg("resize watcher closed")));
};
resize.wakers.push(cx.waker().clone());
return Poll::Pending;
}
Poll::Ready(Ok(resize.data.remove(0)))
})
.await
}
}
impl HostShell {
pub fn new() -> Result<Self> {
Ok(Self {
stdin: Arc::new(Mutex::new(MockStdin {
data: vec![],
closed: false,
wakers: vec![],
})),
stdout: Arc::new(Mutex::new(vec![])),
resize: Arc::new(Mutex::new(MockResizeWatcher {
data: vec![],
closed: false,
wakers: vec![],
})),
})
}
pub async fn println(&self, output: &str) {
let mut stdout = self.stdout.lock().unwrap();
stdout.extend_from_slice(output.as_bytes());
}
pub fn enable_raw_mode(&mut self) -> Result<()> {
Ok(())
}
pub fn disable_raw_mode(&mut self) -> Result<()> {
Ok(())
}
pub fn stdin(&self) -> Result<HostShellStdin> {
Ok(HostShellStdin {
state: Arc::clone(&self.stdin),
})
}
pub fn stdout(&self) -> Result<HostShellStdout> {
Ok(HostShellStdout {
state: Arc::clone(&self.stdout),
})
}
pub fn resize_watcher(&self) -> Result<HostShellResizeWatcher> {
Ok(HostShellResizeWatcher {
state: Arc::clone(&self.resize),
})
}
pub fn term(&self) -> Result<String> {
Ok("MOCK".to_owned())
}
pub async fn size(&self) -> Result<(u16, u16)> {
Ok((100, 100))
}
pub fn write_to_stdin(&self, buf: &[u8]) {
let mut stdin = self.stdin.lock().unwrap();
stdin.data.extend_from_slice(buf);
stdin.wakers.drain(..).map(|i| i.wake()).for_each(drop);
}
pub fn drain_stdout(&self) -> Vec<u8> {
        let mut stdout = self.stdout.lock().unwrap();
        stdout.drain(..).collect()
}
pub fn send_resize(&self, size: (u16, u16)) {
let mut resize = self.resize.lock().unwrap();
resize.data.push(size);
resize.wakers.drain(..).map(|i| i.wake()).for_each(drop);
}
}
impl Clone for HostShell {
fn clone(&self) -> Self {
Self {
stdin: Arc::clone(&self.stdin),
stdout: Arc::clone(&self.stdout),
resize: Arc::clone(&self.resize),
}
}
}
|
{
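        // Copy up to buff.len() bytes out of the shared stdin buffer; when it is
        // empty, report EOF if the stream is closed, otherwise park the task by
        // storing its waker, which write_to_stdin() wakes later.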
futures::future::poll_fn(|cx| {
let mut stdin = self.state.lock().unwrap();
            if stdin.data.is_empty() {
if stdin.closed {
return Poll::Ready(Ok(0));
};
stdin.wakers.push(cx.waker().clone());
return Poll::Pending;
}
let len = cmp::min(buff.len(), stdin.data.len());
buff[..len].copy_from_slice(&stdin.data[..len]);
stdin.data.drain(..len);
Poll::Ready(Ok(len))
})
.await
}
|
PassphraseAutocomplete.tsx
|
import React, { useEffect, useState } from 'react'
import { ScrollView } from 'react-native'
import { useTranslation } from 'react-i18next'
import MatchingWord from './MatchingWord'
import wordlist from '../../../constants/wordlists/english.json'
import TextInput from '../../../components/TextInput'
import Text from '../../../components/Text'
import TextTransform from '../../../components/TextTransform'
import Box from '../../../components/Box'
type Props = {
onSelectWord: (fullWord: string, idx: number) => void
|
wordIdx: number
}
export const TOTAL_WORDS = 12
const PassphraseAutocomplete = ({ onSelectWord, wordIdx }: Props) => {
const [word, setWord] = useState('')
const [matchingWords, setMatchingWords] = useState<Array<string>>([])
const { t } = useTranslation()
  const ordinal = wordIdx < TOTAL_WORDS ? t(`ordinals.${wordIdx}`) : ''
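  // Recompute the suggestion list whenever the input changes; indexOf === 0
  // keeps only wordlist entries that start with the typed prefix.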
useEffect(() => {
setMatchingWords(
wordlist.filter((w) => w.indexOf(word.toLowerCase()) === 0),
)
}, [word])
const handleWordSelect = (selectedWord: string) => {
setWord('')
onSelectWord(selectedWord, wordIdx)
}
return (
<Box marginTop="m">
<Text
variant="h1"
numberOfLines={2}
adjustsFontSizeToFit
marginBottom="m"
>
{t('account_import.word_entry.title')}
</Text>
<Text variant="body1Light" color="grayLight">
{t('account_import.word_entry.subtitle')}
</Text>
<TextTransform
marginTop="lx"
variant="body2Light"
color="greenMain"
values={{ ordinal }}
i18nKey="account_import.word_entry.directions"
/>
<TextInput
padding="m"
variant="regular"
placeholder={t('account_import.word_entry.placeholder', {
ordinal,
})}
onChangeText={setWord}
value={word}
keyboardAppearance="dark"
autoCorrect={false}
autoCompleteType="off"
blurOnSubmit={false}
returnKeyType="next"
marginVertical="ms"
autoFocus
/>
<ScrollView
horizontal
keyboardShouldPersistTaps="always"
keyboardDismissMode="none"
showsHorizontalScrollIndicator={false}
>
{matchingWords.length <= 20 &&
matchingWords.map((matchingWord, idx) => (
<MatchingWord
// eslint-disable-next-line react/no-array-index-key
key={`${matchingWord}.${idx}`}
fullWord={matchingWord}
matchingText={word.toLowerCase()}
onPress={handleWordSelect}
/>
))}
</ScrollView>
</Box>
)
}
export default PassphraseAutocomplete
| |
websocket.rs
|
use dotenv::dotenv;
use futures::StreamExt;
use openlimits::{
exchange::Exchange, exchange_ws::OpenLimitsWs, model::websocket::Subscription, nash::Nash,
nash::NashStream,
};
use std::env;
#[tokio::test]
async fn orderbook() {
let mut client = init().await;
let sub = Subscription::OrderBook("btc_usdc".to_string(), 5);
client.subscribe(sub).await.unwrap();
let item = client.next().await;
println!("{:?}", item.unwrap().unwrap());
}
#[tokio::test]
async fn trades() {
let mut client = init().await;
|
client.subscribe(sub).await.unwrap();
let item = client.next().await;
println!("{:?}", item.unwrap().unwrap());
}
async fn init() -> OpenLimitsWs<NashStream> {
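    // Build an authenticated Nash websocket client; requires the
    // NASH_API_SECRET and NASH_API_KEY environment variables (e.g. from .env).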
dotenv().ok();
let websocket = NashStream::with_credential(
&env::var("NASH_API_SECRET").unwrap(),
&env::var("NASH_API_KEY").unwrap(),
1234,
true,
10000,
)
.await;
OpenLimitsWs { websocket }
}
|
let sub = Subscription::Trade("btc_usdc".to_string());
|
defaults.js
|
import { effects, addEffect } from './effects'
export const options = {
// global initial state
// initialState: undefined,
// Should be one of ['browser', 'hash', 'memory']
// Learn more: https://github.com/ReactTraining/history/blob/master/README.md
historyMode: 'browser',
// A list of the standard Redux middleware
middlewares: [],
  // An override of the existing effect handler
addEffect: addEffect(effects),
}
const historyModes = ['browser', 'hash', 'memory']
export default function defaults(opts = {}) {
const {
historyMode,
middlewares,
addEffect,
} = opts
  if (historyMode && !historyModes.includes(historyMode)) {
throw new Error(`historyMode "${historyMode}" is invalid, must be one of ${historyModes.join(', ')}!`)
}
if (middlewares && !Array.isArray(middlewares)) {
throw new Error(`middlewares "${middlewares}" is invalid, must be an Array!`)
}
if (addEffect) {
if (typeof addEffect !== 'function' || typeof addEffect({}) !== 'function') {
throw new Error(`addEffect "${addEffect}" is invalid, must be a function that returns a function`)
} else {
// create effects handler with initial effects object
opts.addEffect = opts.addEffect(effects)
|
options[key] = opts[key]
})
}
|
}
}
Object.keys(opts).forEach(key => {
|
addonsconfiguration_controller.go
|
package controller
import (
"context"
"time"
"github.com/Masterminds/semver"
"github.com/kyma-project/kyma/components/helm-broker/internal"
add "github.com/kyma-project/kyma/components/helm-broker/internal/addon"
"github.com/kyma-project/kyma/components/helm-broker/internal/controller/addons"
"github.com/kyma-project/kyma/components/helm-broker/internal/storage"
addonsv1alpha1 "github.com/kyma-project/kyma/components/helm-broker/pkg/apis/addons/v1alpha1"
exerr "github.com/pkg/errors"
"github.com/sirupsen/logrus"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/helm/pkg/proto/hapi/chart"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"sigs.k8s.io/controller-runtime/pkg/source"
)
// AddonsConfigurationController holds a controller logic
type AddonsConfigurationController struct {
reconciler reconcile.Reconciler
}
// NewAddonsConfigurationController creates a controller with a given reconciler
func NewAddonsConfigurationController(reconciler reconcile.Reconciler) *AddonsConfigurationController {
return &AddonsConfigurationController{reconciler: reconciler}
}
// Start starts a controller
func (acc *AddonsConfigurationController) Start(mgr manager.Manager) error {
// Create a new controller
c, err := controller.New("addonsconfiguration-controller", mgr, controller.Options{Reconciler: acc.reconciler})
if err != nil
|
// Watch for changes to AddonsConfiguration
err = c.Watch(&source.Kind{Type: &addonsv1alpha1.AddonsConfiguration{}}, &handler.EnqueueRequestForObject{})
if err != nil {
return err
}
return nil
}
var _ reconcile.Reconciler = &ReconcileAddonsConfiguration{}
// ReconcileAddonsConfiguration reconciles an AddonsConfiguration object
type ReconcileAddonsConfiguration struct {
log logrus.FieldLogger
client.Client
scheme *runtime.Scheme
chartStorage chartStorage
bundleStorage addonStorage
docsProvider docsProvider
brokerFacade brokerFacade
brokerSyncer brokerSyncer
bundleProvider addonProvider
protection protection
	// syncBroker informs whether the ServiceBroker should be resynced; it is
	// set to true when an insert/delete operation was made on storage
syncBroker bool
developMode bool
}
// NewReconcileAddonsConfiguration returns a new reconcile.Reconciler
func NewReconcileAddonsConfiguration(mgr manager.Manager, bp addonProvider, chartStorage chartStorage, bundleStorage addonStorage, brokerFacade brokerFacade, docsProvider docsProvider, brokerSyncer brokerSyncer, developMode bool) reconcile.Reconciler {
return &ReconcileAddonsConfiguration{
log: logrus.WithField("controller", "addons-configuration"),
Client: mgr.GetClient(),
scheme: mgr.GetScheme(),
chartStorage: chartStorage,
bundleStorage: bundleStorage,
bundleProvider: bp,
protection: protection{},
brokerSyncer: brokerSyncer,
brokerFacade: brokerFacade,
docsProvider: docsProvider,
developMode: developMode,
syncBroker: false,
}
}
// Reconcile reads the state of the cluster for an AddonsConfiguration object and makes changes based on the state read
// and what is in the AddonsConfiguration.Spec
func (r *ReconcileAddonsConfiguration) Reconcile(request reconcile.Request) (reconcile.Result, error) {
addon := &addonsv1alpha1.AddonsConfiguration{}
err := r.Get(context.TODO(), request.NamespacedName, addon)
if err != nil {
return reconcile.Result{}, err
}
r.syncBroker = false
if addon.DeletionTimestamp != nil {
if err := r.deleteAddonsProcess(addon); err != nil {
r.log.Errorf("while deleting AddonsConfiguration process: %v", err)
			return reconcile.Result{RequeueAfter: time.Second * 15}, exerr.Wrapf(err, "while deleting AddonsConfiguration %q", request.NamespacedName)
}
return reconcile.Result{}, nil
}
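	// ObservedGeneration == 0 means this AddonsConfiguration has never been
	// processed; a generation ahead of the observed one means the spec changed.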
if addon.Status.ObservedGeneration == 0 {
r.log.Infof("Start add AddonsConfiguration %s/%s process", addon.Name, addon.Namespace)
preAddon, err := r.prepareForProcessing(addon)
if err != nil {
r.log.Errorf("while preparing for processing: %v", err)
return reconcile.Result{Requeue: true}, exerr.Wrapf(err, "while adding a finalizer to AddonsConfiguration %q", request.NamespacedName)
}
err = r.addAddonsProcess(preAddon, preAddon.Status)
if err != nil {
r.log.Errorf("while adding AddonsConfiguration process: %v", err)
return reconcile.Result{}, exerr.Wrapf(err, "while creating ClusterAddonsConfiguration %q", request.NamespacedName)
}
r.log.Info("Add AddonsConfiguration process completed")
} else if addon.Generation > addon.Status.ObservedGeneration {
r.log.Infof("Start update AddonsConfiguration %s/%s process", addon.Name, addon.Namespace)
lastAddon := addon.DeepCopy()
addon.Status = addonsv1alpha1.AddonsConfigurationStatus{}
err = r.addAddonsProcess(addon, lastAddon.Status)
if err != nil {
r.log.Errorf("while updating AddonsConfiguration process: %v", err)
return reconcile.Result{}, exerr.Wrapf(err, "while updating AddonsConfiguration %q", request.NamespacedName)
}
r.log.Info("Update AddonsConfiguration process completed")
}
return reconcile.Result{}, nil
}
func (r *ReconcileAddonsConfiguration) addAddonsProcess(addon *addonsv1alpha1.AddonsConfiguration, lastStatus addonsv1alpha1.AddonsConfigurationStatus) error {
r.log.Infof("- load bundles and charts for each addon")
repositories := r.loadAddons(addon)
r.log.Info("- check duplicate ID addons alongside repositories")
repositories.ReviseBundleDuplicationInRepository()
r.log.Info("- check duplicates ID addons in existing AddonsConfiguration")
list, err := r.existingAddonsConfigurations(addon)
if err != nil {
return exerr.Wrap(err, "while fetching AddonsConfiguration list")
}
repositories.ReviseBundleDuplicationInStorage(list)
if repositories.IsRepositoriesFailed() {
addon.Status.Phase = addonsv1alpha1.AddonsConfigurationFailed
} else {
addon.Status.Phase = addonsv1alpha1.AddonsConfigurationReady
}
r.log.Infof("- status: %s", addon.Status.Phase)
var deletedBundles []string
switch addon.Status.Phase {
case addonsv1alpha1.AddonsConfigurationFailed:
if _, err = r.updateAddonStatus(r.statusSnapshot(addon, repositories)); err != nil {
return exerr.Wrap(err, "while updating AddonsConfiguration status")
}
if lastStatus.Phase == addonsv1alpha1.AddonsConfigurationReady {
deletedBundles, err = r.deleteBundlesFromRepository(addon.Namespace, lastStatus.Repositories)
if err != nil {
return exerr.Wrap(err, "while deleting bundles from repository")
}
}
case addonsv1alpha1.AddonsConfigurationReady:
r.log.Info("- save ready bundles and charts in storage")
if err := r.saveBundle(internal.Namespace(addon.Namespace), repositories); err != nil {
return exerr.Wrap(err, "while saving ready bundles and charts in storage")
}
if _, err = r.updateAddonStatus(r.statusSnapshot(addon, repositories)); err != nil {
return exerr.Wrap(err, "while updating AddonsConfiguration status")
}
if lastStatus.Phase == addonsv1alpha1.AddonsConfigurationReady {
deletedBundles, err = r.deleteOrphanBundles(addon.Namespace, addon.Status.Repositories, lastStatus.Repositories)
if err != nil {
return exerr.Wrap(err, "while deleting orphan bundles from storage")
}
}
}
if r.syncBroker {
r.log.Info("- ensure ServiceBroker")
if err = r.ensureBroker(addon); err != nil {
return exerr.Wrap(err, "while ensuring ServiceBroker")
}
}
if len(deletedBundles) > 0 {
r.log.Info("- reprocessing conflicting addons configurations")
for _, key := range deletedBundles {
			// reprocess the AddonsConfiguration again if it contains a conflicting addon
if err := r.reprocessConflictingAddonsConfiguration(key, list); err != nil {
return exerr.Wrap(err, "while requesting processing of conflicting AddonsConfigurations")
}
}
}
return nil
}
func (r *ReconcileAddonsConfiguration) deleteAddonsProcess(addon *addonsv1alpha1.AddonsConfiguration) error {
r.log.Infof("Start delete AddonsConfiguration %s/%s process", addon.Name, addon.Namespace)
if addon.Status.Phase == addonsv1alpha1.AddonsConfigurationReady {
adds, err := r.existingAddonsConfigurations(addon)
if err != nil {
return exerr.Wrapf(err, "while listing AddonsConfigurations in namespace %s", addon.Namespace)
}
deleteBroker := true
for _, addon := range adds.Items {
if addon.Status.Phase != addonsv1alpha1.AddonsConfigurationReady {
				// reprocess the AddonsConfiguration again if it failed
if err := r.reprocessAddonsConfiguration(&addon); err != nil {
return exerr.Wrapf(err, "while requesting reprocess for AddonsConfiguration %s", addon.Name)
}
} else {
deleteBroker = false
}
}
if deleteBroker {
r.log.Info("- delete ServiceBroker from namespace %s", addon.Namespace)
if err := r.brokerFacade.Delete(addon.Namespace); err != nil {
return exerr.Wrapf(err, "while deleting ServiceBroker from namespace %s", addon.Namespace)
}
}
for _, repo := range addon.Status.Repositories {
for _, add := range repo.Addons {
id, err := r.removeBundle(add, internal.Namespace(addon.Namespace))
if err != nil && !storage.IsNotFoundError(err) {
return exerr.Wrapf(err, "while deleting bundle with charts for addon %s", add.Name)
}
if id != nil {
r.log.Infof("- delete DocsTopic for bundle %s", add)
if err := r.docsProvider.EnsureDocsTopicRemoved(string(*id), addon.Namespace); err != nil {
return exerr.Wrapf(err, "while ensuring DocsTopic for bundle %s is removed", *id)
}
}
}
}
if !deleteBroker && r.syncBroker {
if err := r.brokerSyncer.SyncServiceBroker(addon.Namespace); err != nil {
return exerr.Wrapf(err, "while syncing ClusterServiceBroker for addon %s", addon.Name)
}
}
}
if err := r.deleteFinalizer(addon); err != nil {
return exerr.Wrapf(err, "while deleting finalizer for AddonConfiguration %s/%s", addon.Name, addon.Namespace)
}
r.log.Info("Delete AddonsConfiguration process completed")
return nil
}
func (r *ReconcileAddonsConfiguration) loadAddons(addon *addonsv1alpha1.AddonsConfiguration) *addons.RepositoryCollection {
repositories := addons.NewRepositoryCollection()
for _, specRepository := range addon.Spec.Repositories {
r.log.Infof("- create addons for %q repository", specRepository.URL)
repo := addons.NewAddonsRepository(specRepository.URL)
if err := specRepository.VerifyURL(r.developMode); err != nil {
repo.FetchingError(err)
repositories.AddRepository(repo)
r.log.Errorf("url %q address is not valid: %s", specRepository.URL, err)
continue
}
adds, err := r.createAddons(specRepository.URL)
if err != nil {
repo.FetchingError(err)
repositories.AddRepository(repo)
r.log.Errorf("while creating addons for repository from %q: %s", specRepository.URL, err)
continue
}
repo.Addons = adds
repositories.AddRepository(repo)
}
return repositories
}
func (r *ReconcileAddonsConfiguration) ensureBroker(addon *addonsv1alpha1.AddonsConfiguration) error {
exist, err := r.brokerFacade.Exist(addon.Namespace)
if err != nil {
return exerr.Wrapf(err, "while checking if ServiceBroker exist in namespace %s", addon.Namespace)
}
if !exist {
r.log.Infof("- creating ServiceBroker in namespace %s", addon.Namespace)
if err := r.brokerFacade.Create(addon.Namespace); err != nil {
return exerr.Wrapf(err, "while creating ServiceBroker for AddonConfiguration %s/%s", addon.Name, addon.Namespace)
}
} else {
if err := r.brokerSyncer.SyncServiceBroker(addon.Namespace); err != nil {
return exerr.Wrapf(err, "while syncing ServiceBroker for AddonConfiguration %s/%s", addon.Name, addon.Namespace)
}
}
return nil
}
func (r *ReconcileAddonsConfiguration) createAddons(URL string) ([]*addons.AddonController, error) {
adds := []*addons.AddonController{}
// fetch repository index
index, err := r.bundleProvider.GetIndex(URL)
if err != nil {
return adds, exerr.Wrap(err, "while reading repository index")
}
// for each repository entry create addon
for _, entries := range index.Entries {
for _, entry := range entries {
addon := addons.NewAddon(string(entry.Name), string(entry.Version), URL)
completeBundle, err := r.bundleProvider.LoadCompleteAddon(entry)
if add.IsFetchingError(err) {
addon.FetchingError(err)
adds = append(adds, addon)
r.log.Errorf("while fetching addon: %s", err)
continue
}
if add.IsLoadingError(err) {
addon.LoadingError(err)
adds = append(adds, addon)
r.log.Errorf("while loading addon: %s", err)
continue
}
addon.ID = string(completeBundle.Addon.ID)
addon.Bundle = completeBundle.Addon
addon.Charts = completeBundle.Charts
adds = append(adds, addon)
}
}
return adds, nil
}
func (r *ReconcileAddonsConfiguration) existingAddonsConfigurations(addon *addonsv1alpha1.AddonsConfiguration) (*addonsv1alpha1.AddonsConfigurationList, error) {
addonsList := &addonsv1alpha1.AddonsConfigurationList{}
addonsConfigurationList, err := r.addonsConfigurationList(addon.Namespace)
if err != nil {
return nil, exerr.Wrapf(err, "while listing AddonsConfigurations from namespace %s", addon.Namespace)
}
for _, existAddon := range addonsConfigurationList.Items {
if existAddon.Name != addon.Name {
addonsList.Items = append(addonsList.Items, existAddon)
}
}
return addonsList, nil
}
func (r *ReconcileAddonsConfiguration) addonsConfigurationList(namespace string) (*addonsv1alpha1.AddonsConfigurationList, error) {
addonsConfigurationList := &addonsv1alpha1.AddonsConfigurationList{}
err := r.Client.List(context.TODO(), &client.ListOptions{Namespace: namespace}, addonsConfigurationList)
if err != nil {
return addonsConfigurationList, exerr.Wrap(err, "during fetching AddonConfiguration list by client")
}
return addonsConfigurationList, nil
}
func (r *ReconcileAddonsConfiguration) deleteOrphanBundles(namespace string, repos []addonsv1alpha1.StatusRepository, lastRepos []addonsv1alpha1.StatusRepository) ([]string, error) {
addonsToStay := map[string]addonsv1alpha1.Addon{}
for _, repo := range repos {
for _, ad := range repo.Addons {
addonsToStay[ad.Key()] = ad
}
}
var deletedBundlesIDs []string
for _, repo := range lastRepos {
for _, ad := range repo.Addons {
if _, exist := addonsToStay[ad.Key()]; !exist {
if _, err := r.removeBundle(ad, internal.Namespace(namespace)); err != nil && !storage.IsNotFoundError(err) {
return nil, exerr.Wrapf(err, "while deleting bundles and charts for addon %s", ad.Name)
}
deletedBundlesIDs = append(deletedBundlesIDs, ad.Key())
}
}
}
return deletedBundlesIDs, nil
}
func (r *ReconcileAddonsConfiguration) deleteBundlesFromRepository(namespace string, repos []addonsv1alpha1.StatusRepository) ([]string, error) {
var deletedBundlesKeys []string
for _, repo := range repos {
for _, ad := range repo.Addons {
if _, err := r.removeBundle(ad, internal.Namespace(namespace)); err != nil && !storage.IsNotFoundError(err) {
return nil, exerr.Wrapf(err, "while deleting bundles and charts for addon %s", ad.Name)
}
deletedBundlesKeys = append(deletedBundlesKeys, ad.Key())
}
}
return deletedBundlesKeys, nil
}
func (r *ReconcileAddonsConfiguration) removeBundle(ad addonsv1alpha1.Addon, namespace internal.Namespace) (*internal.AddonID, error) {
r.log.Infof("- delete bundle %s from storage", ad.Name)
b, err := r.bundleStorage.Get(namespace, internal.AddonName(ad.Name), *semver.MustParse(ad.Version))
if err != nil {
return nil, err
}
err = r.bundleStorage.Remove(namespace, internal.AddonName(ad.Name), *semver.MustParse(ad.Version))
if err != nil {
return nil, err
}
r.syncBroker = true
for _, plan := range b.Plans {
err = r.chartStorage.Remove(namespace, plan.ChartRef.Name, plan.ChartRef.Version)
if err != nil {
return nil, err
}
}
return &b.ID, nil
}
func (r *ReconcileAddonsConfiguration) reprocessConflictingAddonsConfiguration(key string, list *addonsv1alpha1.AddonsConfigurationList) error {
for _, addonsCfg := range list.Items {
if addonsCfg.Status.Phase != addonsv1alpha1.AddonsConfigurationReady {
for _, repo := range addonsCfg.Status.Repositories {
if repo.Status != addonsv1alpha1.RepositoryStatusReady {
for _, add := range repo.Addons {
if add.Key() == key {
return r.reprocessAddonsConfiguration(&addonsCfg)
}
}
}
}
}
}
return nil
}
func (r *ReconcileAddonsConfiguration) reprocessAddonsConfiguration(addon *addonsv1alpha1.AddonsConfiguration) error {
ad := &addonsv1alpha1.AddonsConfiguration{}
if err := r.Client.Get(context.Background(), types.NamespacedName{Name: addon.Name, Namespace: addon.Namespace}, ad); err != nil {
return exerr.Wrapf(err, "while getting ClusterAddonsConfiguration %s", addon.Name)
}
ad.Spec.ReprocessRequest++
if err := r.Client.Update(context.Background(), ad); err != nil {
return exerr.Wrapf(err, "while incrementing a reprocess requests for ClusterAddonsConfiguration %s", addon.Name)
}
return nil
}
func (r *ReconcileAddonsConfiguration) saveBundle(namespace internal.Namespace, repositories *addons.RepositoryCollection) error {
for _, addon := range repositories.ReadyAddons() {
if len(addon.Bundle.Docs) == 1 {
r.log.Infof("- ensure DocsTopic for bundle %s in namespace %s", addon.Bundle.ID, namespace)
if err := r.docsProvider.EnsureDocsTopic(addon.Bundle, string(namespace)); err != nil {
return exerr.Wrapf(err, "While ensuring DocsTopic for bundle %s/%s: %v", addon.Bundle.ID, namespace, err)
}
}
exist, err := r.bundleStorage.Upsert(namespace, addon.Bundle)
if err != nil {
addon.RegisteringError(err)
r.log.Errorf("cannot upsert bundle %v:%v into storage", addon.Bundle.Name, addon.Bundle.Version)
continue
}
if exist {
r.log.Infof("bundle %v:%v already existed in storage, bundle was replaced", addon.Bundle.Name, addon.Bundle.Version)
}
err = r.saveCharts(namespace, addon.Charts)
if err != nil {
addon.RegisteringError(err)
r.log.Errorf("cannot upsert charts of %v:%v bunlde", addon.Bundle.Name, addon.Bundle.Version)
continue
}
r.syncBroker = true
}
return nil
}
func (r *ReconcileAddonsConfiguration) saveCharts(namespace internal.Namespace, charts []*chart.Chart) error {
for _, bundleChart := range charts {
exist, err := r.chartStorage.Upsert(namespace, bundleChart)
if err != nil {
return err
}
if exist {
r.log.Infof("chart %s already existed in storage, chart was replaced", bundleChart.Metadata.Name)
}
}
return nil
}
func (r *ReconcileAddonsConfiguration) statusSnapshot(addon *addonsv1alpha1.AddonsConfiguration, repositories *addons.RepositoryCollection) *addonsv1alpha1.AddonsConfiguration {
addon.Status.Repositories = nil
for _, repo := range repositories.Repositories {
addonsRepository := repo.Repository
addonsRepository.Addons = []addonsv1alpha1.Addon{}
for _, addon := range repo.Addons {
addonsRepository.Addons = append(addonsRepository.Addons, addon.Addon)
}
addon.Status.Repositories = append(addon.Status.Repositories, addonsRepository)
}
if repositories.IsRepositoriesFailed() {
addon.Status.Phase = addonsv1alpha1.AddonsConfigurationFailed
} else {
addon.Status.Phase = addonsv1alpha1.AddonsConfigurationReady
}
return addon
}
func (r *ReconcileAddonsConfiguration) updateAddonStatus(addon *addonsv1alpha1.AddonsConfiguration) (*addonsv1alpha1.AddonsConfiguration, error) {
addon.Status.ObservedGeneration = addon.Generation
addon.Status.LastProcessedTime = &v1.Time{Time: time.Now()}
r.log.Infof("- update AddonsConfiguration %s/%s status", addon.Name, addon.Namespace)
err := r.Status().Update(context.TODO(), addon)
if err != nil {
return nil, exerr.Wrap(err, "while update AddonsConfiguration status")
}
return addon, nil
}
func (r *ReconcileAddonsConfiguration) prepareForProcessing(addon *addonsv1alpha1.AddonsConfiguration) (*addonsv1alpha1.AddonsConfiguration, error) {
obj := addon.DeepCopy()
obj.Status.Phase = addonsv1alpha1.AddonsConfigurationPending
pendingInstance, err := r.updateAddonStatus(obj)
if err != nil {
return nil, err
}
if r.protection.hasFinalizer(pendingInstance.Finalizers) {
return pendingInstance, nil
}
r.log.Info("- add a finalizer")
pendingInstance.Finalizers = r.protection.addFinalizer(pendingInstance.Finalizers)
err = r.Client.Update(context.Background(), pendingInstance)
if err != nil {
return nil, err
}
return pendingInstance, nil
}
func (r *ReconcileAddonsConfiguration) deleteFinalizer(addon *addonsv1alpha1.AddonsConfiguration) error {
obj := addon.DeepCopy()
if !r.protection.hasFinalizer(obj.Finalizers) {
return nil
}
r.log.Info("- delete a finalizer")
obj.Finalizers = r.protection.removeFinalizer(obj.Finalizers)
return r.Client.Update(context.Background(), obj)
}
|
{
return err
}
|
yaegi_test.go
|
package main
import (
"bytes"
"context"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"strconv"
"strings"
"testing"
"time"
)
const (
// CITimeoutMultiplier is the multiplier for all timeouts in the CI.
CITimeoutMultiplier = 3
)
// Sleep pauses the current goroutine for at least the duration d.
func
|
(d time.Duration) {
d = applyCIMultiplier(d)
time.Sleep(d)
}
func applyCIMultiplier(timeout time.Duration) time.Duration {
ci := os.Getenv("CI")
if ci == "" {
return timeout
}
b, err := strconv.ParseBool(ci)
if err != nil || !b {
return timeout
}
return time.Duration(float64(timeout) * CITimeoutMultiplier)
}
func TestYaegiCmdCancel(t *testing.T) {
tmp, err := ioutil.TempDir("", "yaegi-")
if err != nil {
t.Fatalf("failed to create tmp directory: %v", err)
}
defer func() {
err = os.RemoveAll(tmp)
if err != nil {
t.Errorf("failed to clean up %v: %v", tmp, err)
}
}()
yaegi := filepath.Join(tmp, "yaegi")
build := exec.Command("go", "build", "-race", "-o", yaegi, ".")
out, err := build.CombinedOutput()
if err != nil {
t.Fatalf("failed to build yaegi command: %v: %s", err, out)
}
// Test src must be terminated by a single newline.
tests := []string{
"for {}\n",
"select {}\n",
}
for _, src := range tests {
cmd := exec.Command(yaegi)
in, err := cmd.StdinPipe()
if err != nil {
t.Errorf("failed to get stdin pipe to yaegi command: %v", err)
}
var outBuf, errBuf bytes.Buffer
cmd.Stdout = &outBuf
cmd.Stderr = &errBuf
// https://golang.org/doc/articles/race_detector.html#Options
cmd.Env = []string{`GORACE="halt_on_error=1"`}
err = cmd.Start()
if err != nil {
t.Fatalf("failed to start yaegi command: %v", err)
}
_, err = in.Write([]byte(src))
if err != nil {
t.Errorf("failed pipe test source to yaegi command: %v", err)
}
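		// Give the interpreter time to start evaluating the blocking statement
		// before it is interrupted.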
Sleep(200 * time.Millisecond)
err = cmd.Process.Signal(os.Interrupt)
if err != nil {
t.Errorf("failed to send os.Interrupt to yaegi command: %v", err)
}
_, err = in.Write([]byte("1+1\n"))
if err != nil {
t.Errorf("failed to probe race: %v", err)
}
err = in.Close()
if err != nil {
t.Errorf("failed to close stdin pipe: %v", err)
}
err = cmd.Wait()
if err != nil {
if cmd.ProcessState.ExitCode() == 66 { // See race_detector.html article.
t.Errorf("race detected running yaegi command canceling %q: %v", src, err)
if testing.Verbose() {
t.Log(&errBuf)
}
} else {
t.Errorf("error running yaegi command for %q: %v", src, err)
}
continue
}
if strings.TrimSuffix(errBuf.String(), "\n") != context.Canceled.Error() {
t.Errorf("unexpected error: %q", &errBuf)
}
}
}
|
Sleep
|
lib.rs
|
// Copyright 2020 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#![cfg(test)]
use {
fidl_fuchsia_paver::Configuration,
fidl_fuchsia_sys::{LauncherProxy, TerminationReason},
fidl_fuchsia_update as fidl_update,
fidl_fuchsia_update_ext::{
InstallationErrorData, InstallationProgress, InstallingData, State, UpdateInfo,
},
fidl_fuchsia_update_installer as fidl_installer,
fidl_fuchsia_update_installer_ext::{self as installer},
fuchsia_async as fasync,
fuchsia_component::{
client::{AppBuilder, Output},
server::{NestedEnvironment, ServiceFs},
},
fuchsia_zircon::{self as zx, EventPair, HandleBased, Peered},
futures::prelude::*,
matches::assert_matches,
mock_installer::{
CapturedRebootControllerRequest, CapturedUpdateInstallerRequest, MockUpdateInstallerService,
},
mock_paver::{MockPaverService, MockPaverServiceBuilder, PaverEvent},
mock_reboot::{MockRebootService, RebootReason},
parking_lot::Mutex,
pretty_assertions::assert_eq,
std::sync::Arc,
};
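// Serves a fuchsia.update CommitStatusProvider: each IsCurrentSystemCommitted
// request is answered with a duplicate of the supplied event pair.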
async fn run_commit_status_provider_service(
mut stream: fidl_update::CommitStatusProviderRequestStream,
p: Arc<EventPair>,
) {
while let Some(req) = stream.try_next().await.unwrap() {
let fidl_update::CommitStatusProviderRequest::IsCurrentSystemCommitted { responder } = req;
let pair = p.duplicate_handle(zx::Rights::BASIC).unwrap();
let () = responder.send(pair).unwrap();
}
}
#[derive(Default)]
struct TestEnvBuilder {
manager_states: Vec<State>,
installer_states: Vec<installer::State>,
commit_status_provider_response: Option<EventPair>,
paver_service: Option<MockPaverService>,
reboot_service: Option<MockRebootService>,
}
impl TestEnvBuilder {
fn manager_states(self, manager_states: Vec<State>) -> Self {
Self { manager_states, ..self }
}
fn installer_states(self, installer_states: Vec<installer::State>) -> Self {
Self { installer_states, ..self }
}
fn commit_status_provider_response(self, response: EventPair) -> Self {
Self { commit_status_provider_response: Some(response), ..self }
}
fn paver_service(self, paver_service: MockPaverService) -> Self {
Self { paver_service: Some(paver_service), ..self }
}
fn reboot_service(self, reboot_service: MockRebootService) -> Self {
Self { reboot_service: Some(reboot_service), ..self }
}
fn build(self) -> TestEnv {
let mut fs = ServiceFs::new();
let update_manager = Arc::new(MockUpdateManagerService::new(self.manager_states));
let update_manager_clone = Arc::clone(&update_manager);
fs.add_fidl_service(move |stream| {
fasync::Task::spawn(Arc::clone(&update_manager_clone).run_service(stream)).detach()
});
let update_installer =
Arc::new(MockUpdateInstallerService::with_states(self.installer_states));
let update_installer_clone = Arc::clone(&update_installer);
fs.add_fidl_service(move |stream| {
fasync::Task::spawn(Arc::clone(&update_installer_clone).run_service(stream)).detach()
});
if let Some(response) = self.commit_status_provider_response {
let response = Arc::new(response);
fs.add_fidl_service(move |stream| {
fasync::Task::spawn(run_commit_status_provider_service(
stream,
Arc::clone(&response),
))
.detach()
});
}
if let Some(paver_service) = self.paver_service {
let paver_service = Arc::new(paver_service);
fs.add_fidl_service(move |stream| {
fasync::Task::spawn(
Arc::clone(&paver_service)
.run_paver_service(stream)
.unwrap_or_else(|e| panic!("error running paver service: {:?}", e)),
)
.detach()
});
}
if let Some(reboot_service) = self.reboot_service {
let reboot_service = Arc::new(reboot_service);
fs.add_fidl_service(move |stream| {
fasync::Task::spawn(
Arc::clone(&reboot_service)
.run_reboot_service(stream)
.unwrap_or_else(|e| panic!("error running reboot service: {:?}", e)),
)
.detach()
});
}
let env = fs
.create_salted_nested_environment("update_env")
.expect("nested environment to create successfully");
fasync::Task::spawn(fs.collect()).detach();
TestEnv { env, update_manager, update_installer }
}
}
struct TestEnv {
env: NestedEnvironment,
update_manager: Arc<MockUpdateManagerService>,
update_installer: Arc<MockUpdateInstallerService>,
}
impl TestEnv {
fn builder() -> TestEnvBuilder {
TestEnvBuilder::default()
}
fn launcher(&self) -> &LauncherProxy {
self.env.launcher()
}
fn new() -> Self {
Self::builder().build()
}
async fn run_update<'a>(&'a self, args: Vec<&'a str>) -> Output {
let launcher = self.launcher();
let update =
AppBuilder::new("fuchsia-pkg://fuchsia.com/update-integration-tests#meta/update.cmx")
.args(args);
let output = update
.output(launcher)
.expect("update to launch")
.await
.expect("no errors while waiting for exit");
assert_eq!(output.exit_status.reason(), TerminationReason::Exited);
output
}
fn assert_update_manager_called_with(&self, expected_args: Vec<CapturedUpdateManagerRequest>) {
assert_eq!(*self.update_manager.captured_args.lock(), expected_args);
}
fn assert_update_installer_called_with(
&self,
expected_args: Vec<CapturedUpdateInstallerRequest>,
) {
self.update_installer.assert_installer_called_with(expected_args);
}
fn assert_reboot_controller_called_with(
&self,
expected_requests: Vec<CapturedRebootControllerRequest>,
) {
self.update_installer.assert_reboot_controller_called_with(expected_requests);
}
}
#[derive(PartialEq, Debug)]
enum CapturedUpdateManagerRequest {
CheckNow { options: fidl_update::CheckOptions, monitor_present: bool },
}
// fidl_update::CheckOptions does not impl Eq, but it is semantically Eq.
impl Eq for CapturedUpdateManagerRequest {}
struct MockUpdateManagerService {
states: Vec<State>,
captured_args: Mutex<Vec<CapturedUpdateManagerRequest>>,
check_now_response: Mutex<Result<(), fidl_update::CheckNotStartedReason>>,
}
impl MockUpdateManagerService {
fn new(states: Vec<State>) -> Self {
Self { states, captured_args: Mutex::new(vec![]), check_now_response: Mutex::new(Ok(())) }
}
async fn run_service(self: Arc<Self>, mut stream: fidl_update::ManagerRequestStream) {
while let Some(req) = stream.try_next().await.unwrap() {
match req {
fidl_update::ManagerRequest::CheckNow { options, monitor, responder } => {
self.captured_args.lock().push(CapturedUpdateManagerRequest::CheckNow {
options,
monitor_present: monitor.is_some(),
});
if let Some(monitor) = monitor {
let proxy = fidl_update::MonitorProxy::new(
fasync::Channel::from_channel(monitor.into_channel()).unwrap(),
);
fasync::Task::spawn(Self::send_states(proxy, self.states.clone())).detach();
}
responder.send(&mut *self.check_now_response.lock()).unwrap();
}
fidl_update::ManagerRequest::PerformPendingReboot { responder: _ } => {
panic!("update tool should not be calling perform pending reboot!");
}
}
}
}
async fn send_states(monitor: fidl_update::MonitorProxy, states: Vec<State>) {
for state in states.into_iter() {
monitor.on_state(&mut state.into()).await.unwrap();
}
}
}
fn assert_output(output: &Output, expected_stdout: &str, expected_stderr: &str, exit_code: i64) {
assert_eq!(output.exit_status.reason(), fidl_fuchsia_sys::TerminationReason::Exited);
let actual_stdout = std::str::from_utf8(&output.stdout).unwrap();
assert_eq!(actual_stdout, expected_stdout);
let actual_stderr = std::str::from_utf8(&output.stderr).unwrap();
assert_eq!(actual_stderr, expected_stderr);
assert_eq!(output.exit_status.code(), exit_code, "stdout: {}", actual_stdout);
}
#[fasync::run_singlethreaded(test)]
async fn force_install_fails_on_invalid_pkg_url() {
let env = TestEnv::new();
let output =
env.run_update(vec!["force-install", "not-fuchsia-pkg://fuchsia.com/update"]).await;
assert_matches!(output.exit_status.ok(), Err(_));
let stderr = std::str::from_utf8(&output.stderr).unwrap();
assert!(stderr.contains("Error: parsing update package url"), "stderr: {}", stderr);
env.assert_update_installer_called_with(vec![]);
env.assert_reboot_controller_called_with(vec![]);
}
#[fasync::run_singlethreaded(test)]
async fn force_install_reboot() {
let update_info = installer::UpdateInfo::builder().download_size(1000).build();
let env = TestEnv::builder()
.installer_states(vec![
installer::State::Prepare,
installer::State::Fetch(
installer::UpdateInfoAndProgress::new(update_info, installer::Progress::none())
.unwrap(),
),
installer::State::Stage(
installer::UpdateInfoAndProgress::new(
update_info,
installer::Progress::builder()
.fraction_completed(0.5)
.bytes_downloaded(500)
.build(),
)
.unwrap(),
),
installer::State::WaitToReboot(installer::UpdateInfoAndProgress::done(update_info)),
installer::State::Reboot(installer::UpdateInfoAndProgress::done(update_info)),
])
.build();
let output = env.run_update(vec!["force-install", "fuchsia-pkg://fuchsia.com/update"]).await;
assert_output(
&output,
"Installing an update.\n\
State: Prepare\n\
State: Fetch(UpdateInfoAndProgress { info: UpdateInfo { download_size: 1000 }, progress: Progress { fraction_completed: 0.0, bytes_downloaded: 0 } })\n\
State: Stage(UpdateInfoAndProgress { info: UpdateInfo { download_size: 1000 }, progress: Progress { fraction_completed: 0.5, bytes_downloaded: 500 } })\n\
State: WaitToReboot(UpdateInfoAndProgress { info: UpdateInfo { download_size: 1000 }, progress: Progress { fraction_completed: 1.0, bytes_downloaded: 1000 } })\n",
"",
0,
);
env.assert_update_installer_called_with(vec![CapturedUpdateInstallerRequest::StartUpdate {
url: "fuchsia-pkg://fuchsia.com/update".into(),
options: fidl_installer::Options {
initiator: Some(fidl_installer::Initiator::User),
should_write_recovery: Some(true),
allow_attach_to_existing_attempt: Some(true),
..fidl_installer::Options::EMPTY
},
reboot_controller_present: true,
}]);
env.assert_reboot_controller_called_with(vec![]);
}
#[fasync::run_singlethreaded(test)]
async fn force_install_no_reboot() {
let update_info = installer::UpdateInfo::builder().download_size(1000).build();
let env = TestEnv::builder()
.installer_states(vec![
installer::State::Prepare,
installer::State::Fetch(
installer::UpdateInfoAndProgress::new(update_info, installer::Progress::none())
.unwrap(),
),
installer::State::Stage(
installer::UpdateInfoAndProgress::new(
update_info,
installer::Progress::builder()
.fraction_completed(0.5)
.bytes_downloaded(500)
.build(),
)
.unwrap(),
),
installer::State::WaitToReboot(installer::UpdateInfoAndProgress::done(update_info)),
installer::State::DeferReboot(installer::UpdateInfoAndProgress::done(update_info)),
])
.build();
let output = env
.run_update(vec!["force-install", "fuchsia-pkg://fuchsia.com/update", "--reboot", "false"])
.await;
assert_output(
&output,
"Installing an update.\n\
State: Prepare\n\
State: Fetch(UpdateInfoAndProgress { info: UpdateInfo { download_size: 1000 }, progress: Progress { fraction_completed: 0.0, bytes_downloaded: 0 } })\n\
State: Stage(UpdateInfoAndProgress { info: UpdateInfo { download_size: 1000 }, progress: Progress { fraction_completed: 0.5, bytes_downloaded: 500 } })\n\
State: WaitToReboot(UpdateInfoAndProgress { info: UpdateInfo { download_size: 1000 }, progress: Progress { fraction_completed: 1.0, bytes_downloaded: 1000 } })\n\
State: DeferReboot(UpdateInfoAndProgress { info: UpdateInfo { download_size: 1000 }, progress: Progress { fraction_completed: 1.0, bytes_downloaded: 1000 } })\n",
"",
0,
);
env.assert_update_installer_called_with(vec![CapturedUpdateInstallerRequest::StartUpdate {
url: "fuchsia-pkg://fuchsia.com/update".into(),
options: fidl_installer::Options {
initiator: Some(fidl_installer::Initiator::User),
should_write_recovery: Some(true),
allow_attach_to_existing_attempt: Some(true),
..fidl_installer::Options::EMPTY
},
reboot_controller_present: true,
}]);
env.assert_reboot_controller_called_with(vec![CapturedRebootControllerRequest::Detach]);
}
#[fasync::run_singlethreaded(test)]
async fn force_install_failure_state() {
let env = TestEnv::builder()
.installer_states(vec![
installer::State::Prepare,
installer::State::FailPrepare(installer::PrepareFailureReason::Internal),
])
.build();
let output = env.run_update(vec!["force-install", "fuchsia-pkg://fuchsia.com/update"]).await;
assert_output(
&output,
"Installing an update.\n\
State: Prepare\n\
State: FailPrepare(Internal)\n",
"Error: Encountered failure state\n",
1,
);
env.assert_update_installer_called_with(vec![CapturedUpdateInstallerRequest::StartUpdate {
url: "fuchsia-pkg://fuchsia.com/update".into(),
options: fidl_installer::Options {
initiator: Some(fidl_installer::Initiator::User),
should_write_recovery: Some(true),
allow_attach_to_existing_attempt: Some(true),
..fidl_installer::Options::EMPTY
},
reboot_controller_present: true,
}]);
env.assert_reboot_controller_called_with(vec![]);
}
#[fasync::run_singlethreaded(test)]
async fn force_install_unexpected_end() {
let env = TestEnv::builder().installer_states(vec![installer::State::Prepare]).build();
let output = env.run_update(vec!["force-install", "fuchsia-pkg://fuchsia.com/update"]).await;
assert_output(
&output,
"Installing an update.\n\
State: Prepare\n",
"Error: Installation ended unexpectedly\n",
1,
);
env.assert_update_installer_called_with(vec![CapturedUpdateInstallerRequest::StartUpdate {
url: "fuchsia-pkg://fuchsia.com/update".into(),
options: fidl_installer::Options {
initiator: Some(fidl_installer::Initiator::User),
should_write_recovery: Some(true),
allow_attach_to_existing_attempt: Some(true),
..fidl_installer::Options::EMPTY
},
reboot_controller_present: true,
}]);
env.assert_reboot_controller_called_with(vec![]);
}
#[fasync::run_singlethreaded(test)]
async fn force_install_service_initiated_flag() {
let env = TestEnv::new();
let _output = env
.run_update(vec![
"force-install",
"fuchsia-pkg://fuchsia.com/update",
"--service-initiated",
])
.await;
env.assert_update_installer_called_with(vec![CapturedUpdateInstallerRequest::StartUpdate {
url: "fuchsia-pkg://fuchsia.com/update".into(),
options: fidl_installer::Options {
initiator: Some(fidl_installer::Initiator::Service),
should_write_recovery: Some(true),
allow_attach_to_existing_attempt: Some(true),
..fidl_installer::Options::EMPTY
},
reboot_controller_present: true,
}]);
}
#[fasync::run_singlethreaded(test)]
async fn check_now_service_initiated_flag() {
let env = TestEnv::new();
let output = env.run_update(vec!["check-now", "--service-initiated"]).await;
assert_output(&output, "Checking for an update.\n", "", 0);
env.assert_update_manager_called_with(vec![CapturedUpdateManagerRequest::CheckNow {
options: fidl_update::CheckOptions {
initiator: Some(fidl_update::Initiator::Service),
allow_attaching_to_existing_update_check: Some(true),
..fidl_update::CheckOptions::EMPTY
},
monitor_present: false,
}]);
}
#[fasync::run_singlethreaded(test)]
async fn check_now_error_if_throttled() {
let env = TestEnv::new();
*env.update_manager.check_now_response.lock() =
Err(fidl_update::CheckNotStartedReason::Throttled);
let output = env.run_update(vec!["check-now"]).await;
assert_output(&output, "", "Error: Update check failed to start: Throttled\n", 1);
env.assert_update_manager_called_with(vec![CapturedUpdateManagerRequest::CheckNow {
options: fidl_update::CheckOptions {
initiator: Some(fidl_update::Initiator::User),
allow_attaching_to_existing_update_check: Some(true),
..fidl_update::CheckOptions::EMPTY
},
monitor_present: false,
}]);
}
#[fasync::run_singlethreaded(test)]
async fn check_now_monitor_flag() {
let env = TestEnv::builder()
.manager_states(vec![
State::CheckingForUpdates,
State::InstallingUpdate(InstallingData {
update: Some(UpdateInfo {
version_available: Some("fake-versions".into()),
download_size: Some(4),
}),
installation_progress: Some(InstallationProgress {
fraction_completed: Some(0.5f32),
}),
}),
])
.build();
let output = env.run_update(vec!["check-now", "--monitor"]).await;
assert_output(
&output,
"Checking for an update.\n\
State: CheckingForUpdates\n\
State: InstallingUpdate(InstallingData { update: Some(UpdateInfo { version_available: Some(\"fake-versions\"), download_size: Some(4) }), installation_progress: Some(InstallationProgress { fraction_completed: Some(0.5) }) })\n",
"",
0,
);
env.assert_update_manager_called_with(vec![CapturedUpdateManagerRequest::CheckNow {
options: fidl_update::CheckOptions {
initiator: Some(fidl_update::Initiator::User),
allow_attaching_to_existing_update_check: Some(true),
..fidl_update::CheckOptions::EMPTY
},
monitor_present: true,
}]);
}
#[fasync::run_singlethreaded(test)]
async fn check_now_monitor_error_checking() {
let env = TestEnv::builder()
.manager_states(vec![State::CheckingForUpdates, State::ErrorCheckingForUpdate])
.build();
let output = env.run_update(vec!["check-now", "--monitor"]).await;
assert_output(
&output,
"Checking for an update.\n\
State: CheckingForUpdates\n",
"Error: Update failed: ErrorCheckingForUpdate\n",
1,
);
env.assert_update_manager_called_with(vec![CapturedUpdateManagerRequest::CheckNow {
options: fidl_update::CheckOptions {
initiator: Some(fidl_update::Initiator::User),
allow_attaching_to_existing_update_check: Some(true),
..fidl_update::CheckOptions::EMPTY
},
monitor_present: true,
}]);
}
#[fasync::run_singlethreaded(test)]
async fn check_now_monitor_error_installing() {
let env = TestEnv::builder()
.manager_states(vec![
State::CheckingForUpdates,
State::InstallingUpdate(InstallingData {
update: Some(UpdateInfo {
version_available: Some("fake-versions".into()),
download_size: Some(4),
}),
installation_progress: Some(InstallationProgress {
fraction_completed: Some(0.5f32),
}),
}),
State::InstallationError(InstallationErrorData {
update: Some(UpdateInfo {
version_available: Some("fake-versions".into()),
download_size: Some(4),
}),
installation_progress: Some(InstallationProgress {
fraction_completed: Some(0.5f32),
}),
}),
])
.build();
let output = env.run_update(vec!["check-now", "--monitor"]).await;
assert_output(
&output,
"Checking for an update.\n\
State: CheckingForUpdates\n\
State: InstallingUpdate(InstallingData { update: Some(UpdateInfo { version_available: Some(\"fake-versions\"), download_size: Some(4) }), installation_progress: Some(InstallationProgress { fraction_completed: Some(0.5) }) })\n",
"Error: Update failed: InstallationError(InstallationErrorData { update: Some(UpdateInfo { version_available: Some(\"fake-versions\"), download_size: Some(4) }), installation_progress: Some(InstallationProgress { fraction_completed: Some(0.5) }) })\n",
1,
);
env.assert_update_manager_called_with(vec![CapturedUpdateManagerRequest::CheckNow {
options: fidl_update::CheckOptions {
initiator: Some(fidl_update::Initiator::User),
allow_attaching_to_existing_update_check: Some(true),
..fidl_update::CheckOptions::EMPTY
},
monitor_present: true,
}]);
}
#[fasync::run_singlethreaded(test)]
async fn
|
() {
let (p0, p1) = EventPair::create().unwrap();
let env = TestEnv::builder().commit_status_provider_response(p1).build();
let () = p0.signal_peer(zx::Signals::NONE, zx::Signals::USER_0).unwrap();
let output = env.run_update(vec!["wait-for-commit"]).await;
assert_output(
&output,
"Waiting for commit.\n\
Committed!\n",
"",
0,
);
}
#[fasync::run_singlethreaded(test)]
async fn revert_success() {
#[derive(Debug, PartialEq)]
enum Interaction {
Paver(PaverEvent),
Reboot(RebootReason),
}
let interactions = Arc::new(Mutex::new(vec![]));
let env = TestEnv::builder()
.paver_service({
let interactions = Arc::clone(&interactions);
MockPaverServiceBuilder::new()
.event_hook(move |event| {
interactions.lock().push(Interaction::Paver(event.clone()));
})
.build()
})
.reboot_service({
let interactions = Arc::clone(&interactions);
MockRebootService::new(Box::new(move |reason| {
interactions.lock().push(Interaction::Reboot(reason));
Ok(())
}))
})
.build();
let output = env.run_update(vec!["revert"]).await;
assert_output(&output, "Reverting the update.\n", "", 0);
assert_eq!(
interactions.lock().as_slice(),
&[
Interaction::Paver(PaverEvent::QueryCurrentConfiguration),
Interaction::Paver(PaverEvent::SetConfigurationUnbootable {
configuration: Configuration::A
}),
Interaction::Paver(PaverEvent::BootManagerFlush),
Interaction::Reboot(RebootReason::UserRequest)
]
);
}
|
wait_for_commit_success
|
builtin_attrs.rs
|
//! Built-in attributes and `cfg` flag gating.
use AttributeType::*;
use AttributeGate::*;
use crate::{Features, Stability};
use rustc_data_structures::fx::FxHashMap;
use syntax_pos::symbol::{Symbol, sym};
use lazy_static::lazy_static;
type GateFn = fn(&Features) -> bool;
macro_rules! cfg_fn {
($field: ident) => {
(|features| { features.$field }) as GateFn
}
}
pub type GatedCfg = (Symbol, Symbol, GateFn);
/// `cfg(...)`'s that are feature gated.
const GATED_CFGS: &[GatedCfg] = &[
// (name in cfg, feature, function to check if the feature is enabled)
(sym::target_thread_local, sym::cfg_target_thread_local, cfg_fn!(cfg_target_thread_local)),
(sym::target_has_atomic, sym::cfg_target_has_atomic, cfg_fn!(cfg_target_has_atomic)),
(sym::target_has_atomic_load_store, sym::cfg_target_has_atomic, cfg_fn!(cfg_target_has_atomic)),
];
/// Find a gated cfg determined by the `pred`icate which is given the cfg's name.
pub fn find_gated_cfg(pred: impl Fn(Symbol) -> bool) -> Option<&'static GatedCfg> {
GATED_CFGS.iter().find(|(cfg_sym, ..)| pred(*cfg_sym))
}
// If you change this, please modify `src/doc/unstable-book` as well. You must
// move that documentation into the relevant place in the other docs, and
// remove the chapter on the flag.
#[derive(Copy, Clone, PartialEq, Debug)]
pub enum AttributeType {
/// Normal, builtin attribute that is consumed
/// by the compiler before the unused_attribute check
Normal,
/// Builtin attribute that may not be consumed by the compiler
/// before the unused_attribute check. These attributes
/// will be ignored by the unused_attribute lint
Whitelisted,
/// Builtin attribute that is only allowed at the crate level
CrateLevel,
}
#[derive(Clone, Copy)]
pub enum AttributeGate {
/// Is gated by a given feature gate, reason
/// and function to check if enabled
Gated(Stability, Symbol, &'static str, fn(&Features) -> bool),
/// Ungated attribute, can be used on all release channels
Ungated,
}
// fn() is not Debug
impl std::fmt::Debug for AttributeGate {
fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match *self {
Self::Gated(ref stab, name, expl, _) =>
write!(fmt, "Gated({:?}, {}, {})", stab, name, expl),
Self::Ungated => write!(fmt, "Ungated")
}
}
}
impl AttributeGate {
fn is_deprecated(&self) -> bool {
match *self {
Self::Gated(Stability::Deprecated(_, _), ..) => true,
_ => false,
}
}
}
/// A template that the attribute input must match.
/// Only top-level shape (`#[attr]` vs `#[attr(...)]` vs `#[attr = ...]`) is considered now.
#[derive(Clone, Copy)]
pub struct AttributeTemplate {
pub word: bool,
pub list: Option<&'static str>,
pub name_value_str: Option<&'static str>,
}
impl AttributeTemplate {
pub fn only_word() -> Self {
Self { word: true, list: None, name_value_str: None }
}
}
/// A convenience macro for constructing attribute templates.
/// E.g., `template!(Word, List: "description")` means that the attribute
/// supports forms `#[attr]` and `#[attr(description)]`.
macro_rules! template {
(Word) => { template!(@ true, None, None) };
(List: $descr: expr) => { template!(@ false, Some($descr), None) };
(NameValueStr: $descr: expr) => { template!(@ false, None, Some($descr)) };
(Word, List: $descr: expr) => { template!(@ true, Some($descr), None) };
(Word, NameValueStr: $descr: expr) => { template!(@ true, None, Some($descr)) };
(List: $descr1: expr, NameValueStr: $descr2: expr) => {
template!(@ false, Some($descr1), Some($descr2))
};
(Word, List: $descr1: expr, NameValueStr: $descr2: expr) => {
template!(@ true, Some($descr1), Some($descr2))
};
(@ $word: expr, $list: expr, $name_value_str: expr) => { AttributeTemplate {
word: $word, list: $list, name_value_str: $name_value_str
} };
}
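// For example, `template!(Word, List: "description")` expands to
// `AttributeTemplate { word: true, list: Some("description"), name_value_str: None }`,
// matching the `#[attr]` and `#[attr(description)]` forms described above.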
macro_rules! ungated {
($attr:ident, $typ:expr, $tpl:expr $(,)?) => {
(sym::$attr, $typ, $tpl, Ungated)
};
}
macro_rules! gated {
($attr:ident, $typ:expr, $tpl:expr, $gate:ident, $msg:expr $(,)?) => {
(sym::$attr, $typ, $tpl, Gated(Stability::Unstable, sym::$gate, $msg, cfg_fn!($gate)))
};
($attr:ident, $typ:expr, $tpl:expr, $msg:expr $(,)?) => {
(sym::$attr, $typ, $tpl, Gated(Stability::Unstable, sym::$attr, $msg, cfg_fn!($attr)))
};
}
macro_rules! rustc_attr {
(TEST, $attr:ident, $typ:expr, $tpl:expr $(,)?) => {
rustc_attr!(
$attr, $typ, $tpl,
concat!("the `#[", stringify!($attr), "]` attribute is just used for rustc unit tests \
and will never be stable",
),
)
};
($attr:ident, $typ:expr, $tpl:expr, $msg:expr $(,)?) => {
(sym::$attr, $typ, $tpl,
Gated(Stability::Unstable, sym::rustc_attrs, $msg, cfg_fn!(rustc_attrs)))
};
}
macro_rules! experimental {
($attr:ident) => {
concat!("the `#[", stringify!($attr), "]` attribute is an experimental feature")
}
}
const IMPL_DETAIL: &str = "internal implementation detail";
const INTERNAL_UNSTABLE: &str = "this is an internal attribute that will never be stable";
pub type BuiltinAttribute = (Symbol, AttributeType, AttributeTemplate, AttributeGate);
/// Attributes that have a special meaning to rustc or rustdoc.
pub const BUILTIN_ATTRIBUTES: &[BuiltinAttribute] = &[
// ==========================================================================
// Stable attributes:
// ==========================================================================
    // Conditional compilation:
ungated!(cfg, Normal, template!(List: "predicate")),
ungated!(cfg_attr, Normal, template!(List: "predicate, attr1, attr2, ...")),
// Testing:
ungated!(ignore, Normal, template!(Word, NameValueStr: "reason")),
ungated!(
should_panic, Normal,
template!(Word, List: r#"expected = "reason"#, NameValueStr: "reason"),
),
// FIXME(Centril): This can be used on stable but shouldn't.
ungated!(reexport_test_harness_main, Normal, template!(NameValueStr: "name")),
// Macros:
ungated!(derive, Normal, template!(List: "Trait1, Trait2, ...")),
ungated!(automatically_derived, Normal, template!(Word)),
// FIXME(#14407)
ungated!(macro_use, Normal, template!(Word, List: "name1, name2, ...")),
ungated!(macro_escape, Normal, template!(Word)), // Deprecated synonym for `macro_use`.
ungated!(macro_export, Normal, template!(Word, List: "local_inner_macros")),
ungated!(proc_macro, Normal, template!(Word)),
ungated!(
proc_macro_derive, Normal,
template!(List: "TraitName, /*opt*/ attributes(name1, name2, ...)"),
),
ungated!(proc_macro_attribute, Normal, template!(Word)),
// Lints:
ungated!(warn, Normal, template!(List: r#"lint1, lint2, ..., /*opt*/ reason = "...""#)),
ungated!(allow, Normal, template!(List: r#"lint1, lint2, ..., /*opt*/ reason = "...""#)),
ungated!(forbid, Normal, template!(List: r#"lint1, lint2, ..., /*opt*/ reason = "...""#)),
ungated!(deny, Normal, template!(List: r#"lint1, lint2, ..., /*opt*/ reason = "...""#)),
ungated!(must_use, Whitelisted, template!(Word, NameValueStr: "reason")),
// FIXME(#14407)
ungated!(
deprecated, Normal,
template!(
Word,
List: r#"/*opt*/ since = "version", /*opt*/ note = "reason""#,
NameValueStr: "reason"
),
),
// Crate properties:
ungated!(crate_name, CrateLevel, template!(NameValueStr: "name")),
ungated!(crate_type, CrateLevel, template!(NameValueStr: "bin|lib|...")),
ungated!(crate_id, CrateLevel, template!(NameValueStr: "ignored")),
// ABI, linking, symbols, and FFI
ungated!(
link, Whitelisted,
template!(List: r#"name = "...", /*opt*/ kind = "dylib|static|...", /*opt*/ cfg = "...""#),
),
ungated!(link_name, Whitelisted, template!(NameValueStr: "name")),
ungated!(no_link, Normal, template!(Word)),
ungated!(repr, Normal, template!(List: "C")),
ungated!(export_name, Whitelisted, template!(NameValueStr: "name")),
ungated!(link_section, Whitelisted, template!(NameValueStr: "name")),
ungated!(no_mangle, Whitelisted, template!(Word)),
ungated!(used, Whitelisted, template!(Word)),
// Limits:
ungated!(recursion_limit, CrateLevel, template!(NameValueStr: "N")),
ungated!(type_length_limit, CrateLevel, template!(NameValueStr: "N")),
// Entry point:
ungated!(main, Normal, template!(Word)),
ungated!(start, Normal, template!(Word)),
ungated!(no_start, CrateLevel, template!(Word)),
ungated!(no_main, CrateLevel, template!(Word)),
// Modules, prelude, and resolution:
ungated!(path, Normal, template!(NameValueStr: "file")),
ungated!(no_std, CrateLevel, template!(Word)),
ungated!(no_implicit_prelude, Normal, template!(Word)),
ungated!(non_exhaustive, Whitelisted, template!(Word)),
// Runtime
ungated!(windows_subsystem, Whitelisted, template!(NameValueStr: "windows|console")),
ungated!(panic_handler, Normal, template!(Word)), // RFC 2070
// Code generation:
ungated!(inline, Whitelisted, template!(Word, List: "always|never")),
ungated!(cold, Whitelisted, template!(Word)),
ungated!(no_builtins, Whitelisted, template!(Word)),
ungated!(target_feature, Whitelisted, template!(List: r#"enable = "name""#)),
// FIXME: #14408 whitelist docs since rustdoc looks at them
ungated!(doc, Whitelisted, template!(List: "hidden|inline|...", NameValueStr: "string")),
// ==========================================================================
// Unstable attributes:
// ==========================================================================
// Linking:
gated!(naked, Whitelisted, template!(Word), naked_functions, experimental!(naked)),
gated!(
link_args, Normal, template!(NameValueStr: "args"),
"the `link_args` attribute is experimental and not portable across platforms, \
        it is recommended to use `#[link(name = \"foo\")]` instead",
),
gated!(
link_ordinal, Whitelisted, template!(List: "ordinal"), raw_dylib,
experimental!(link_ordinal)
),
// Plugins:
(
sym::plugin_registrar, Normal, template!(Word),
Gated(
Stability::Deprecated(
"https://github.com/rust-lang/rust/pull/64675",
Some("may be removed in a future compiler version"),
),
sym::plugin_registrar,
"compiler plugins are deprecated",
cfg_fn!(plugin_registrar)
)
),
(
sym::plugin, CrateLevel, template!(List: "name|name(args)"),
Gated(
Stability::Deprecated(
"https://github.com/rust-lang/rust/pull/64675",
Some("may be removed in a future compiler version"),
),
sym::plugin,
"compiler plugins are deprecated",
cfg_fn!(plugin)
)
),
// Testing:
gated!(allow_fail, Normal, template!(Word), experimental!(allow_fail)),
gated!(
test_runner, CrateLevel, template!(List: "path"), custom_test_frameworks,
"custom test frameworks are an unstable feature",
),
// RFC #1268
gated!(marker, Normal, template!(Word), marker_trait_attr, experimental!(marker)),
gated!(
thread_local, Whitelisted, template!(Word),
"`#[thread_local]` is an experimental feature, and does not currently handle destructors",
),
gated!(no_core, CrateLevel, template!(Word), experimental!(no_core)),
// RFC 2412
gated!(
optimize, Whitelisted, template!(List: "size|speed"), optimize_attribute,
experimental!(optimize),
),
gated!(ffi_returns_twice, Whitelisted, template!(Word), experimental!(ffi_returns_twice)),
gated!(track_caller, Whitelisted, template!(Word), experimental!(track_caller)),
gated!(
register_attr, CrateLevel, template!(List: "attr1, attr2, ..."),
experimental!(register_attr),
),
gated!(
register_tool, CrateLevel, template!(List: "tool1, tool2, ..."),
experimental!(register_tool),
),
// ==========================================================================
// Internal attributes: Stability, deprecation, and unsafe:
// ==========================================================================
    ungated!(feature, CrateLevel, template!(List: "name1, name2, ...")),
// FIXME(#14407) -- only looked at on-demand so we can't
// guarantee they'll have already been checked.
ungated!(
rustc_deprecated, Whitelisted,
template!(List: r#"since = "version", reason = "...""#)
),
// FIXME(#14407)
ungated!(stable, Whitelisted, template!(List: r#"feature = "name", since = "version""#)),
// FIXME(#14407)
ungated!(
unstable, Whitelisted,
template!(List: r#"feature = "name", reason = "...", issue = "N""#),
),
gated!(
rustc_const_unstable, Normal, template!(List: r#"feature = "name""#),
"the `#[rustc_const_unstable]` attribute is an internal feature",
),
gated!(
allow_internal_unstable, Normal, template!(Word, List: "feat1, feat2, ..."),
"allow_internal_unstable side-steps feature gating and stability checks",
),
gated!(
allow_internal_unsafe, Normal, template!(Word),
"allow_internal_unsafe side-steps the unsafe_code lint",
),
// ==========================================================================
// Internal attributes: Type system related:
// ==========================================================================
gated!(fundamental, Whitelisted, template!(Word), experimental!(fundamental)),
gated!(
// RFC #1445.
structural_match, Whitelisted, template!(Word),
"the semantics of constant patterns is not yet settled",
),
gated!(
may_dangle, Normal, template!(Word), dropck_eyepatch,
"`may_dangle` has unstable semantics and may be removed in the future",
),
// ==========================================================================
// Internal attributes: Runtime related:
// ==========================================================================
rustc_attr!(rustc_allocator, Whitelisted, template!(Word), IMPL_DETAIL),
rustc_attr!(rustc_allocator_nounwind, Whitelisted, template!(Word), IMPL_DETAIL),
gated!(alloc_error_handler, Normal, template!(Word), experimental!(alloc_error_handler)),
gated!(
default_lib_allocator, Whitelisted, template!(Word), allocator_internals,
experimental!(default_lib_allocator),
),
gated!(
needs_allocator, Normal, template!(Word), allocator_internals,
experimental!(needs_allocator),
),
gated!(panic_runtime, Whitelisted, template!(Word), experimental!(panic_runtime)),
gated!(needs_panic_runtime, Whitelisted, template!(Word), experimental!(needs_panic_runtime)),
gated!(
unwind, Whitelisted, template!(List: "allowed|aborts"), unwind_attributes,
experimental!(unwind),
),
gated!(
compiler_builtins, Whitelisted, template!(Word),
"the `#[compiler_builtins]` attribute is used to identify the `compiler_builtins` crate \
which contains compiler-rt intrinsics and will never be stable",
),
gated!(
sanitizer_runtime, Whitelisted, template!(Word),
"the `#[sanitizer_runtime]` attribute is used to identify crates that contain the runtime \
of a sanitizer and will never be stable",
),
gated!(
profiler_runtime, Whitelisted, template!(Word),
"the `#[profiler_runtime]` attribute is used to identify the `profiler_builtins` crate \
which contains the profiler runtime and will never be stable",
),
// ==========================================================================
// Internal attributes, Linkage:
// ==========================================================================
gated!(
linkage, Whitelisted, template!(NameValueStr: "external|internal|..."),
"the `linkage` attribute is experimental and not portable across platforms",
),
rustc_attr!(rustc_std_internal_symbol, Whitelisted, template!(Word), INTERNAL_UNSTABLE),
// ==========================================================================
// Internal attributes, Macro related:
// ==========================================================================
rustc_attr!(rustc_builtin_macro, Whitelisted, template!(Word), IMPL_DETAIL),
rustc_attr!(rustc_proc_macro_decls, Normal, template!(Word), INTERNAL_UNSTABLE),
rustc_attr!(
rustc_macro_transparency, Whitelisted,
template!(NameValueStr: "transparent|semitransparent|opaque"),
"used internally for testing macro hygiene",
),
// ==========================================================================
// Internal attributes, Diagnostics related:
// ==========================================================================
rustc_attr!(
rustc_on_unimplemented, Whitelisted,
template!(
List: r#"/*opt*/ message = "...", /*opt*/ label = "...", /*opt*/ note = "...""#,
NameValueStr: "message"
),
INTERNAL_UNSTABLE
),
// Whitelists "identity-like" conversion methods to suggest on type mismatch.
rustc_attr!(rustc_conversion_suggestion, Whitelisted, template!(Word), INTERNAL_UNSTABLE),
// ==========================================================================
// Internal attributes, Const related:
// ==========================================================================
rustc_attr!(rustc_promotable, Whitelisted, template!(Word), IMPL_DETAIL),
rustc_attr!(rustc_allow_const_fn_ptr, Whitelisted, template!(Word), IMPL_DETAIL),
rustc_attr!(rustc_args_required_const, Whitelisted, template!(List: "N"), INTERNAL_UNSTABLE),
// ==========================================================================
// Internal attributes, Layout related:
// ==========================================================================
rustc_attr!(
rustc_layout_scalar_valid_range_start, Whitelisted, template!(List: "value"),
"the `#[rustc_layout_scalar_valid_range_start]` attribute is just used to enable \
niche optimizations in libcore and will never be stable",
),
rustc_attr!(
rustc_layout_scalar_valid_range_end, Whitelisted, template!(List: "value"),
"the `#[rustc_layout_scalar_valid_range_end]` attribute is just used to enable \
niche optimizations in libcore and will never be stable",
),
rustc_attr!(
rustc_nonnull_optimization_guaranteed, Whitelisted, template!(Word),
"the `#[rustc_nonnull_optimization_guaranteed]` attribute is just used to enable \
niche optimizations in libcore and will never be stable",
),
// ==========================================================================
// Internal attributes, Misc:
// ==========================================================================
gated!(
lang, Normal, template!(NameValueStr: "name"), lang_items,
"language items are subject to change",
),
(
sym::rustc_diagnostic_item,
Normal,
template!(NameValueStr: "name"),
Gated(
Stability::Unstable,
sym::rustc_attrs,
"diagnostic items compiler internal support for linting",
cfg_fn!(rustc_attrs),
),
),
(
sym::no_debug, Whitelisted, template!(Word),
Gated(
Stability::Deprecated("https://github.com/rust-lang/rust/issues/29721", None),
sym::no_debug,
"the `#[no_debug]` attribute was an experimental feature that has been \
deprecated due to lack of demand",
cfg_fn!(no_debug)
)
),
gated!(
// Used in resolve:
prelude_import, Whitelisted, template!(Word),
"`#[prelude_import]` is for use by rustc only",
),
gated!(
rustc_paren_sugar, Normal, template!(Word), unboxed_closures,
"unboxed_closures are still evolving",
),
rustc_attr!(
rustc_inherit_overflow_checks, Whitelisted, template!(Word),
"the `#[rustc_inherit_overflow_checks]` attribute is just used to control \
overflow checking behavior of several libcore functions that are inlined \
across crates and will never be stable",
),
rustc_attr!(rustc_reservation_impl, Normal, template!(NameValueStr: "reservation message"),
"the `#[rustc_reservation_impl]` attribute is internally used \
for reserving for `for<T> From<!> for T` impl"
),
rustc_attr!(
rustc_test_marker, Normal, template!(Word),
"the `#[rustc_test_marker]` attribute is used internally to track tests",
),
// ==========================================================================
// Internal attributes, Testing:
// ==========================================================================
rustc_attr!(TEST, rustc_outlives, Normal, template!(Word)),
rustc_attr!(TEST, rustc_variance, Normal, template!(Word)),
rustc_attr!(TEST, rustc_layout, Normal, template!(List: "field1, field2, ...")),
rustc_attr!(TEST, rustc_regions, Normal, template!(Word)),
rustc_attr!(
TEST, rustc_error, Whitelisted,
template!(Word, List: "delay_span_bug_from_inside_query")
),
rustc_attr!(TEST, rustc_dump_user_substs, Whitelisted, template!(Word)),
rustc_attr!(TEST, rustc_if_this_changed, Whitelisted, template!(Word, List: "DepNode")),
rustc_attr!(TEST, rustc_then_this_would_need, Whitelisted, template!(List: "DepNode")),
rustc_attr!(
TEST, rustc_dirty, Whitelisted,
template!(List: r#"cfg = "...", /*opt*/ label = "...", /*opt*/ except = "...""#),
),
rustc_attr!(
TEST, rustc_clean, Whitelisted,
template!(List: r#"cfg = "...", /*opt*/ label = "...", /*opt*/ except = "...""#),
),
rustc_attr!(
TEST, rustc_partition_reused, Whitelisted,
template!(List: r#"cfg = "...", module = "...""#),
),
rustc_attr!(
TEST, rustc_partition_codegened, Whitelisted,
template!(List: r#"cfg = "...", module = "...""#),
),
rustc_attr!(
TEST, rustc_expected_cgu_reuse, Whitelisted,
template!(List: r#"cfg = "...", module = "...", kind = "...""#),
),
rustc_attr!(TEST, rustc_synthetic, Whitelisted, template!(Word)),
rustc_attr!(TEST, rustc_symbol_name, Whitelisted, template!(Word)),
rustc_attr!(TEST, rustc_def_path, Whitelisted, template!(Word)),
rustc_attr!(TEST, rustc_mir, Whitelisted, template!(List: "arg1, arg2, ...")),
rustc_attr!(TEST, rustc_dump_program_clauses, Whitelisted, template!(Word)),
rustc_attr!(TEST, rustc_dump_env_program_clauses, Whitelisted, template!(Word)),
rustc_attr!(TEST, rustc_object_lifetime_default, Whitelisted, template!(Word)),
    rustc_attr!(TEST, rustc_dummy, Normal, template!(Word /* doesn't matter */)),
gated!(
omit_gdb_pretty_printer_section, Whitelisted, template!(Word),
"the `#[omit_gdb_pretty_printer_section]` attribute is just used for the Rust test suite",
),
];
pub fn deprecated_attributes() -> Vec<&'static BuiltinAttribute>
|
pub fn is_builtin_attr_name(name: Symbol) -> bool {
BUILTIN_ATTRIBUTE_MAP.get(&name).is_some()
}
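// Illustrative check (added here; relies only on `cfg` being registered in
// BUILTIN_ATTRIBUTES above):
//
//     debug_assert!(is_builtin_attr_name(sym::cfg));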
lazy_static! {
pub static ref BUILTIN_ATTRIBUTE_MAP: FxHashMap<Symbol, &'static BuiltinAttribute> = {
let mut map = FxHashMap::default();
for attr in BUILTIN_ATTRIBUTES.iter() {
if map.insert(attr.0, attr).is_some() {
panic!("duplicate builtin attribute `{}`", attr.0);
}
}
map
};
}
|
{
BUILTIN_ATTRIBUTES.iter().filter(|(.., gate)| gate.is_deprecated()).collect()
}
|
readfifo0.rs
|
#[doc = "Reader of register READFIFO0"]
pub type R = crate::R<u32, super::READFIFO0>;
#[doc = "Reader of field `DATA`"]
pub type DATA_R = crate::R<u32, u32>;
impl R {
#[doc = "Bits 0:31 - Reads Data"]
#[inline(always)]
pub fn data(&self) -> DATA_R
|
}
|
{
DATA_R::new((self.bits & 0xffff_ffff) as u32)
}
|
api.ts
|
import { Injectable } from '@angular/core';
import { assign, cloneDeep } from 'lodash-es';
import { FuseMockApiService } from '@fuse/lib/mock-api';
import { user as userData } from 'app/mock-api/common/user/data';
@Injectable({
providedIn: 'root'
})
export class
|
{
private _user: any = userData;
/**
* Constructor
*/
constructor(private _fuseMockApiService: FuseMockApiService)
{
// Register Mock API handlers
this.registerHandlers();
}
// -----------------------------------------------------------------------------------------------------
// @ Public methods
// -----------------------------------------------------------------------------------------------------
/**
* Register Mock API handlers
*/
registerHandlers(): void
{
// -----------------------------------------------------------------------------------------------------
// @ User - GET
// -----------------------------------------------------------------------------------------------------
this._fuseMockApiService
.onGet('api/common/user')
.reply(() => [200, cloneDeep(this._user)]);
// -----------------------------------------------------------------------------------------------------
// @ User - PATCH
// -----------------------------------------------------------------------------------------------------
this._fuseMockApiService
.onPatch('api/common/user')
.reply(({request}) => {
// Get the user mock-api
const user = cloneDeep(request.body.user);
// Update the user mock-api
this._user = assign({}, this._user, user);
// Return the response
return [200, cloneDeep(this._user)];
});
}
}
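// Illustrative behaviour (not part of the class itself): a PATCH with body
// { user: { name: 'Jane' } } is merged into the stored record via lodash
// `assign`, so untouched fields survive and a subsequent GET returns the
// updated user. A hypothetical call site:
//
//     // httpClient.patch('api/common/user', { user: { name: 'Jane' } })
//     //     .subscribe(user => console.log(user.name)); // 'Jane'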
|
UserMockApi
|
udp.go
|
package main
import (
"bytes"
"log"
"net"
"fmt"
)
func check_result(tip string, err error) bool {
	if err != nil {
		log.Println(tip, err)
		return false
	}
	return true
}
func listenUDP() {
log.Println("listening for stats UDP on port " + *udpListenAddress)
serverAddr, err := net.ResolveUDPAddr("udp", *udpListenAddress)
if err != nil {
log.Println("Error: ", err)
}
serverConn, err := net.ListenUDP("udp", serverAddr)
check_result("Error:",err)
defer func() {
if err := recover(); err != nil {
fmt.Println("Critical: ",err)
fmt.Println("Panic,try to restart UDP Service")
go listenUDP()
}
}()
defer serverConn.Close()
buf := make([]byte, 8192)
for {
n, _, err := serverConn.ReadFromUDP(buf)
if !check_result("Error reading from UDP: ",err){
continue
}
udpPacketCount.Inc()
if *debug {
log.Printf("new udp package: %s", string(buf[ 0:n]))
}
deltas, err := NewDelta(bytes.NewBuffer(buf[0:n]))
if !check_result("Error creating delta: ",err){
continue
|
}
		for _, delta := range deltas {
			err = delta.Apply()
			if !check_result("Error applying delta: ", err) {
continue
}
}
}
}
| |
rastrigin_accept_action.py
|
#
# sample from a Rastrigin test function
# this is to illustrate how to use accept_action in CDNest to avoid repeated calculations.
#
# A 2D Rastrigin function looks like
#
# logL=-(10.0*2 + (coords[0]**2 - 10*np.cos(2.0*np.pi*coords[0])) + (coords[1]**2 - 10*np.cos(2.0*np.pi*coords[1])) )
#
# In every perturb, only one parameter is updated, so the terms involving the
# remaining parameters do not need to be recalculated; we simply reuse the values from the previous step.
#
# In this example, we use an array to record values of the term "(coords[0]**2 - 10*np.cos(2.0*np.pi*coords[0]))"
# in every accepted perturb.
#
from mpi4py import MPI
import numpy as np
import cydnest
import matplotlib.pyplot as plt
from matplotlib import cm
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
def randh():
"""
    generate a sample from a heavy-tailed distribution.
"""
return 10.0**(1.5 - 3*np.abs(np.random.randn()/np.sqrt(-np.log(np.random.rand()))))*np.random.randn()
def wrap(x, a, b):
assert b > a
    return (x - a) % (b - a) + a
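# A quick sanity check on wrap() (illustrative numbers): wrapping 6.0 into
# [-5.12, 5.12] gives (6.0 + 5.12) % 10.24 - 5.12 = -4.24, i.e. the point
# re-enters the box from the opposite side.
# assert abs(wrap(6.0, -5.12, 5.12) - (-4.24)) < 1e-12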
class Model(object):
def
|
(self, num_params=1, num_particles=1):
"""
        initialize the model
"""
# number of particles each core holds
self.num_particles = num_particles
# number of parameters
self.num_params = num_params
# parameter ranges, a list
self.param_range = [[-5.12, 5.12]]*num_params
# parameter prior type.
# three types: Uniform, Gaussian, Log
self.prior_type = ["Uniform"]*num_params
# parameter prior information. used when the prior is Gaussian
# indicate the mean and standard deviation of the Gaussian prior
self.prior_info = [[0.0, 1.0]]*num_params
        # which parameter is being perturbed
        # which particle is being perturbed
self.which_param_update = 0
self.which_particle_update = 0
# perturbed values and accepted values for all particles
self.value_perturb = [0.0]*self.num_particles
self.value_accept = [0.0]*self.num_particles
def accept_action(self):
"""
action taken when a perturb is accepted
record the accepted values from the perturbed values
"""
# note "which_particle_update" is updated and "which_param_update" is updated
if self.which_param_update < 1:
self.value_accept[self.which_particle_update] = self.value_perturb[self.which_particle_update]
def kill_action(self, i, i_copy):
"""
        cdnest kills a particle when it has not been updated for a long time.
        action taken when a particle is killed: particle i is killed, so
        copy particle i_copy's values into particle i's values.
        this function is needed because we record some accepted values
"""
self.value_accept[i] = self.value_accept[i_copy]
return
# users can define their own functions to generate
# the initial parameter values
    # this is optional. if not defined, cydnest will use the internal
# function.
def from_prior(self):
"""
generate initial values of model parameters from priors
"""
coords = np.zeros(self.num_params)
for i in range(self.num_params):
if self.prior_type[i] == "Uniform":
coords[i] = np.random.uniform(self.param_range[i][0], self.param_range[i][1])
elif self.prior_type[i] == "Gaussian":
                coords[i] = np.random.randn() * self.prior_info[i][1] + self.prior_info[i][0]
                coords[i] = wrap(coords[i], self.param_range[i][0], self.param_range[i][1])
elif self.prior_type[i] == "Log": # LOG prior
coords[i] = np.random.uniform(np.log(self.param_range[i][0]), np.log(self.param_range[i][1]))
coords[i] = np.exp(coords[i])
return coords
# users can define their own functions to perturb
# parameter values for sampling
    # this is optional. if not defined, cydnest will use the internal
# function.
def perturb(self, coords):
"""
perturb the parameters
"""
i = np.random.randint(self.num_params)
# record which parameter is updated
self.which_param_update = i
LogH = 0.0 # prior ratio: ln(prior(new)/prior(old)) = ln(prior(new)) - ln(prior(old))
width = (self.param_range[i][1]-self.param_range[i][0])
if self.prior_type[i] == "Uniform":
coords[i] += width*randh()
coords[i] = wrap(coords[i], self.param_range[i][0], self.param_range[i][1])
elif self.prior_type[i] == "Gaussian":
LogH -= ( -0.5* (coords[i] - self.prior_info[i][0])**2/self.prior_info[i][1]**2 ) # ln(Gaussian)
coords[i] += width*randh()
coords[i] = wrap(coords[i], self.param_range[i][0], self.param_range[i][1])
LogH += ( -0.5* (coords[i] - self.prior_info[i][0])**2/self.prior_info[i][1]**2 )
elif self.prior_type[i] == "Log":
LogH -= ( -np.log(coords[i]) ) # ln(1/x) = -ln(x)
coords[i] += width*randh()
coords[i] = wrap(coords[i], self.param_range[i][0], self.param_range[i][1])
LogH += ( -np.log(coords[i]) )
return LogH
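    # Note (added for clarity): LogH accumulates ln p(new) - ln p(old) of the prior;
    # e.g. for the Gaussian prior, LogH = -0.5*((x_new - mu)**2 - (x_old - mu)**2)/sigma**2,
    # which the sampler adds to the log acceptance probability.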
def log_likelihood_initial(self, coords):
"""
calculate likelihood at initial start
"""
self.which_particle_update = cydnest.get_which_particle_update()
self.value_accept[self.which_particle_update] = coords[0]**2 - 10*np.cos(2.0*np.pi*coords[0])
value = self.value_accept[self.which_particle_update]
return -(10.0*2 + (value) + (coords[1]**2 - 10*np.cos(2.0*np.pi*coords[1])) )
def log_likelihood(self, coords):
"""
calculate likelihood
"""
# get which particle is being updated, and save it to self model
self.which_particle_update = cydnest.get_which_particle_update()
value = 0.0
if self.which_param_update < 1: # when 0-th parameter update, recalculate
self.value_perturb[self.which_particle_update] = coords[0]**2 - 10*np.cos(2.0*np.pi*coords[0])
value = self.value_perturb[self.which_particle_update]
else: # otherwise, use the accepted value
value = self.value_accept[self.which_particle_update]
return -(10.0*2 + (value) + (coords[1]**2 - 10*np.cos(2.0*np.pi*coords[1])) )
# create a model
model = Model(num_params=2, num_particles=2)
# create a dnest sampler
# max_num_saves is the number of samples to generate
# max_num_levels is the number of levels
# ptol is the likelihood tolerance in natural log (ln)
sampler = cydnest.sampler(model, sample_dir="./", max_num_saves = 10000, ptol=0.1, num_particles=model.num_particles)
#
# The full argument lists look like:
# sampler = cydnest.sampler(model, sample_dir="./", max_num_saves = 10000, ptol=0.1,
# num_particles=1, thread_steps_factor = 10,
# max_num_levels = 0, lam = 10, beta = 100
# new_level_interval_factor = 2, save_interval_factor = 2)
#
# run sampler
logz = sampler.run()
comm.Barrier()
# output evidence
if rank == 0:
print("Evidence:", logz)
psample = np.loadtxt(sampler.get_sample_dir() +"/posterior_sample" + sampler.get_sample_tag() + ".txt")
psample_info = np.loadtxt(sampler.get_sample_dir() +"/posterior_sample_info" + sampler.get_sample_tag() + ".txt")
fig = plt.figure(figsize=(15, 12))
ax = fig.add_subplot(111, projection='3d')
X = np.arange(-1.5, 1.5, 0.01)
Y = np.arange(-1.5, 1.5, 0.01)
X, Y = np.meshgrid(X, Y)
Z = -(10.0*2 + (X**2 - 10*np.cos(2.0*np.pi*X)) + (Y**2 - 10*np.cos(2.0*np.pi*Y)) )
ax.plot_surface(X, Y, Z, cmap=cm.ocean, rstride=2, cstride=2, linewidth=0, antialiased=False, zorder=0)
idx = np.where((np.abs(psample[:, 0]) <1.4) & (np.abs(psample[:, 1]) <1.4))
ax.plot(psample[idx[0], 0], psample[idx[0], 1], psample_info[idx[0]], ls='none', marker='+', zorder=10)
ax.set_xlim(-1.5, 1.5)
ax.set_ylim(-1.5, 1.5)
ax.set_xlabel(r'$\theta_1$')
ax.set_ylabel(r'$\theta_2$')
ax.set_zlabel(r'$\log L$')
fig.savefig("fig_rastrigin.jpg", bbox_inches='tight')
plt.show()
# do postprocess, plot, show the properties of sampling
cydnest.postprocess(sampler.get_sample_dir(), sampler.get_sample_tag(), temperature=1.0, doplot=True)
|
__init__
|
proxy.go
|
// Copyright © 2017 Louis Taylor <[email protected]>
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package proxy
import (
"os"
"strings"
"syscall"
)
var ProxyEnvs = []string{
"http_proxy",
"https_proxy",
"HTTP_PROXY",
"HTTPS_PROXY",
}
var NoProxy = []string{
"no_proxy",
"NO_PROXY",
}
func replaceShell() {
shell := os.Getenv("SHELL")
syscall.Exec(shell, []string{shell}, syscall.Environ())
}
func On(proxy string) {
|
func Off() {
for _, env := range ProxyEnvs {
os.Unsetenv(env)
}
replaceShell()
}
func ignore(noProxy string, host string) string {
hosts := os.Getenv(noProxy)
if len(hosts) > 0 {
return strings.Join([]string{hosts, host}, ",")
} else {
return host
}
}
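// For illustration: with NO_PROXY=localhost already exported,
// ignore("NO_PROXY", "example.com") returns "localhost,example.com";
// if the variable is unset or empty, the host alone is returned.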
func Ignore(host string) {
for _, env := range NoProxy {
newEnv := ignore(env, host)
os.Setenv(env, newEnv)
}
replaceShell()
}
|
for _, env := range ProxyEnvs {
os.Setenv(env, proxy)
}
replaceShell()
}
|
test.js
|
// Tests and utilities
var darksky = require('./darksky');
var wxsummary = require('./wxsummary');
const api_key = "b4d58ccf2c792a4a0588e1b31b0e7daf";
//lat lon of Concord, MA
var concord = {'lat': '42.460372',
'lon': '-71.348948'};
var old_time = "2017-11-11T11:59:00";
var opts = {exclude: "minutely,hourly,daily,alerts,flags"};
function wx()
{
var client = new darksky.Client(api_key);
client.forecast(concord.lat, concord.lon, opts,
function(err, data) {
if (err) {
console.error(err);
}
// process.stdout.write(Object.prototype.toString.call(data));
console.log(data);
}
);}
function wxV3()
{
var client = new darksky.Client(api_key);
client.forecastV3(concord.lat, concord.lon, {},
function(err, data) {
if (err) {
console.error(err);
}
// process.stdout.write(Object.prototype.toString.call(data));
console.log(data);
}
);}
|
{
if (err) {
console.error(err);
}
process.stdout.write(JSON.stringify(data, null, 2));
}
);}
exports.wx = wx;
exports.wxV3 = wxV3;
exports.summary = summary;
|
function summary()
{
wxsummary.get_summary (function(err, data)
|
lbmap.go
|
// Copyright 2016-2019 Authors of Cilium
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package lbmap
import (
"fmt"
"github.com/cilium/cilium/pkg/bpf"
"github.com/cilium/cilium/pkg/loadbalancer"
"github.com/cilium/cilium/pkg/lock"
"github.com/cilium/cilium/pkg/logging"
"github.com/cilium/cilium/pkg/logging/logfields"
"github.com/cilium/cilium/pkg/option"
"github.com/cilium/cilium/pkg/u8proto"
"github.com/sirupsen/logrus"
)
var (
log = logging.DefaultLogger.WithField(logfields.LogSubsys, "map-lb")
// mutex protects access to the BPF map to guarantee atomicity if a
// transaction must be split across multiple map access operations.
mutex lock.RWMutex
)
const (
// Maximum number of entries in each hashtable
MaxEntries = 65536
maxFrontEnds = 256
// MaxSeq is used by daemon for generating bpf define LB_RR_MAX_SEQ.
MaxSeq = 31
)
var (
// cache contains *all* services of both IPv4 and IPv6 based maps
// combined
cache = newLBMapCache()
)
func updateServiceEndpoint(key ServiceKey, value ServiceValue) error {
log.WithFields(logrus.Fields{
"frontend": key,
"backend": value,
}).Debug("adding frontend for backend to BPF maps")
if key.GetBackend() != 0 && value.RevNatKey().GetKey() == 0 {
return fmt.Errorf("invalid RevNat ID (0) in the Service Value")
}
if _, err := key.Map().OpenOrCreate(); err != nil {
return err
}
return key.Map().Update(key.ToNetwork(), value.ToNetwork())
}
// DeleteService deletes a legacy service from the lbmap. The given key has to
// be of the master service.
func DeleteService(key ServiceKey) error {
mutex.Lock()
defer mutex.Unlock()
	return deleteServiceLocked(key)
}
func deleteServiceLocked(key ServiceKey) error {
err := key.Map().Delete(key.ToNetwork())
if err != nil {
return err
}
return lookupAndDeleteServiceWeights(key)
}
func lookupService(key ServiceKey) (ServiceValue, error) {
var svc ServiceValue
val, err := key.Map().Lookup(key.ToNetwork())
if err != nil {
return nil, err
}
if key.IsIPv6() {
svc = val.(*Service6Value)
} else {
svc = val.(*Service4Value)
}
return svc.ToNetwork(), nil
}
// updateServiceWeights updates cilium_lb6_rr_seq or cilium_lb4_rr_seq bpf maps.
func updateServiceWeights(key ServiceKey, value *RRSeqValue) error {
if _, err := key.RRMap().OpenOrCreate(); err != nil {
return err
}
return key.RRMap().Update(key.ToNetwork(), value)
}
// lookupAndDeleteServiceWeights deletes entry from cilium_lb6_rr_seq or cilium_lb4_rr_seq
func lookupAndDeleteServiceWeights(key ServiceKey) error {
_, err := key.RRMap().Lookup(key.ToNetwork())
if err != nil {
// Ignore if entry is not found.
return nil
}
return key.RRMap().Delete(key.ToNetwork())
}
func updateRevNatLocked(key RevNatKey, value RevNatValue) error {
log.WithFields(logrus.Fields{
logfields.BPFMapKey: key,
logfields.BPFMapValue: value,
}).Debug("adding revNat to lbmap")
if key.GetKey() == 0 {
return fmt.Errorf("invalid RevNat ID (0)")
}
if _, err := key.Map().OpenOrCreate(); err != nil {
return err
}
return key.Map().Update(key.ToNetwork(), value.ToNetwork())
}
func UpdateRevNat(key RevNatKey, value RevNatValue) error {
mutex.Lock()
defer mutex.Unlock()
return updateRevNatLocked(key, value)
}
func deleteRevNatLocked(key RevNatKey) error {
log.WithField(logfields.BPFMapKey, key).Debug("deleting RevNatKey")
return key.Map().Delete(key.ToNetwork())
}
func DeleteRevNat(key RevNatKey) error {
mutex.Lock()
defer mutex.Unlock()
return deleteRevNatLocked(key)
}
// gcd computes the gcd of two numbers.
func gcd(x, y uint16) uint16
|
// generateWrrSeq generates a wrr sequence based on provided weights.
func generateWrrSeq(weights []uint16) (*RRSeqValue, error) {
svcRRSeq := RRSeqValue{}
n := len(weights)
if n < 2 {
return nil, fmt.Errorf("needs at least 2 weights")
}
g := uint16(0)
for i := 0; i < n; i++ {
if weights[i] != 0 {
g = gcd(g, weights[i])
}
}
// This means all the weights are 0.
if g == 0 {
return nil, fmt.Errorf("all specified weights are 0")
}
sum := uint16(0)
for i := range weights {
// Normalize the weights.
weights[i] = weights[i] / g
sum += weights[i]
}
// Check if Generated seq fits in our array.
if int(sum) > len(svcRRSeq.Idx) {
return nil, fmt.Errorf("sum of normalized weights exceeds %d", len(svcRRSeq.Idx))
}
// Generate the Sequence.
i := uint16(0)
k := uint16(0)
for {
j := uint16(0)
for j < weights[k] {
svcRRSeq.Idx[i] = k
i++
j++
}
if i >= sum {
break
}
k++
}
svcRRSeq.Count = sum
return &svcRRSeq, nil
}
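// Worked example (illustrative): weights {4, 2} have gcd 2 and normalize to
// {2, 1}, so generateWrrSeq yields Idx = [0, 0, 1, ...] with Count = 3,
// i.e. backend 0 is scheduled twice for every slot given to backend 1.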
// updateWrrSeq updates bpf map with the generated wrr sequence.
func updateWrrSeq(fe ServiceKey, weights []uint16) error {
sum := uint16(0)
for _, v := range weights {
sum += v
}
if sum == 0 {
return nil
}
svcRRSeq, err := generateWrrSeq(weights)
if err != nil {
return fmt.Errorf("unable to generate weighted round robin seq for %s with value %+v: %s", fe.String(), weights, err)
}
return updateServiceWeights(fe, svcRRSeq)
}
func updateMasterService(fe ServiceKey, nbackends int, nonZeroWeights uint16) error {
fe.SetBackend(0)
zeroValue := fe.NewValue().(ServiceValue)
zeroValue.SetCount(nbackends)
zeroValue.SetWeight(nonZeroWeights)
return updateServiceEndpoint(fe, zeroValue)
}
// UpdateService adds or updates the given service in the bpf maps (in both -
// legacy and v2).
func UpdateService(fe ServiceKey, backends []ServiceValue,
addRevNAT bool, revNATID int,
isLegacySVCEnabled bool,
acquireBackendID func(loadbalancer.L3n4Addr) (loadbalancer.BackendID, error),
releaseBackendID func(loadbalancer.BackendID)) error {
var (
weights []uint16
nNonZeroWeights uint16
)
// Find out which backends are new (i.e. the ones which do not exist yet and
// will be created in this function) and acquire IDs for them
newBackendIDs, err := acquireNewBackendIDs(backends, acquireBackendID)
if err != nil {
return err
}
mutex.Lock()
defer mutex.Unlock()
// Store mapping of backend addr ID => backend ID in the cache
cache.addBackendIDs(newBackendIDs)
// Prepare the service cache for the updates
svc, addedBackends, removedBackendIDs, err := cache.prepareUpdate(fe, backends)
if err != nil {
return err
}
// FIXME(brb) Uncomment the following code after we have enabled weights
// in the BPF datapath code.
//for _, be := range besValues {
// weights = append(weights, be.GetWeight())
// if be.GetWeight() != 0 {
// nNonZeroWeights++
// }
//}
besValuesV2 := svc.getBackendsV2()
log.WithFields(logrus.Fields{
"frontend": fe,
"backends": besValuesV2,
}).Debugf("Updating BPF representation of service")
// Add the new backends to the BPF maps
if err := updateBackendsLocked(addedBackends); err != nil {
return err
}
if isLegacySVCEnabled {
besValues := svc.getBackends()
// Update the legacy service BPF maps
if err := updateServiceLegacyLocked(fe, besValues, addRevNAT, revNATID,
weights, nNonZeroWeights); err != nil {
return err
}
}
// Update the v2 service BPF maps
if err := updateServiceV2Locked(fe, besValuesV2, svc, addRevNAT, revNATID,
weights, nNonZeroWeights, isLegacySVCEnabled); err != nil {
return err
}
// Delete no longer needed backends
if err := removeBackendsLocked(removedBackendIDs, fe.IsIPv6(),
releaseBackendID); err != nil {
return err
}
return nil
}
func acquireNewBackendIDs(backends []ServiceValue,
acquireBackendID func(loadbalancer.L3n4Addr) (loadbalancer.BackendID, error)) (
map[BackendAddrID]loadbalancer.BackendID, error) {
newBackendsByAddrID := map[BackendAddrID]ServiceValue{}
for _, b := range backends {
newBackendsByAddrID[b.BackendAddrID()] = b
}
newBackendsByAddrID = cache.filterNewBackends(newBackendsByAddrID)
newBackendIDs := map[BackendAddrID]loadbalancer.BackendID{}
for addrID := range newBackendsByAddrID {
addr := *serviceValue2L3n4Addr(newBackendsByAddrID[addrID])
backendID, err := acquireBackendID(addr)
if err != nil {
return nil, fmt.Errorf("Unable to acquire backend ID for %s: %s", addrID, err)
}
newBackendIDs[addrID] = backendID
log.WithFields(logrus.Fields{
logfields.BackendName: addrID,
logfields.BackendID: backendID,
}).Debug("Acquired backend ID")
}
return newBackendIDs, nil
}
func updateBackendsLocked(addedBackends map[loadbalancer.BackendID]ServiceValue) error {
var err error
// Create new backends
for backendID, svcVal := range addedBackends {
var b Backend
if svcVal.IsIPv6() {
svc6Val := svcVal.(*Service6Value)
b, err = NewBackend6(backendID, svc6Val.Address.IP(), svc6Val.Port, u8proto.ANY)
} else {
svc4Val := svcVal.(*Service4Value)
b, err = NewBackend4(backendID, svc4Val.Address.IP(), svc4Val.Port, u8proto.ANY)
}
if err != nil {
return err
}
if err := updateBackend(b); err != nil {
return err
}
}
return nil
}
func updateServiceLegacyLocked(fe ServiceKey, besValues []ServiceValue,
addRevNAT bool, revNATID int,
weights []uint16, nNonZeroWeights uint16) error {
var (
existingCount int
)
	// Check if the service already exists; it is not a failure scenario if
	// the service doesn't exist. That's simply a new service. Even if an
	// existing service cannot be looked up, it is still better to proceed
	// and update the service, at the cost of a slightly less atomic update.
svcValue, err := lookupService(fe)
if err == nil {
existingCount = svcValue.GetCount()
}
// Update the legacy svc entries to point to the backends for the backward
// compatibility
for nsvc, be := range besValues {
fe.SetBackend(nsvc + 1) // service count starts with 1
backendID := cache.getBackendIDByAddrID(be.BackendAddrID())
be.SetCount(int(backendID)) // For the backward-compatibility
if err := updateServiceEndpoint(fe, be); err != nil {
return fmt.Errorf("unable to update service %+v with the value %+v: %s", fe, be, err)
}
}
err = updateMasterService(fe, len(besValues), nNonZeroWeights)
if err != nil {
return fmt.Errorf("unable to update service %+v: %s", fe, err)
}
err = updateWrrSeq(fe, weights)
if err != nil {
return fmt.Errorf("unable to update service weights for %s with value %+v: %s", fe.String(), weights, err)
}
// Remove old backends that are no longer needed
for i := len(besValues) + 1; i <= existingCount; i++ {
fe.SetBackend(i)
if err := deleteServiceLocked(fe); err != nil {
return fmt.Errorf("unable to delete service %+v: %s", fe, err)
}
}
return nil
}
func updateServiceV2Locked(fe ServiceKey, backends map[BackendAddrID]ServiceValue,
svc *bpfService,
addRevNAT bool, revNATID int,
weights []uint16, nNonZeroWeights uint16,
isLegacySVCEnabled bool) error {
var (
existingCount int
svcKeyV2 ServiceKeyV2
)
if fe.IsIPv6() {
svc6Key := fe.(*Service6Key)
svcKeyV2 = NewService6KeyV2(svc6Key.Address.IP(), svc6Key.Port, u8proto.ANY, 0)
} else {
svc4Key := fe.(*Service4Key)
svcKeyV2 = NewService4KeyV2(svc4Key.Address.IP(), svc4Key.Port, u8proto.ANY, 0)
}
svcValV2, err := lookupServiceV2(svcKeyV2)
if err == nil {
existingCount = svcValV2.GetCount()
}
svcValV2 = svcKeyV2.NewValue().(ServiceValueV2)
slot := 1
for addrID, svcVal := range backends {
if isLegacySVCEnabled {
legacySlaveSlot, found := svc.getSlaveSlot(addrID)
if !found {
return fmt.Errorf("Slave slot not found for backend with addrID %s", addrID)
}
svcValV2.SetCount(legacySlaveSlot) // For the backward-compatibility
}
backendID := cache.getBackendIDByAddrID(addrID)
svcValV2.SetBackendID(backendID)
svcValV2.SetRevNat(revNATID)
svcValV2.SetWeight(svcVal.GetWeight())
svcKeyV2.SetSlave(slot)
if err := updateServiceEndpointV2(svcKeyV2, svcValV2); err != nil {
return fmt.Errorf("Unable to update service %+v with the value %+v: %s",
svcKeyV2, svcValV2, err)
}
log.WithFields(logrus.Fields{
logfields.ServiceKey: svcKeyV2,
logfields.ServiceValue: svcValV2,
logfields.SlaveSlot: slot,
}).Debug("Upserted service entry")
slot++
}
if addRevNAT {
zeroValue := fe.NewValue().(ServiceValue)
zeroValue.SetRevNat(revNATID)
revNATKey := zeroValue.RevNatKey()
revNATValue := fe.RevNatValue()
if err := updateRevNatLocked(revNATKey, revNATValue); err != nil {
return fmt.Errorf("unable to update reverse NAT %+v with value %+v, %s", revNATKey, revNATValue, err)
}
defer func() {
if err != nil {
deleteRevNatLocked(revNATKey)
}
}()
}
err = updateMasterServiceV2(svcKeyV2, len(svc.backendsV2), nNonZeroWeights, revNATID)
if err != nil {
return fmt.Errorf("unable to update service %+v: %s", svcKeyV2, err)
}
err = updateWrrSeqV2(svcKeyV2, weights)
if err != nil {
return fmt.Errorf("unable to update service weights for %s with value %+v: %s", svcKeyV2.String(), weights, err)
}
for i := slot; i <= existingCount; i++ {
svcKeyV2.SetSlave(i)
if err := deleteServiceLockedV2(svcKeyV2); err != nil {
return fmt.Errorf("unable to delete service %+v: %s", svcKeyV2, err)
}
log.WithFields(logrus.Fields{
logfields.SlaveSlot: i,
logfields.ServiceKey: svcKeyV2,
}).Debug("Deleted service entry")
}
return nil
}
func removeBackendsLocked(removedBackendIDs []loadbalancer.BackendID, isIPv6 bool,
releaseBackendID func(loadbalancer.BackendID)) error {
var backendKey BackendKey
if isIPv6 {
backendKey = NewBackend6Key(0)
} else {
backendKey = NewBackend4Key(0)
}
for _, backendID := range removedBackendIDs {
backendKey.SetID(backendID)
if err := deleteBackendLocked(backendKey); err != nil {
return fmt.Errorf("Unable to delete backend with ID %d: %s", backendID, err)
}
releaseBackendID(backendID)
log.WithField(logfields.BackendID, backendID).Debug("Deleted backend")
}
return nil
}
// DeleteRevNATBPF deletes the revNAT entry from its corresponding BPF map
// (IPv4 or IPv6) with ID id. Returns an error if the deletion operation failed.
func DeleteRevNATBPF(id loadbalancer.ServiceID, isIPv6 bool) error {
var revNATK RevNatKey
if isIPv6 {
revNATK = NewRevNat6Key(uint16(id))
} else {
revNATK = NewRevNat4Key(uint16(id))
}
err := DeleteRevNat(revNATK)
return err
}
// DumpServiceMapsToUserspace dumps the contents of both the IPv6 and IPv4
// service / loadbalancer BPF maps, and converts them to a SVCMap and slice of
// LBSVC. Returns the errors that occurred while dumping the maps.
func DumpServiceMapsToUserspace() (loadbalancer.SVCMap, []*loadbalancer.LBSVC, []error) {
newSVCMap := loadbalancer.SVCMap{}
newSVCList := []*loadbalancer.LBSVC{}
errors := []error{}
idCache := map[string]loadbalancer.ServiceID{}
parseSVCEntries := func(key bpf.MapKey, value bpf.MapValue) {
svcKey := key.DeepCopyMapKey().(ServiceKey)
svcValue := value.DeepCopyMapValue().(ServiceValue)
// Skip master service
if svcKey.GetBackend() == 0 {
return
}
scopedLog := log.WithFields(logrus.Fields{
logfields.BPFMapKey: svcKey,
logfields.BPFMapValue: svcValue,
})
scopedLog.Debug("parsing service mapping")
fe, be := serviceKeynValue2FEnBE(svcKey, svcValue)
// Build a cache to map frontend IP to service ID. The master
// service key does not have the service ID set so the cache
// needs to be built based on backend key entries.
if k := svcValue.RevNatKey().GetKey(); k != uint16(0) {
idCache[fe.String()] = loadbalancer.ServiceID(k)
}
svc := newSVCMap.AddFEnBE(fe, be, svcKey.GetBackend())
newSVCList = append(newSVCList, svc)
}
mutex.RLock()
defer mutex.RUnlock()
if option.Config.EnableIPv4 {
err := Service4Map.DumpWithCallback(parseSVCEntries)
if err != nil {
errors = append(errors, err)
}
}
if option.Config.EnableIPv6 {
err := Service6Map.DumpWithCallback(parseSVCEntries)
if err != nil {
errors = append(errors, err)
}
}
// serviceKeynValue2FEnBE() cannot fill in the service ID reliably as
// not all BPF map entries contain the service ID. Do a pass over all
// parsed entries and fill in the service ID
for i := range newSVCList {
newSVCList[i].FE.ID = loadbalancer.ID(idCache[newSVCList[i].FE.String()])
}
// Do the same for the svcMap
for key, svc := range newSVCMap {
svc.FE.ID = loadbalancer.ID(idCache[svc.FE.String()])
newSVCMap[key] = svc
}
return newSVCMap, newSVCList, errors
}
// DumpServiceMapsToUserspaceV2 dumps the services in the same way as
// DumpServiceMapsToUserspace.
func DumpServiceMapsToUserspaceV2() (loadbalancer.SVCMap, []*loadbalancer.LBSVC, []error) {
newSVCMap := loadbalancer.SVCMap{}
newSVCList := []*loadbalancer.LBSVC{}
errors := []error{}
idCache := map[string]loadbalancer.ServiceID{}
backendValueMap := map[loadbalancer.BackendID]BackendValue{}
parseBackendEntries := func(key bpf.MapKey, value bpf.MapValue) {
backendKey := key.(BackendKey)
backendValue := value.DeepCopyMapValue().(BackendValue)
backendValueMap[backendKey.GetID()] = backendValue
}
parseSVCEntries := func(key bpf.MapKey, value bpf.MapValue) {
svcKey := key.DeepCopyMapKey().(ServiceKeyV2)
svcValue := value.DeepCopyMapValue().(ServiceValueV2)
// Skip master service
if svcKey.GetSlave() == 0 {
return
}
backendID := svcValue.GetBackendID()
scopedLog := log.WithFields(logrus.Fields{
logfields.BPFMapKey: svcKey,
logfields.BPFMapValue: svcValue,
})
backendValue, found := backendValueMap[backendID]
if !found {
errors = append(errors, fmt.Errorf("backend %d not found", backendID))
return
}
scopedLog.Debug("parsing service mapping v2")
fe, be := serviceKeynValuenBackendValue2FEnBE(svcKey, svcValue, backendID, backendValue)
// Build a cache to map frontend IP to service ID. The master
// service key does not have the service ID set so the cache
// needs to be built based on backend key entries.
if k := svcValue.RevNatKey().GetKey(); k != uint16(0) {
idCache[fe.String()] = loadbalancer.ServiceID(k)
}
svc := newSVCMap.AddFEnBE(fe, be, svcKey.GetSlave())
newSVCList = append(newSVCList, svc)
}
mutex.RLock()
defer mutex.RUnlock()
if option.Config.EnableIPv4 {
// TODO(brb) optimization: instead of dumping the backend map, we can
// pass its content to the function.
err := Backend4Map.DumpWithCallback(parseBackendEntries)
if err != nil {
errors = append(errors, err)
}
err = Service4MapV2.DumpWithCallback(parseSVCEntries)
if err != nil {
errors = append(errors, err)
}
}
if option.Config.EnableIPv6 {
// TODO(brb) same ^^ optimization applies here as well.
err := Backend6Map.DumpWithCallback(parseBackendEntries)
if err != nil {
errors = append(errors, err)
}
err = Service6MapV2.DumpWithCallback(parseSVCEntries)
if err != nil {
errors = append(errors, err)
}
}
// serviceKeynValue2FEnBE() cannot fill in the service ID reliably as
// not all BPF map entries contain the service ID. Do a pass over all
// parsed entries and fill in the service ID
for i := range newSVCList {
newSVCList[i].FE.ID = loadbalancer.ID(idCache[newSVCList[i].FE.String()])
}
// Do the same for the svcMap
for key, svc := range newSVCMap {
svc.FE.ID = loadbalancer.ID(idCache[svc.FE.String()])
newSVCMap[key] = svc
}
return newSVCMap, newSVCList, errors
}
// DumpBackendMapsToUserspace dumps the backend entries from the BPF maps.
func DumpBackendMapsToUserspace() (map[BackendAddrID]*loadbalancer.LBBackEnd, error) {
backendValueMap := map[loadbalancer.BackendID]BackendValue{}
lbBackends := map[BackendAddrID]*loadbalancer.LBBackEnd{}
parseBackendEntries := func(key bpf.MapKey, value bpf.MapValue) {
// No need to deep copy the key because we are using the ID which
// is a value.
backendKey := key.(BackendKey)
backendValue := value.DeepCopyMapValue().(BackendValue)
backendValueMap[backendKey.GetID()] = backendValue
}
if option.Config.EnableIPv4 {
err := Backend4Map.DumpWithCallback(parseBackendEntries)
if err != nil {
return nil, fmt.Errorf("Unable to dump lb4 backends map: %s", err)
}
}
if option.Config.EnableIPv6 {
err := Backend6Map.DumpWithCallback(parseBackendEntries)
if err != nil {
return nil, fmt.Errorf("Unable to dump lb6 backends map: %s", err)
}
}
for backendID, backendVal := range backendValueMap {
ip := backendVal.GetAddress()
port := backendVal.GetPort()
weight := uint16(0) // FIXME(brb): set weight when we support it
proto := loadbalancer.NONE
lbBackend := loadbalancer.NewLBBackEnd(backendID, proto, ip, port, weight)
lbBackends[backendVal.BackendAddrID()] = lbBackend
}
return lbBackends, nil
}
// DumpRevNATMapsToUserspace dumps the contents of both the IPv6 and IPv4
// revNAT BPF maps, and stores the contents of said dumps in a RevNATMap.
// Returns the errors that occurred while dumping the maps.
func DumpRevNATMapsToUserspace() (loadbalancer.RevNATMap, []error) {
newRevNATMap := loadbalancer.RevNATMap{}
errors := []error{}
parseRevNATEntries := func(key bpf.MapKey, value bpf.MapValue) {
revNatK := key.DeepCopyMapKey().(RevNatKey)
revNatV := value.DeepCopyMapValue().(RevNatValue)
scopedLog := log.WithFields(logrus.Fields{
logfields.BPFMapKey: revNatK,
logfields.BPFMapValue: revNatV,
})
scopedLog.Debug("parsing BPF revNAT mapping")
fe := revNatValue2L3n4AddrID(revNatK, revNatV)
newRevNATMap[loadbalancer.ServiceID(fe.ID)] = fe.L3n4Addr
}
mutex.RLock()
defer mutex.RUnlock()
if option.Config.EnableIPv4 {
if err := RevNat4Map.DumpWithCallback(parseRevNATEntries); err != nil {
err = fmt.Errorf("error dumping RevNat4Map: %s", err)
errors = append(errors, err)
}
}
if option.Config.EnableIPv6 {
if err := RevNat6Map.DumpWithCallback(parseRevNATEntries); err != nil {
err = fmt.Errorf("error dumping RevNat6Map: %s", err)
errors = append(errors, err)
}
}
return newRevNATMap, errors
}
// RestoreService restores a single service in the cache. This is required to
// guarantee consistent backend ordering, slave slot and backend by backend
// address ID lookups.
func RestoreService(svc loadbalancer.LBSVC, v2Exists bool) error {
return cache.restoreService(svc, v2Exists)
}
func lookupServiceV2(key ServiceKeyV2) (ServiceValueV2, error) {
val, err := key.Map().Lookup(key.ToNetwork())
if err != nil {
return nil, err
}
svc := val.(ServiceValueV2)
return svc.ToNetwork(), nil
}
func updateMasterServiceV2(fe ServiceKeyV2, nbackends int, nonZeroWeights uint16, revNATID int) error {
fe.SetSlave(0)
zeroValue := fe.NewValue().(ServiceValueV2)
zeroValue.SetCount(nbackends)
zeroValue.SetWeight(nonZeroWeights)
zeroValue.SetRevNat(revNATID)
return updateServiceEndpointV2(fe, zeroValue)
}
// updateWrrSeq updates bpf map with the generated wrr sequence.
func updateWrrSeqV2(fe ServiceKeyV2, weights []uint16) error {
sum := uint16(0)
for _, v := range weights {
sum += v
}
if sum == 0 {
return nil
}
svcRRSeq, err := generateWrrSeq(weights)
if err != nil {
return fmt.Errorf("unable to generate weighted round robin seq for %s with value %+v: %s", fe.String(), weights, err)
}
return updateServiceWeightsV2(fe, svcRRSeq)
}
// updateServiceWeightsV2 updates cilium_lb6_rr_seq_v2 or cilium_lb4_rr_seq_v2 bpf maps.
func updateServiceWeightsV2(key ServiceKeyV2, value *RRSeqValue) error {
if _, err := key.RRMap().OpenOrCreate(); err != nil {
return err
}
return key.RRMap().Update(key.ToNetwork(), value)
}
func deleteServiceLockedV2(key ServiceKeyV2) error {
err := key.Map().Delete(key.ToNetwork())
if err != nil {
return err
}
return lookupAndDeleteServiceWeightsV2(key)
}
// lookupAndDeleteServiceWeightsV2 deletes entry from cilium_lb6_rr_seq or cilium_lb4_rr_seq
func lookupAndDeleteServiceWeightsV2(key ServiceKeyV2) error {
_, err := key.RRMap().Lookup(key.ToNetwork())
if err != nil {
// Ignore if entry is not found.
return nil
}
return key.RRMap().Delete(key.ToNetwork())
}
func updateBackend(backend Backend) error {
if _, err := backend.Map().OpenOrCreate(); err != nil {
return err
}
return backend.Map().Update(backend.GetKey(), backend.GetValue().ToNetwork())
}
func deleteBackendLocked(key BackendKey) error {
return key.Map().Delete(key)
}
func updateServiceEndpointV2(key ServiceKeyV2, value ServiceValueV2) error {
log.WithFields(logrus.Fields{
logfields.ServiceKey: key,
logfields.ServiceValue: value,
logfields.SlaveSlot: key.GetSlave(),
}).Debug("Upserting service entry")
if key.GetSlave() != 0 && value.RevNatKey().GetKey() == 0 {
return fmt.Errorf("invalid RevNat ID (0) in the Service Value")
}
if _, err := key.Map().OpenOrCreate(); err != nil {
return err
}
return key.Map().Update(key.ToNetwork(), value.ToNetwork())
}
// AddBackendIDsToCache populates the given backend IDs to the lbmap local cache.
func AddBackendIDsToCache(backendIDs map[BackendAddrID]loadbalancer.BackendID) {
cache.addBackendIDs(backendIDs)
}
// DeleteServiceV2 deletes a service from the lbmap and deletes backends of it if
// they are not used by any other service.
//
// The given key has to be of the master service.
func DeleteServiceV2(svc loadbalancer.L3n4AddrID, releaseBackendID func(loadbalancer.BackendID)) error {
var (
backendKey BackendKey
svcKey ServiceKeyV2
)
mutex.Lock()
defer mutex.Unlock()
isIPv6 := svc.IsIPv6()
log.WithField(logfields.ServiceName, svc).Debug("Deleting service")
if isIPv6 {
svcKey = NewService6KeyV2(svc.IP, svc.Port, u8proto.ANY, 0)
} else {
svcKey = NewService4KeyV2(svc.IP, svc.Port, u8proto.ANY, 0)
}
backendsToRemove, backendsCount, err := cache.removeServiceV2(svcKey)
if err != nil {
return err
}
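	// Delete the master entry (slot 0) together with every backend slot entry.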
for slot := 0; slot <= backendsCount; slot++ {
svcKey.SetSlave(slot)
if err := svcKey.MapDelete(); err != nil {
return err
}
}
if isIPv6 {
backendKey = NewBackend6Key(0)
} else {
backendKey = NewBackend4Key(0)
}
for _, id := range backendsToRemove {
backendKey.SetID(id)
if err := deleteBackendLocked(backendKey); err != nil {
return fmt.Errorf("Unable to delete backend with ID %d: %s", id, err)
}
releaseBackendID(id)
log.WithField(logfields.BackendID, id).Debug("Deleted backend")
}
return nil
}
// DeleteServiceCache deletes the service cache.
func DeleteServiceCache(svc loadbalancer.L3n4AddrID) {
var svcKey ServiceKey
if !svc.IsIPv6() {
svcKey = NewService4Key(svc.IP, svc.Port, 0)
} else {
svcKey = NewService6Key(svc.IP, svc.Port, 0)
}
cache.delete(svcKey)
}
// DeleteOrphanServiceV2AndRevNAT removes the given service v2 without consulting
// or updating the service cache. Also, it removes the related revNAT entry if
// delRevNAT is set.
//
// This function is used only when restoring services during the launch of
// cilium-agent, and it is used to remove v2 services which have no corresponding
// legacy ones (thus, no cache entries exist).
//
// The function is a copy-paste of the daemon.svcDeleteBPFLegacy, and it will
// go away once we stop supporting the legacy svc.
func DeleteOrphanServiceV2AndRevNAT(svc loadbalancer.L3n4AddrID, delRevNAT bool) error {
var svcKey ServiceKeyV2
if !svc.IsIPv6() {
svcKey = NewService4KeyV2(svc.IP, svc.Port, u8proto.ANY, 0)
} else {
svcKey = NewService6KeyV2(svc.IP, svc.Port, u8proto.ANY, 0)
}
svcKey.SetSlave(0)
mutex.Lock()
defer mutex.Unlock()
// Get count of backends from master.
val, err := svcKey.Map().Lookup(svcKey.ToNetwork())
if err != nil {
return fmt.Errorf("key %s is not in lbmap v2", svcKey.ToNetwork())
}
vval := val.(ServiceValueV2)
numBackends := uint16(vval.GetCount())
// ServiceKeys are unique by their slave number, which corresponds to the number of backends. Delete each of these.
for i := numBackends; i > 0; i-- {
var slaveKey ServiceKeyV2
if !svc.IsIPv6() {
slaveKey = NewService4KeyV2(svc.IP, svc.Port, u8proto.ANY, i)
} else {
slaveKey = NewService6KeyV2(svc.IP, svc.Port, u8proto.ANY, i)
}
log.WithFields(logrus.Fields{
"idx.backend": i,
"key": slaveKey,
}).Debug("deleting backend # for slave ServiceKey v2")
if err := deleteServiceLockedV2(slaveKey); err != nil {
return fmt.Errorf("deleting service v2 failed for %s: %s", slaveKey, err)
}
}
log.WithField(logfields.ServiceID, svc.ID).Debug("done deleting service slaves, now deleting master service")
if err := deleteServiceLockedV2(svcKey); err != nil {
return fmt.Errorf("deleting service failed for %s: %s", svcKey, err)
}
if delRevNAT {
var revNATK RevNatKey
if svc.IsIPv6() {
revNATK = NewRevNat6Key(uint16(svc.ID))
} else {
revNATK = NewRevNat4Key(uint16(svc.ID))
}
// The revNAT entry might not exist, so just log the error instead of
// returning it.
if err := deleteRevNatLocked(revNATK); err != nil {
log.WithField(logfields.ServiceID, svc.ID).WithError(err).
Warning("Failed to delete reverse NAT entry")
}
}
return nil
}
func DeleteOrphanBackends(releaseBackendID func(loadbalancer.BackendID)) []error {
mutex.Lock()
defer mutex.Unlock()
var key BackendKey
errors := make([]error, 0)
toRemove := cache.removeBackendsWithRefCountZero()
for addrID, id := range toRemove {
log.WithField(logfields.BackendID, id).Debug("Removing orphan backend")
if addrID.IsIPv6() {
key = NewBackend6Key(id)
} else {
key = NewBackend4Key(id)
}
if err := deleteBackendLocked(key); err != nil {
			errors = append(errors,
				fmt.Errorf("Unable to remove backend with ID %d from the BPF map: %s",
					id, err))
}
releaseBackendID(id)
}
return errors
}
|
{
for y != 0 {
x, y = y, x%y
}
return x
}
|
selection.rs
|
use super::logs::Logs;
use super::pool::internal::Pool;
use crate::{
blockcfg::{BlockDate, Contents, ContentsBuilder, Ledger, LedgerParameters},
fragment::FragmentId,
};
use chain_core::property::Fragment as _;
use jormungandr_lib::interfaces::FragmentStatus;
pub enum SelectionOutput {
Commit { fragment_id: FragmentId },
RequestSmallerFee,
RequestSmallerSize,
Reject { reason: String },
}
pub trait FragmentSelectionAlgorithm {
fn select(
&mut self,
ledger: &Ledger,
ledger_params: &LedgerParameters,
block_date: BlockDate,
logs: &mut Logs,
pool: &mut Pool,
);
fn finalize(self) -> Contents;
}
#[derive(Debug)]
pub enum FragmentSelectionAlgorithmParams {
OldestFirst,
}
pub struct OldestFirst {
builder: ContentsBuilder,
current_total_size: u32,
}
impl OldestFirst {
pub fn new() -> Self {
OldestFirst {
builder: ContentsBuilder::new(),
current_total_size: 0,
}
}
}
impl FragmentSelectionAlgorithm for OldestFirst {
fn finalize(self) -> Contents {
self.builder.into()
}
fn select(
&mut self,
ledger: &Ledger,
ledger_params: &LedgerParameters,
block_date: BlockDate,
logs: &mut Logs,
pool: &mut Pool,
) {
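        // Simulate on a clone of the ledger so selection never mutates the real state.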
let mut ledger_simulation = ledger.clone();
while let Some(fragment) = pool.remove_oldest() {
let id = fragment.id();
            let fragment_raw = fragment.to_raw(); // TODO: replace everything with FragmentRaw in the node
let fragment_size = fragment_raw.size_bytes_plus_size() as u32;
let total_size = self.current_total_size + fragment_size;
if total_size <= ledger_params.block_content_max_size {
match ledger_simulation.apply_fragment(ledger_params, &fragment, block_date) {
Ok(ledger_new) => {
self.builder.push(fragment);
ledger_simulation = ledger_new;
}
Err(error) => {
use std::error::Error as _;
let error = if let Some(source) = error.source() {
format!("{}: {}", error, source)
} else {
error.to_string()
};
logs.modify(id, FragmentStatus::Rejected { reason: error })
}
}
self.current_total_size = total_size;
if total_size == ledger_params.block_content_max_size {
break;
}
}
}
}
}
| ||
RiskSubjection.py
|
# -*- coding: utf-8 -*-
import math
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import GeoCommonBase as CoordinateBase
# Risk membership function: around an obstacle, the membership value decays
# gradually with direction and speed.
# Improvement: the membership range depends on the speed magnitude.
def riskSubjection(boatPnt,currPnt,boatVelo,boatOrien,evaluTime,impactFactor):
    # Evaluation radius
    evaluDistance=boatVelo*evaluTime
    # Distance between the evaluated point and the boat
    realDistance=CoordinateBase.Distance(boatPnt,currPnt)
    if(realDistance<=evaluDistance):
        # The evaluated point is within range of the boat; return the membership value
        # Vector of currPnt relative to boatPnt
        relativeVector = CoordinateBase.Point(currPnt.x - boatPnt.x, currPnt.y - boatPnt.y)
        # Angle between currPnt and the boat's heading
        interAngle=CoordinateBase.IntersectionAngle(boatOrien,relativeVector) # returns the angle between the heading and the vector
        # Direction influence
        orienFactor = velocityDirectionFactor(interAngle,impactFactor)
        return 1-realDistance/(evaluDistance*orienFactor)
    else:
        return 0
# Velocity-direction influence factor
def velocityDirectionFactor(interangle,impactFactor):
    # Direction influence
    delta = math.cos(CoordinateBase.angle2radian(interangle))
    orienFactor = 1 + impactFactor * (1 / (1 + math.e ** (-delta * 3.5))) ** 3.5 * (1 + delta) # sigmoid-based weighting
    return orienFactor
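# Note: with delta = cos(angle) in [-1, 1], orienFactor ranges from 1 (directly
# behind the boat) up to roughly 1 + 1.8*impactFactor (directly ahead), which
# stretches the risk field forward along the heading.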
# Compute the membership value without considering the velocity direction
def subordinateFunctionW
|
basePnt
return 1-CoordinateBase.Distance(basePnt,currPnt)/semidiameter
else:
return 0
# Rotation-about-a-point formula
def RotationWayPnt(rotIni_x,rotIni_y,edit_x,edit_y,rotaAngle):
    # Rotate point edit counterclockwise about origin rotIni by angle rotaAngle (degrees)
Rotaradian=rotaAngle*math.pi/180
newX=(edit_x-rotIni_x)*math.cos(Rotaradian)-(edit_y-rotIni_y)*math.sin(Rotaradian)+rotIni_x
newY=(edit_x-rotIni_x)*math.sin(Rotaradian)+(edit_y-rotIni_y)*math.cos(Rotaradian)+rotIni_y
return CoordinateBase.Point(newX,newY)
if __name__=="__main__":
boatLocation=CoordinateBase.Point(0,0)
currLocation=CoordinateBase.Point(10,10)
boatVelo=10
boatOrien=45
evaluTime=10
impactFactor=0.7
subjection=riskSubjection(boatLocation,currLocation,boatVelo,boatOrien,evaluTime,impactFactor)
print (subjection)
    # Plot contours
    fig = plt.figure(1)  # create figure 1
ax = Axes3D(fig)
X = np.arange(-150, 150, 2)
Y = np.arange(-150, 150, 2)
X, Y = np.meshgrid(X, Y)
zs = np.array([riskSubjection(boatLocation,CoordinateBase.Point(x, y),boatVelo,boatOrien,evaluTime,impactFactor) for x, y in zip(np.ravel(X), np.ravel(Y))])
Z = zs.reshape(X.shape)
surf = ax.plot_surface(X, Y, Z, rstride=1, cstride=1, cmap='hot')
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
    contourfig = plt.figure(2)  # create figure 2, the contour plot
coutax = contourfig.add_subplot(1, 1, 1)
#plt.text(15, -13, "V", fontsize=15, verticalalignment="bottom", horizontalalignment="left")
plt.contour(X, Y, Z,20)
# coutax.set_xlabel('X Label')
# coutax.set_ylabel('Y Label')
    ratioPlt = plt.figure(3)  # create figure 3, the direction-factor plot
ax2 = ratioPlt.add_subplot(3, 3, 3)
x=0
while x<math.pi:
orienFactor = velocityDirectionFactor(x*180/math.pi,impactFactor)
        ax2.scatter(x, orienFactor, c='r', marker='.')  # factor vs. angle
x+=math.pi/100
plt.show()
|
ithoutOri(basePnt,currPnt,semidiameter):
    # Evaluation radius semidiameter
    # Check whether the point lies within the radius semidiameter
    if(CoordinateBase.Distance(basePnt,currPnt)<=semidiameter):
        # Within the radius, call the membership function; use the vector of currPnt relative to
|
model23.py
|
"""
Exactly equal to Model21 (the best results so far), but with different configurations.
Exactly based on Model10, but ReLU to GeLU
Based on Model8, add dropout and max, avg combine.
Based on Local model, add residual connections.
The extraction is doubled for depth.
Learning Point Cloud with Progressively Local representation.
[B,3,N] - {[B,G,K,d]-[B,G,d]} - {[B,G',K,d]-[B,G',d]} -cls
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import einsum
from einops import rearrange, repeat
from pointnet2_ops import pointnet2_utils
def square_distance(src, dst):
"""
Calculate Euclid distance between each two points.
src^T * dst = xn * xm + yn * ym + zn * zm;
sum(src^2, dim=-1) = xn*xn + yn*yn + zn*zn;
sum(dst^2, dim=-1) = xm*xm + ym*ym + zm*zm;
dist = (xn - xm)^2 + (yn - ym)^2 + (zn - zm)^2
= sum(src**2,dim=-1)+sum(dst**2,dim=-1)-2*src^T*dst
Input:
src: source points, [B, N, C]
dst: target points, [B, M, C]
Output:
dist: per-point square distance, [B, N, M]
"""
B, N, _ = src.shape
_, M, _ = dst.shape
dist = -2 * torch.matmul(src, dst.permute(0, 2, 1))
dist += torch.sum(src ** 2, -1).view(B, N, 1)
dist += torch.sum(dst ** 2, -1).view(B, 1, M)
return dist
def index_points(points, idx):
"""
Input:
points: input points data, [B, N, C]
idx: sample index data, [B, S]
Return:
new_points:, indexed points data, [B, S, C]
"""
device = points.device
B = points.shape[0]
view_shape = list(idx.shape)
view_shape[1:] = [1] * (len(view_shape) - 1)
repeat_shape = list(idx.shape)
repeat_shape[0] = 1
batch_indices = torch.arange(B, dtype=torch.long).to(device).view(view_shape).repeat(repeat_shape)
new_points = points[batch_indices, idx, :]
return new_points
def farthest_point_sample(xyz, npoint):
"""
Input:
xyz: pointcloud data, [B, N, 3]
npoint: number of samples
Return:
centroids: sampled pointcloud index, [B, npoint]
"""
device = xyz.device
B, N, C = xyz.shape
centroids = torch.zeros(B, npoint, dtype=torch.long).to(device)
distance = torch.ones(B, N).to(device) * 1e10
farthest = torch.randint(0, N, (B,), dtype=torch.long).to(device)
batch_indices = torch.arange(B, dtype=torch.long).to(device)
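    # Greedily pick, npoint times, the point farthest from everything selected so far.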
for i in range(npoint):
centroids[:, i] = farthest
centroid = xyz[batch_indices, farthest, :].view(B, 1, 3)
dist = torch.sum((xyz - centroid) ** 2, -1)
distance = torch.min(distance, dist)
farthest = torch.max(distance, -1)[1]
return centroids
def query_ball_point(radius, nsample, xyz, new_xyz):
"""
Input:
radius: local region radius
nsample: max sample number in local region
xyz: all points, [B, N, 3]
new_xyz: query points, [B, S, 3]
Return:
group_idx: grouped points index, [B, S, nsample]
"""
device = xyz.device
B, N, C = xyz.shape
_, S, _ = new_xyz.shape
group_idx = torch.arange(N, dtype=torch.long).to(device).view(1, 1, N).repeat([B, S, 1])
sqrdists = square_distance(new_xyz, xyz)
group_idx[sqrdists > radius ** 2] = N
group_idx = group_idx.sort(dim=-1)[0][:, :, :nsample]
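    # Replace out-of-radius slots (sentinel N) with the group's first (nearest) index.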
group_first = group_idx[:, :, 0].view(B, S, 1).repeat([1, 1, nsample])
mask = group_idx == N
group_idx[mask] = group_first[mask]
return group_idx
def knn_point(nsample, xyz, new_xyz):
"""
Input:
nsample: max sample number in local region
xyz: all points, [B, N, C]
new_xyz: query points, [B, S, C]
Return:
group_idx: grouped points index, [B, S, nsample]
"""
sqrdists = square_distance(new_xyz, xyz)
_, group_idx = torch.topk(sqrdists, nsample, dim=-1, largest=False, sorted=False)
return group_idx
class LocalGrouper(nn.Module):
def __init__(self, groups, kneighbors, **kwargs):
"""
Give xyz[b,p,3] and fea[b,p,d], return new_xyz[b,g,3] and new_fea[b,g,k,2d]
:param groups: groups number
        :param kneighbors: k-neighbors
:param kwargs: others
"""
super(LocalGrouper, self).__init__()
self.groups = groups
self.kneighbors = kneighbors
def forward(self, xyz, points):
B, N, C = xyz.shape
S = self.groups
        xyz = xyz.contiguous()  # xyz [batch, points, xyz]
# fps_idx = farthest_point_sample(xyz, self.groups).long()
fps_idx = pointnet2_utils.furthest_point_sample(xyz, self.groups).long() # [B, npoint]
new_xyz = index_points(xyz, fps_idx)
new_points = index_points(points, fps_idx)
idx = knn_point(self.kneighbors, xyz, new_xyz)
# idx = query_ball_point(radius, nsample, xyz, new_xyz)
# grouped_xyz = index_points(xyz, idx) # [B, npoint, nsample, C]
grouped_points = index_points(points, idx)
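        # Center each neighborhood on its sampled anchor, then concatenate the anchor feature back on.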
grouped_points_norm = grouped_points - new_points.view(B, S, 1, -1)
new_points = torch.cat([grouped_points_norm,
new_points.view(B, S, 1, -1).repeat(1, 1, self.kneighbors, 1)]
, dim=-1)
return new_xyz, new_points
class FCBNReLU1D(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size=1, bias=False):
super(FCBNReLU1D, self).__init__()
self.net = nn.Sequential(
nn.Conv1d(in_channels=in_channels, out_channels=out_channels, kernel_size=kernel_size, bias=bias),
nn.BatchNorm1d(out_channels),
nn.GELU()
)
def forward(self, x):
return self.net(x)
class FCBNReLU1DRes(nn.Module):
def __init__(self, channel, kernel_size=1, bias=False):
super(FCBNReLU1DRes, self).__init__()
self.net = nn.Sequential(
nn.Conv1d(in_channels=channel, out_channels=channel, kernel_size=kernel_size, bias=bias),
nn.BatchNorm1d(channel),
nn.GELU(),
nn.Conv1d(in_channels=channel, out_channels=channel, kernel_size=kernel_size, bias=bias),
nn.BatchNorm1d(channel)
)
def forward(self, x):
return F.gelu(self.net(x)+x)
class Attention(nn.Module):
def __init__(self, dim, heads = 8, dim_head = 32, dropout = 0.):
super().__init__()
inner_dim = dim_head * heads
# project_out = not (heads == 1 and dim_head == dim)
self.heads = heads
self.scale = dim_head ** -0.5
self.attend = nn.Softmax(dim = -1)
self.to_qkv = nn.Linear(dim, inner_dim * 3, bias = False)
self.to_out = nn.Sequential(
nn.Conv1d(inner_dim, dim,1),
nn.BatchNorm1d(dim)
)
def forward(self, x):
x = x.permute(0,2,1)
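        # [b, d, n] -> [b, n, d] so the linear qkv projection acts along the channel dim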
b, n, _, h = *x.shape, self.heads
qkv = self.to_qkv(x).chunk(3, dim = -1)
q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = h), qkv)
dots = einsum('b h i d, b h j d -> b h i j', q, k) * self.scale
attn = self.attend(dots)
out = einsum('b h i j, b h j d -> b h i d', attn, v)
out = rearrange(out, 'b h n d -> b (h d) n')
return self.to_out(out)
class TransformerBlock(nn.Module):
def __init__(self, dim, heads=8, dim_head=32, **kwargs):
"""
[b batch, d dimension, k points]
:param dim: input data dimension
:param heads: heads number
:param dim_head: dimension in each head
:param kwargs:
"""
super(TransformerBlock, self).__init__()
self.attention = Attention(dim=dim, heads=heads, dim_head=dim_head)
self.ffn = nn.Sequential(
nn.Conv1d(dim, dim, 1, bias=False),
nn.BatchNorm1d(dim)
)
def forward(self, x):
"""
:input x: [b batch, d dimension, p points,]
:return: [b batch, d dimension, p points,]
"""
att = self.attention(x)
att = F.gelu(att+x)
out = self.ffn(att)
out = F.gelu(att+out)
return out
class PreExtraction(nn.Module):
def __init__(self, channels, blocks=1):
"""
        input: [b,g,k,d]; output: [b,d,g]
:param channels:
:param blocks:
"""
super(PreExtraction, self).__init__()
operation = []
for _ in range(blocks):
operation.append(
FCBNReLU1DRes(channels)
)
self.operation = nn.Sequential(*operation)
self.transformer = TransformerBlock(channels, heads=4)
def forward(self, x):
b, n, s, d = x.size() # torch.Size([32, 512, 32, 6])
x = x.permute(0, 1, 3, 2)
x = x.reshape(-1, d, s)
batch_size, _, N = x.size()
x = self.operation(x) # [b, d, k]
x = self.transformer(x)
x = F.adaptive_max_pool1d(x, 1).view(batch_size, -1)
x = x.reshape(b, n, -1).permute(0, 2, 1)
return x
class PosExtraction(nn.Module):
def __init__(self, channels, blocks=1):
"""
input[b,d,g]; output[b,d,g]
:param channels:
:param blocks:
"""
super(PosExtraction, self).__init__()
operation = []
for _ in range(blocks):
operation.append(
FCBNReLU1DRes(channels)
)
self.operation = nn.Sequential(*operation)
self.transformer = TransformerBlock(channels, heads=4)
def forward(self, x): # [b, d, k]
return self.transformer(self.operation(x))
class Mo
|
n.Module):
def __init__(self, points=1024, class_num=40, embed_dim=64,
pre_blocks=[2,2,2,2], pos_blocks=[2,2,2,2], k_neighbors=[32,32,32,32],
reducers=[2,2,2,2], **kwargs):
super(Model23, self).__init__()
self.stages = len(pre_blocks)
self.class_num = class_num
self.points=points
self.embedding = nn.Sequential(
FCBNReLU1D(3, embed_dim),
FCBNReLU1D(embed_dim, embed_dim)
)
assert len(pre_blocks)==len(k_neighbors)==len(reducers)==len(pos_blocks), \
"Please check stage number consistent for pre_blocks, pos_blocks k_neighbors, reducers."
self.local_grouper_list = nn.ModuleList()
self.pre_blocks_list = nn.ModuleList()
self.pos_blocks_list = nn.ModuleList()
last_channel = embed_dim
anchor_points = self.points
for i in range(len(pre_blocks)):
out_channel = last_channel*2
pre_block_num=pre_blocks[i]
pos_block_num = pos_blocks[i]
kneighbor = k_neighbors[i]
reduce = reducers[i]
anchor_points = anchor_points//reduce
# append local_grouper_list
local_grouper = LocalGrouper(anchor_points, kneighbor) #[b,g,k,d]
self.local_grouper_list.append(local_grouper)
# append pre_block_list
pre_block_module = PreExtraction(out_channel, pre_block_num)
self.pre_blocks_list.append(pre_block_module)
# append pos_block_list
pos_block_module = PosExtraction(out_channel, pos_block_num)
self.pos_blocks_list.append(pos_block_module)
last_channel = out_channel
self.classifier = nn.Sequential(
nn.Linear(last_channel*2, 512),
nn.BatchNorm1d(512),
nn.GELU(),
nn.Dropout(0.5),
nn.Linear(512, 256),
nn.BatchNorm1d(256),
nn.GELU(),
nn.Dropout(0.5),
nn.Linear(256, self.class_num)
)
def forward(self, x):
xyz = x.permute(0, 2, 1)
batch_size, _, _ = x.size()
x = self.embedding(x) # B,D,N
for i in range(self.stages):
xyz, x = self.local_grouper_list[i](xyz, x.permute(0, 2, 1)) # [b,g,3] [b,g,k,d]
x = self.pre_blocks_list[i](x) # [b,d,g]
x = self.pos_blocks_list[i](x) # [b,d,g]
x_max = F.adaptive_max_pool1d(x,1).squeeze(dim=-1)
x_mean = x.mean(dim=-1,keepdim=False)
x = torch.cat([x_max, x_mean], dim=-1)
x = self.classifier(x)
return x
def model23A(num_classes=40, **kwargs) -> Model23: # 19201MiB
return Model23(points=1024, class_num=num_classes, embed_dim=128,
pre_blocks=[2,2], pos_blocks=[2,2], k_neighbors=[32,32],
reducers=[4,4], **kwargs)
def model23B(num_classes=40, **kwargs) -> Model23: # 19185MiB
return Model23(points=1024, class_num=num_classes, embed_dim=128,
pre_blocks=[1,1], pos_blocks=[1,1], k_neighbors=[32,32],
reducers=[4,4], **kwargs)
def model23C(num_classes=40, **kwargs) -> Model23: # 19537MiB
return Model23(points=1024, class_num=num_classes, embed_dim=128,
pre_blocks=[2,2,2], pos_blocks=[2,2,2], k_neighbors=[32,32,32],
reducers=[4,2,2], **kwargs)
def model23D(num_classes=40, **kwargs) -> Model23: # 31927MiB
return Model23(points=1024, class_num=num_classes, embed_dim=128,
pre_blocks=[2,2,2], pos_blocks=[2,2,2], k_neighbors=[16,32,32],
reducers=[2,2,2], **kwargs)
def model23E(num_classes=40, **kwargs) -> Model23: # 19215MiB # 93.476% on vis sever
return Model23(points=1024, class_num=num_classes, embed_dim=128,
pre_blocks=[3,3], pos_blocks=[3,3], k_neighbors=[32,32],
reducers=[4,4], **kwargs)
def model23F(num_classes=40, **kwargs) -> Model23: # 6437MiB
return Model23(points=1024, class_num=num_classes, embed_dim=128,
pre_blocks=[2,2], pos_blocks=[2,2], k_neighbors=[16,16],
reducers=[4,4], **kwargs)
def model23G(num_classes=40, **kwargs) -> Model23: # 19201MiB
return Model23(points=1024, class_num=num_classes, embed_dim=128,
pre_blocks=[2,2], pos_blocks=[2,2], k_neighbors=[24,24],
reducers=[4,4], **kwargs)
# don't train H, it is the same as model21H
def model23H(num_classes=40, **kwargs) -> Model23:
return Model23(points=1024, class_num=num_classes, embed_dim=128,
pre_blocks=[4,4], pos_blocks=[4,4], k_neighbors=[32,32],
reducers=[4,4], **kwargs)
def model23I(num_classes=40, **kwargs) -> Model23: # 20283MiB
return Model23(points=1024, class_num=num_classes, embed_dim=256,
pre_blocks=[2,2], pos_blocks=[2,2], k_neighbors=[32,32],
reducers=[4,4], **kwargs)
# Extremely large model, 101 layers in total.
def model23J(num_classes=40, **kwargs) -> Model23: # 24999MiB
return Model23(points=1024, class_num=num_classes, embed_dim=128,
pre_blocks=[4,4,4,4], pos_blocks=[4,4,4,4], k_neighbors=[16,16,16,16],
reducers=[4,2,2,2], **kwargs)
# Also an extremely large model, 101 layers in total.
def model23K(num_classes=40, **kwargs) -> Model23:
return Model23(points=1024, class_num=num_classes, embed_dim=128,
pre_blocks=[10,10], pos_blocks=[10,10], k_neighbors=[32,32],
reducers=[4,4], **kwargs)
if __name__ == '__main__':
data = torch.rand(2,128,10)
att = Attention(128)
out = att(data)
print(out.shape)
batch, groups,neighbors,dim=2,512,32,16
x = torch.rand(batch,groups,neighbors,dim)
pre_extractor = PreExtraction(dim,3)
out = pre_extractor(x)
print(out.shape)
x = torch.rand(batch, dim, groups)
pos_extractor = PosExtraction(dim, 3)
out = pos_extractor(x)
print(out.shape)
data = torch.rand(2, 3, 1024)
print("===> testing model ...")
model = Model23()
out = model(data)
print(out.shape)
print("===> testing modelE ...")
model = model23E()
out = model(data)
print(out.shape)
|
del23(n
|
all.js
|
'use strict';
var path = require('path');
var rootPath = path.normalize(__dirname + '/../../..');
console.log('root path is', rootPath);
module.exports = {
root: rootPath,
port: process.env.PORT || 3000,
mongo: {
options: {
db: {
safe: true
}
|
}
}
};
|
|
simplify.go
|
package planar
import "github.com/go-spatial/geom"
// Simplifer is an interface for simplifying geometries.
type Simplifer interface {
Simplify(linestring [][2]float64, isClosed bool) ([][2]float64, error)
}
func simplifyPolygon(simplifer Simplifer, plg [][][2]float64, isClosed bool) (ret [][][2]float64, err error)
|
// Simplify will simplify the provided geometry using the provided simplifer.
// If the simplifer is nil, no simplification will be attempted.
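// A minimal usage sketch (illustrative only; douglasPeucker stands in for any
// concrete Simplifer implementation):
//
//	var s Simplifer = douglasPeucker{} // hypothetical type
//	simplified, err := Simplify(s, geom.LineString{{0, 0}, {1, 0.001}, {2, 0}})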
func Simplify(simplifer Simplifer, geometry geom.Geometry) (geom.Geometry, error) {
if simplifer == nil {
return geometry, nil
}
switch gg := geometry.(type) {
case geom.Collectioner:
geos := gg.Geometries()
coll := make([]geom.Geometry, len(geos))
for i := range geos {
geo, err := Simplify(simplifer, geos[i])
if err != nil {
return nil, err
}
coll[i] = geo
}
return geom.Collection(coll), nil
case geom.MultiPolygoner:
plys := gg.Polygons()
mply := make([][][][2]float64, len(plys))
for i := range plys {
ply, err := simplifyPolygon(simplifer, plys[i], true)
if err != nil {
return nil, err
}
mply[i] = ply
}
return geom.MultiPolygon(mply), nil
case geom.Polygoner:
ply, err := simplifyPolygon(simplifer, gg.LinearRings(), true)
if err != nil {
return nil, err
}
return geom.Polygon(ply), nil
case geom.MultiLineStringer:
mls, err := simplifyPolygon(simplifer, gg.LineStrings(), false)
if err != nil {
return nil, err
}
return geom.MultiLineString(mls), nil
case geom.LineStringer:
ls, err := simplifer.Simplify(gg.Verticies(), false)
if err != nil {
return nil, err
}
return geom.LineString(ls), nil
	default: // Points, MultiPoints or anything else.
return geometry, nil
}
}
|
{
ret = make([][][2]float64, len(plg))
for i := range plg {
ls, err := simplifer.Simplify(plg[i], isClosed)
if err != nil {
return nil, err
}
ret[i] = ls
}
return ret, nil
}
|
proxy.go
|
/*
* Copyright (c) 2017, MegaEase
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package websocketserver
import (
"context"
"fmt"
"io"
"net"
"net/http"
"net/url"
"strings"
"time"
"github.com/gorilla/websocket"
"github.com/megaease/easegress/pkg/logger"
"github.com/megaease/easegress/pkg/supervisor"
)
const (
xForwardedFor = "X-Forwarded-For"
xForwardedHost = "X-Forwarded-Host"
xForwardedProto = "X-Forwarded-Proto"
)
// headersToSkip are request headers that the gorilla library sets itself; our websocket proxy should not set them.
var headersToSkip = map[string]struct{}{
"Upgrade": {},
"Connection": {},
"Sec-Websocket-Key": {},
"Sec-Websocket-Version": {},
"Sec-Websocket-Extensions": {},
"Sec-Websocket-Protocol": {},
}
var (
// defaultUpgrader specifies the parameters for upgrading an HTTP
// connection to a WebSocket connection.
defaultUpgrader = &websocket.Upgrader{
ReadBufferSize: 1024,
WriteBufferSize: 1024,
}
// defaultDialer is a dialer with all fields set to the default zero values.
defaultDialer = websocket.DefaultDialer
	// defaultInterval is the default interval for polling the websocket client
	// and server; currently 200ms.
defaultInterval = 200 * time.Millisecond
)
// Proxy is a handler that takes an incoming WebSocket
// connection and proxies it to the backend server.
type Proxy struct {
// server is the HTTPServer
server http.Server
superSpec *supervisor.Spec
// backendURL URL is the URL of target websocket server.
backendURL *url.URL
// upgrader specifies the parameters for upgrading an incoming HTTP
// connection to a WebSocket connection.
upgrader *websocket.Upgrader
// dialer contains options for connecting to the backend WebSocket server.
dialer *websocket.Dialer
	// done is the channel for shutting down this proxy.
done chan struct{}
}
// newProxy returns a new WebSocket proxy.
func newProxy(superSpec *supervisor.Spec) *Proxy {
proxy := &Proxy{
superSpec: superSpec,
done: make(chan struct{}),
}
go proxy.run()
return proxy
}
// buildRequestURL builds a URL from the backend in spec and the original HTTP request.
func (p *Proxy) buildRequestURL(r *http.Request) *url.URL {
u := *p.backendURL
u.Fragment = r.URL.Fragment
u.Path = r.URL.Path
u.RawQuery = r.URL.RawQuery
return &u
}
// passMsg passes websocket message from src to dst.
func (p *Proxy) passMsg(src, dst *websocket.Conn, errc chan error, stop chan struct{}) {
handle := func() bool {
msgType, msg, err := src.ReadMessage()
if err != nil {
m := websocket.FormatCloseMessage(websocket.CloseNormalClosure, fmt.Sprintf("%v", err))
if e, ok := err.(*websocket.CloseError); ok {
if e.Code != websocket.CloseNoStatusReceived {
m = websocket.FormatCloseMessage(e.Code, e.Text)
}
}
dst.WriteMessage(websocket.CloseMessage, m)
errc <- err
return false
}
err = dst.WriteMessage(msgType, msg)
if err != nil {
errc <- err
return false
}
return true
}
for {
select {
// this request handling is stopped due to some error or websocketserver shutdown.
case <-stop:
return
case <-time.After(defaultInterval):
if !handle() {
return
}
}
}
}
// run runs the websocket proxy.
func (p *Proxy) run() {
spec := p.superSpec.ObjectSpec().(*Spec)
backendURL, err := url.Parse(spec.Backend)
if err != nil {
logger.Errorf("BUG: %s get invalid websocketserver backend URL: %s",
p.superSpec.Name(), spec.Backend)
return
}
p.backendURL = backendURL
dialer := defaultDialer
if strings.HasPrefix(spec.Backend, "wss") {
tlsConfig, err := spec.wssTLSConfig()
if err != nil {
logger.Errorf("%s gen websocketserver backend tls failed: %v, spec :%#v",
p.superSpec.Name(), spec)
return
}
dialer.TLSClientConfig = tlsConfig
}
p.dialer = dialer
p.upgrader = defaultUpgrader
mux := http.NewServeMux()
mux.HandleFunc("/", p.handle)
addr := fmt.Sprintf(":%d", spec.Port)
p.server.Addr = addr
p.server.Handler = mux
if spec.HTTPS {
tlsConfig, err := spec.tlsConfig()
if err != nil {
logger.Errorf("%s gen websocketserver's httpserver tlsConfig: %#v, failed: %v",
p.superSpec.Name(), spec, err)
}
p.server.TLSConfig = tlsConfig
}
if p.server.TLSConfig != nil {
if err := p.server.ListenAndServeTLS("", ""); err != nil {
logger.Errorf("%s websocketserver ListenAndServeTLS failed: %v", p.superSpec.Name(), err)
}
} else {
if err := p.server.ListenAndServe(); err != nil {
logger.Errorf("%s websocketserver ListenAndServe failed: %v", p.superSpec.Name(), err)
}
}
}
// copyHeader copies headers from the incoming request to the dialer and forward them to
// the destination.
func (p *Proxy) copyHeader(req *http.Request) http.Header {
// Based on https://docs.oracle.com/en-us/iaas/Content/Balance/Reference/httpheaders.htm
// For load balancer, we add following key-value pairs to headers
// X-Forwarded-For: <original_client>, <proxy1>, <proxy2>
// X-Forwarded-Host: www.example.com:8080
// X-Forwarded-Proto: https
	// A new client connection is created using the [Gorilla websocket library](https://github.com/gorilla/websocket), which takes care of some of the headers.
	// Let's copy all headers from the incoming request, except the ones gorilla will set.
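	// For example (illustrative values only), a TLS request from client 10.0.0.7
	// to host www.example.com:8080 would end up carrying:
	//   X-Forwarded-For: 10.0.0.7
	//   X-Forwarded-Host: www.example.com:8080
	//   X-Forwarded-Proto: https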
requestHeader := http.Header{}
for k, values := range req.Header {
if _, ok := headersToSkip[k]; ok {
continue
}
for _, v := range values {
requestHeader.Add(k, v)
}
}
xff := requestHeader.Get(xForwardedFor)
if clientIP, _, err := net.SplitHostPort(req.RemoteAddr); err == nil {
if xff == "" {
requestHeader.Set(xForwardedFor, clientIP)
} else {
requestHeader.Set(xForwardedFor, fmt.Sprintf("%s, %s", xff, clientIP))
}
}
xfh := requestHeader.Get(xForwardedHost)
if xfh == "" && req.Host != "" {
requestHeader.Set(xForwardedHost, req.Host)
}
requestHeader.Set(xForwardedProto, "http")
if req.TLS != nil {
requestHeader.Set(xForwardedProto, "https")
}
return requestHeader
}
// upgradeRspHeader passes only selected headers as return.
func (p *Proxy) upgradeRspHeader(resp *http.Response) http.Header {
upgradeHeader := http.Header{}
if hdr := resp.Header.Get("Sec-Websocket-Protocol"); hdr != "" {
upgradeHeader.Set("Sec-Websocket-Protocol", hdr)
}
if hdr := resp.Header.Get("Set-Cookie"); hdr != "" {
upgradeHeader.Set("Set-Cookie", hdr)
}
return upgradeHeader
}
// handle implements the http.Handler that proxies WebSocket connections.
func (p *Proxy) handle(rw http.ResponseWriter, req *http.Request) {
connBackend, resp, err := p.dialer.Dial(p.buildRequestURL(req).String(), p.copyHeader(req))
if err != nil {
logger.Errorf("%s dials %s failed: %v", p.superSpec.Name(), p.backendURL.String(), err)
if resp != nil {
// Handle WebSocket handshake failed scenario.
// Should send back a non-nil *http.Response for callers to handle
// `redirects`, `authentication` operations and so on.
if err := copyResponse(rw, resp); err != nil {
logger.Errorf("%s writes response failed at remote backend: %s handshake: %v",
p.superSpec.Name(), p.backendURL.String(), err)
}
} else {
http.Error(rw, http.StatusText(http.StatusServiceUnavailable), http.StatusServiceUnavailable)
}
return
}
defer connBackend.Close()
// Upgrade the incoming request to a WebSocket connection(Protocol Switching).
// Also pass the header from the Dial handshake.
connClient, err := p.upgrader.Upgrade(rw, req, p.upgradeRspHeader(resp))
if err != nil {
logger.Errorf("%s upgrades req: %#v failed: %s", p.superSpec.Name(), err)
return
}
defer connClient.Close()
errClient := make(chan error, 1)
errBackend := make(chan error, 1)
stop := make(chan struct{})
defer close(stop)
// pass msg from backend to client via WebSocket protocol.
go p.passMsg(connBackend, connClient, errBackend, stop)
// pass msg from client to backend via WebSocket protocol.
go p.passMsg(connClient, connBackend, errClient, stop)
var errMsg string
select {
case err = <-errBackend:
errMsg = "%s passes msg from backend: %s to client failed: %v"
case err = <-errClient:
errMsg = "%s passes msg client to backend: %s failed: %v"
case <-p.done:
logger.Debugf("shutdown websocketserver in request handling")
return
}
if e, ok := err.(*websocket.CloseError); !ok || e.Code == websocket.CloseAbnormalClosure {
logger.Errorf(errMsg, p.superSpec.Name(), p.backendURL.String(), err)
}
	// Other error types are expected; no need to log them.
}
// Close closes websocket proxy.
func (p *Proxy) Close() {
close(p.done)
ctx, cancelFunc := context.WithTimeout(context.Background(), 30*time.Second)
defer cancelFunc()
err := p.server.Shutdown(ctx)
if err != nil {
logger.Warnf("%s shutdowns http server failed: %v",
p.superSpec.Name(), err)
}
}
func
|
(rw http.ResponseWriter, resp *http.Response) error {
for k, vv := range resp.Header {
for _, v := range vv {
rw.Header().Add(k, v)
}
}
rw.WriteHeader(resp.StatusCode)
defer resp.Body.Close()
_, err := io.Copy(rw, resp.Body)
return err
}
|
copyResponse
|
machine.rs
|
use crate::instruction::{Instruction, State, Step};
pub struct
|
{
tape: Vec<char>,
state: State,
blank: char,
head: i32,
program: Vec<Instruction>,
}
impl Machine {
pub fn new(
initial_state: State,
blank: char,
head: i32,
program: Vec<Instruction>,
tape: Vec<char>,
) -> Self {
Self {
tape,
state: initial_state,
blank,
head,
program,
}
}
pub fn next(&mut self) -> bool {
for i in &self.program {
if i.state == self.state && i.read == *self.read() {
self.tape[self.head as usize] = i.write;
match i.step {
Step::L => {
                        self.head -= 1;
if self.head == -1 {
self.tape.insert(0, self.blank);
self.head = 0;
}
}
Step::R => {
                        self.head += 1;
if self.head as usize >= self.tape.len() {
self.tape.push(self.blank);
}
}
Step::N => {}
}
self.state = i.next.clone();
return true;
}
}
false
}
pub fn print(&mut self) {
let to_string = |vc: &Vec<char>| vc.iter().collect::<String>();
let mut head: Vec<char> = vec![' '; self.tape.len()];
head.insert(self.head as usize, '^');
head.pop();
println!("({})\t{}", self.state, to_string(&self.tape));
println!("\t{}", to_string(&head));
}
fn read(&self) -> &char {
&self.tape[self.head as usize]
}
}
|
Machine
|
test_drop_duplicates.py
|
import numpy as np
import pytest
from pandas import (
NA,
Categorical,
Series,
)
import pandas._testing as tm
@pytest.mark.parametrize(
"keep, expected",
[
("first", Series([False, False, False, False, True, True, False])),
("last", Series([False, True, True, False, False, False, False])),
(False, Series([False, True, True, False, True, True, False])),
],
)
def test_drop_duplicates(any_numpy_dtype, keep, expected):
tc = Series([1, 0, 3, 5, 3, 0, 4], dtype=np.dtype(any_numpy_dtype))
if tc.dtype == "bool":
pytest.skip("tested separately in test_drop_duplicates_bool")
tm.assert_series_equal(tc.duplicated(keep=keep), expected)
tm.assert_series_equal(tc.drop_duplicates(keep=keep), tc[~expected])
sc = tc.copy()
return_value = sc.drop_duplicates(keep=keep, inplace=True)
assert return_value is None
tm.assert_series_equal(sc, tc[~expected])
@pytest.mark.parametrize(
"keep, expected",
[
("first", Series([False, False, True, True])),
("last", Series([True, True, False, False])),
(False, Series([True, True, True, True])),
],
)
def test_drop_duplicates_bool(keep, expected):
tc = Series([True, False, True, False])
tm.assert_series_equal(tc.duplicated(keep=keep), expected)
tm.assert_series_equal(tc.drop_duplicates(keep=keep), tc[~expected])
sc = tc.copy()
return_value = sc.drop_duplicates(keep=keep, inplace=True)
tm.assert_series_equal(sc, tc[~expected])
assert return_value is None
@pytest.mark.parametrize("values", [[], list(range(5))])
def test_drop_duplicates_no_duplicates(any_numpy_dtype, keep, values):
tc = Series(values, dtype=np.dtype(any_numpy_dtype))
expected = Series([False] * len(tc), dtype="bool")
if tc.dtype == "bool":
# 0 -> False and 1-> True
# any other value would be duplicated
tc = tc[:2]
expected = expected[:2]
tm.assert_series_equal(tc.duplicated(keep=keep), expected)
result_dropped = tc.drop_duplicates(keep=keep)
tm.assert_series_equal(result_dropped, tc)
# validate shallow copy
assert result_dropped is not tc
class TestSeriesDropDuplicates:
@pytest.fixture(
params=["int_", "uint", "float_", "unicode_", "timedelta64[h]", "datetime64[D]"]
)
def dtype(self, request):
return request.param
@pytest.fixture
def cat_series1(self, dtype, ordered):
# Test case 1
cat_array = np.array([1, 2, 3, 4, 5], dtype=np.dtype(dtype))
input1 = np.array([1, 2, 3, 3], dtype=np.dtype(dtype))
cat = Categorical(input1, categories=cat_array, ordered=ordered)
tc1 = Series(cat)
return tc1
def test_drop_duplicates_categorical_non_bool(self, cat_series1):
tc1 = cat_series1
expected = Series([False, False, False, True])
result = tc1.duplicated()
tm.assert_series_equal(result, expected)
result = tc1.drop_duplicates()
tm.assert_series_equal(result, tc1[~expected])
sc = tc1.copy()
return_value = sc.drop_duplicates(inplace=True)
assert return_value is None
tm.assert_series_equal(sc, tc1[~expected])
def test_drop_duplicates_categorical_non_bool_keeplast(self, cat_series1):
|
def test_drop_duplicates_categorical_non_bool_keepfalse(self, cat_series1):
tc1 = cat_series1
expected = Series([False, False, True, True])
result = tc1.duplicated(keep=False)
tm.assert_series_equal(result, expected)
result = tc1.drop_duplicates(keep=False)
tm.assert_series_equal(result, tc1[~expected])
sc = tc1.copy()
return_value = sc.drop_duplicates(keep=False, inplace=True)
assert return_value is None
tm.assert_series_equal(sc, tc1[~expected])
@pytest.fixture
def cat_series2(self, dtype, ordered):
# Test case 2; TODO: better name
cat_array = np.array([1, 2, 3, 4, 5], dtype=np.dtype(dtype))
input2 = np.array([1, 2, 3, 5, 3, 2, 4], dtype=np.dtype(dtype))
cat = Categorical(input2, categories=cat_array, ordered=ordered)
tc2 = Series(cat)
return tc2
def test_drop_duplicates_categorical_non_bool2(self, cat_series2):
# Test case 2; TODO: better name
tc2 = cat_series2
expected = Series([False, False, False, False, True, True, False])
result = tc2.duplicated()
tm.assert_series_equal(result, expected)
result = tc2.drop_duplicates()
tm.assert_series_equal(result, tc2[~expected])
sc = tc2.copy()
return_value = sc.drop_duplicates(inplace=True)
assert return_value is None
tm.assert_series_equal(sc, tc2[~expected])
def test_drop_duplicates_categorical_non_bool2_keeplast(self, cat_series2):
tc2 = cat_series2
expected = Series([False, True, True, False, False, False, False])
result = tc2.duplicated(keep="last")
tm.assert_series_equal(result, expected)
result = tc2.drop_duplicates(keep="last")
tm.assert_series_equal(result, tc2[~expected])
sc = tc2.copy()
return_value = sc.drop_duplicates(keep="last", inplace=True)
assert return_value is None
tm.assert_series_equal(sc, tc2[~expected])
def test_drop_duplicates_categorical_non_bool2_keepfalse(self, cat_series2):
tc2 = cat_series2
expected = Series([False, True, True, False, True, True, False])
result = tc2.duplicated(keep=False)
tm.assert_series_equal(result, expected)
result = tc2.drop_duplicates(keep=False)
tm.assert_series_equal(result, tc2[~expected])
sc = tc2.copy()
return_value = sc.drop_duplicates(keep=False, inplace=True)
assert return_value is None
tm.assert_series_equal(sc, tc2[~expected])
def test_drop_duplicates_categorical_bool(self, ordered):
tc = Series(
Categorical(
[True, False, True, False], categories=[True, False], ordered=ordered
)
)
expected = Series([False, False, True, True])
tm.assert_series_equal(tc.duplicated(), expected)
tm.assert_series_equal(tc.drop_duplicates(), tc[~expected])
sc = tc.copy()
return_value = sc.drop_duplicates(inplace=True)
assert return_value is None
tm.assert_series_equal(sc, tc[~expected])
expected = Series([True, True, False, False])
tm.assert_series_equal(tc.duplicated(keep="last"), expected)
tm.assert_series_equal(tc.drop_duplicates(keep="last"), tc[~expected])
sc = tc.copy()
return_value = sc.drop_duplicates(keep="last", inplace=True)
assert return_value is None
tm.assert_series_equal(sc, tc[~expected])
expected = Series([True, True, True, True])
tm.assert_series_equal(tc.duplicated(keep=False), expected)
tm.assert_series_equal(tc.drop_duplicates(keep=False), tc[~expected])
sc = tc.copy()
return_value = sc.drop_duplicates(keep=False, inplace=True)
assert return_value is None
tm.assert_series_equal(sc, tc[~expected])
def test_drop_duplicates_categorical_bool_na(self):
# GH#44351
ser = Series(
Categorical(
[True, False, True, False, NA], categories=[True, False], ordered=True
)
)
result = ser.drop_duplicates()
expected = Series(
Categorical([True, False, np.nan], categories=[True, False], ordered=True),
index=[0, 1, 4],
)
tm.assert_series_equal(result, expected)
def test_drop_duplicates_pos_args_deprecation():
# GH#41485
s = Series(["a", "b", "c", "b"])
msg = (
"In a future version of pandas all arguments of "
"Series.drop_duplicates will be keyword-only"
)
with tm.assert_produces_warning(FutureWarning, match=msg):
result = s.drop_duplicates("last")
expected = Series(["a", "c", "b"], index=[0, 2, 3])
tm.assert_series_equal(expected, result)
|
tc1 = cat_series1
expected = Series([False, False, True, False])
result = tc1.duplicated(keep="last")
tm.assert_series_equal(result, expected)
result = tc1.drop_duplicates(keep="last")
tm.assert_series_equal(result, tc1[~expected])
sc = tc1.copy()
return_value = sc.drop_duplicates(keep="last", inplace=True)
assert return_value is None
tm.assert_series_equal(sc, tc1[~expected])
|
qt_compat.py
|
""" A Qt API selector that can be used to switch between PyQt and PySide.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import os
from matplotlib import rcParams, verbose
# Available APIs.
QT_API_PYQT = 'PyQt4' # API is not set here; Python 2.x default is V 1
QT_API_PYQTv2 = 'PyQt4v2' # forced to Version 2 API
|
QT_API_PYQT5 = 'PyQt5' # use PyQt5 API; Version 2 with module shim
ETS = dict(pyqt=(QT_API_PYQTv2, 4), pyside=(QT_API_PYSIDE, 4),
pyqt5=(QT_API_PYQT5, 5))
# ETS is a dict of env variable to (QT_API, QT_MAJOR_VERSION)
# If the ETS QT_API environment variable is set, use it, but only
# if the variable is of the same major Qt version. Note that
# ETS requires the version 2 of PyQt4, which is not the platform
# default for Python 2.x.
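# For example, QT_API=pyqt5 together with backend Qt5Agg selects the PyQt5 bindings.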
QT_API_ENV = os.environ.get('QT_API')
if rcParams['backend'] == 'Qt5Agg':
QT_RC_MAJOR_VERSION = 5
else:
QT_RC_MAJOR_VERSION = 4
QT_API = None
if (QT_API_ENV is not None):
try:
QT_ENV_MAJOR_VERSION = ETS[QT_API_ENV][1]
except KeyError:
raise RuntimeError(
('Unrecognized environment variable %r, valid values are:'
' %r, %r or %r' % (QT_API_ENV, 'pyqt', 'pyside', 'pyqt5')))
if QT_ENV_MAJOR_VERSION == QT_RC_MAJOR_VERSION:
# Only if backend and env qt major version are
# compatible use the env variable.
QT_API = ETS[QT_API_ENV][0]
if QT_API is None:
# No ETS environment or incompatible so use rcParams.
if rcParams['backend'] == 'Qt5Agg':
QT_API = rcParams['backend.qt5']
else:
QT_API = rcParams['backend.qt4']
# We will define an appropriate wrapper for the differing versions
# of file dialog.
_getSaveFileName = None
# Flag to check if sip could be imported
_sip_imported = False
# Now perform the imports.
if QT_API in (QT_API_PYQT, QT_API_PYQTv2, QT_API_PYQT5):
try:
import sip
_sip_imported = True
except ImportError:
# Try using PySide
QT_API = QT_API_PYSIDE
cond = ("Could not import sip; falling back on PySide\n"
"in place of PyQt4 or PyQt5.\n")
verbose.report(cond, 'helpful')
if _sip_imported:
if QT_API == QT_API_PYQTv2:
if QT_API_ENV == 'pyqt':
cond = ("Found 'QT_API=pyqt' environment variable. "
"Setting PyQt4 API accordingly.\n")
else:
cond = "PyQt API v2 specified."
try:
sip.setapi('QString', 2)
except:
res = 'QString API v2 specification failed. Defaulting to v1.'
verbose.report(cond + res, 'helpful')
# condition has now been reported, no need to repeat it:
cond = ""
try:
sip.setapi('QVariant', 2)
except:
res = 'QVariant API v2 specification failed. Defaulting to v1.'
verbose.report(cond + res, 'helpful')
if QT_API in [QT_API_PYQT, QT_API_PYQTv2]: # PyQt4 API
from PyQt4 import QtCore, QtGui
try:
if sip.getapi("QString") > 1:
# Use new getSaveFileNameAndFilter()
_getSaveFileName = QtGui.QFileDialog.getSaveFileNameAndFilter
else:
# Use old getSaveFileName()
def _getSaveFileName(*args, **kwargs):
return (QtGui.QFileDialog.getSaveFileName(*args, **kwargs),
None)
except (AttributeError, KeyError):
# call to getapi() can fail in older versions of sip
def _getSaveFileName(*args, **kwargs):
return QtGui.QFileDialog.getSaveFileName(*args, **kwargs), None
else: # PyQt5 API
from PyQt5 import QtCore, QtGui, QtWidgets
_getSaveFileName = QtWidgets.QFileDialog.getSaveFileName
# Alias PyQt-specific functions for PySide compatibility.
QtCore.Signal = QtCore.pyqtSignal
try:
QtCore.Slot = QtCore.pyqtSlot
except AttributeError:
# Not a perfect match but works in simple cases
QtCore.Slot = QtCore.pyqtSignature
QtCore.Property = QtCore.pyqtProperty
__version__ = QtCore.PYQT_VERSION_STR
else: # try importing pyside
try:
from PySide import QtCore, QtGui, __version__, __version_info__
except ImportError:
raise ImportError(
"Matplotlib qt-based backends require an external PyQt4, PyQt5,\n"
"or PySide package to be installed, but it was not found.")
if __version_info__ < (1, 0, 3):
raise ImportError(
"Matplotlib backend_qt4 and backend_qt4agg require PySide >=1.0.3")
_getSaveFileName = QtGui.QFileDialog.getSaveFileName
# Apply shim to Qt4 APIs to make them look like Qt5
if QT_API in (QT_API_PYQT, QT_API_PYQTv2, QT_API_PYSIDE):
'''Import all used QtGui objects into QtWidgets
    Here I've opted to simply copy QtGui into QtWidgets, as that
    achieves the same result as copying over the objects, and will
    continue to work if other objects are used.
'''
QtWidgets = QtGui
|
QT_API_PYSIDE = 'PySide' # only supports Version 2 API
|
number_publisher_node.py
|
#!/usr/bin/env python3
import rclpy
from rclpy.node import Node
from std_msgs.msg import Int64
class NumberPublisher(Node):
def __init__(self):
super().__init__('number_publisher')
self.publisher_ = self.create_publisher(Int64, 'numbers', 10)
timer_period = 0.5 # seconds
self.timer = self.create_timer(timer_period, self.timer_callback)
self.i = 0
def timer_callback(self):
msg = Int64()
msg.data = self.i
self.publisher_.publish(msg)
self.get_logger().info('Publishing: "%s"' % msg.data)
self.i += 1
def main(args=None):
|
if __name__ == '__main__':
main()
|
rclpy.init(args=args)
number_publisher = NumberPublisher()
rclpy.spin(number_publisher)
# Destroy the node explicitly
# (optional - otherwise it will be done automatically
# when the garbage collector destroys the node object)
number_publisher.destroy_node()
rclpy.shutdown()
|
get_minute_time_data.py
|
# coding=utf-8
from jotdx.parser.base import BaseParser
from jotdx.helper import get_datetime, get_volume, get_price
from collections import OrderedDict
|
import struct
import six
class GetMinuteTimeData(BaseParser):
def setParams(self, market, code):
if type(code) is six.text_type:
code = code.encode("utf-8")
pkg = bytearray.fromhex(u'0c 1b 08 00 01 01 0e 00 0e 00 1d 05')
pkg.extend(struct.pack("<H6sI", market, code, 0))
self.send_pkg = pkg
"""
b1cb74000c1b080001b61d05be03be03f0000000a208ce038d2c028302972f4124b11a00219821011183180014891c0009be0b4207b11000429c2041....
In [26]: get_price(b, 0)
Out[26]: (0, 1)
In [27]: get_price(b, 1)
Out[27]: (0, 2)
In [28]: get_price(b, 2)
Out[28]: (546, 4)
In [29]: get_price(b, 4)
Out[29]: (-206, 6)
In [30]: get_price(b, 6)
Out[30]: (2829, 8)
In [31]: get_price(b, 8)
Out[31]: (2, 9)
In [32]: get_price(b, 9)
Out[32]: (131, 11)
In [36]: get_price(b, 11)
Out[36]: (3031, 13)
In [37]: get_price(b, 13)
Out[37]: (-1, 14)
In [38]: get_price(b, 14)
Out[38]: (36, 15)
In [39]: get_price(b, 15)
Out[39]: (1713, 17)
In [40]: get_price(b, 17)
Out[40]: (0, 18)
"""
def parseResponse(self, body_buf):
pos = 0
(num, ) = struct.unpack("<H", body_buf[:2])
last_price = 0
pos += 4
prices = []
for i in range(num):
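            # Prices are delta-encoded: each record stores the signed change from the previous price, in 1/100 units.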
price_raw, pos = get_price(body_buf, pos)
reversed1, pos = get_price(body_buf, pos)
vol, pos = get_price(body_buf, pos)
last_price = last_price + price_raw
price = OrderedDict(
[
("price", float(last_price)/100),
("vol", vol)
]
)
prices.append(price)
return prices
| |
auth.js
|
/**
* Default State
*/
const defaultAuth = {
user: null,
token: localStorage.getItem('token'),
role: localStorage.getItem('role'),
approve: localStorage.getItem('approve'),
personalVerify: false,
message: '',
error: false,
newUser: true,
};
const defaultSignup = {
message: null,
personalVerify: false,
error: false,
};
/**
 * Action Constants
*/
const AUTHENTICATE_REQUEST_SUCCESS = 'auth/AUTHENTICATE_REQUEST_SUCCESS';
const AUTHENTICATE_REQUEST_FAILURE = 'auth/AUTHENTICATE_REQUEST_FAILURE';
const LOGOUT_REQUEST_SUCCESS = 'auth/LOGOUT_REQUEST_SUCCESS';
const SIGNUP_REQUEST_FAILURE = 'auth/SIGNUP_REQUEST_FAILURE';
const SIGNUP_REQUEST_SUCCESS = 'auth/SIGNUP_REQUEST_SUCCESS';
const UPDATE_PASSWORD_FAILURE = 'auth/UPDATE_PASSWORD_FAILURE';
const UPDATE_PASSWORD_SUCCESS = 'auth/UPDATE_PASSWORD_SUCCESS';
/**
* Actions
*/
export function authenticateSuccess(data) {
return { type: AUTHENTICATE_REQUEST_SUCCESS, data };
}
export function authenticateFailure(data) {
return { type: AUTHENTICATE_REQUEST_FAILURE, data };
}
export function logoutRequestSuccess() {
return { type: LOGOUT_REQUEST_SUCCESS };
}
export function signupFailure(data) {
return { type: SIGNUP_REQUEST_FAILURE, data };
}
|
export function signupSuccess(data) {
return { type: SIGNUP_REQUEST_SUCCESS, data };
}
export function updatePasswordFailure(data) {
return { type: UPDATE_PASSWORD_FAILURE, data };
}
export function updatePasswordSuccess(data) {
return { type: UPDATE_PASSWORD_SUCCESS, data };
}
export function authReducer(state = defaultAuth, action) {
switch (action.type) {
case AUTHENTICATE_REQUEST_SUCCESS:
return Object.assign({}, state, {
token: action.data.token,
role: action.data.role,
approve: action.data.approve,
personalVerify: action.data.personalVerify,
error: false,
newUser: action.data.newUser,
});
case AUTHENTICATE_REQUEST_FAILURE:
return Object.assign({}, state, {
message: action.data.message,
error: true,
});
default:
return state;
}
}
export function signupReducer(state = defaultSignup, action) {
switch (action.type) {
case SIGNUP_REQUEST_FAILURE:
return Object.assign({}, state, {
message: action.data.message,
error: true,
});
case SIGNUP_REQUEST_SUCCESS:
return Object.assign({}, state, {
message: action.data.message,
error: false,
});
default:
return state;
}
}
| |
Labels.js
|
Clazz.declarePackage ("J.shape");
Clazz.load (["J.shape.AtomShape", "java.util.Hashtable"], "J.shape.Labels", ["javajs.awt.Font", "JU.AU", "$.BS", "J.constant.EnumPalette", "J.modelset.LabelToken", "$.Text", "J.util.BSUtil", "$.C", "J.viewer.JC"], function () {
c$ = Clazz.decorateAsClass (function () {
this.strings = null;
this.formats = null;
this.bgcolixes = null;
this.fids = null;
this.offsets = null;
this.atomLabels = null;
this.text = null;
this.labelBoxes = null;
this.bsFontSet = null;
this.bsBgColixSet = null;
this.defaultOffset = 0;
this.defaultAlignment = 0;
this.defaultZPos = 0;
this.defaultFontId = 0;
this.defaultColix = 0;
this.defaultBgcolix = 0;
this.defaultPaletteID = 0;
this.defaultPointer = 0;
this.zeroFontId = 0;
this.defaultsOnlyForNone = true;
this.setDefaults = false;
this.isScaled = false;
this.scalePixelsPerMicron = 0;
this.pickedAtom = -1;
this.pickedOffset = 0;
this.pickedX = 0;
this.pickedY = 0;
Clazz.instantialize (this, arguments);
}, J.shape, "Labels", J.shape.AtomShape);
Clazz.prepareFields (c$, function () {
this.atomLabels = new java.util.Hashtable ();
});
$_M(c$, "initShape",
function () {
Clazz.superCall (this, J.shape.Labels, "initShape", []);
this.defaultFontId = this.zeroFontId = this.gdata.getFont3DFSS ("SansSerif", "Plain", 13).fid;
this.defaultColix = 0;
this.defaultBgcolix = 0;
this.defaultOffset = J.shape.Labels.zeroOffset;
this.defaultZPos = 0;
this.translucentAllowed = false;
});
$_V(c$, "setProperty",
function (propertyName, value, bsSelected) {
this.isActive = true;
if ("setDefaults" === propertyName) {
this.setDefaults = (value).booleanValue ();
return;
}if ("color" === propertyName) {
var pid = J.constant.EnumPalette.pidOf (value);
var colix = J.util.C.getColixO (value);
if (!this.setDefaults) for (var i = bsSelected.nextSetBit (0); i >= 0 && i < this.atomCount; i = bsSelected.nextSetBit (i + 1)) this.setLabelColix (i, colix, pid);
if (this.setDefaults || !this.defaultsOnlyForNone) {
this.defaultColix = colix;
this.defaultPaletteID = pid;
}return;
}if ("scalereference" === propertyName) {
if (this.strings == null) return;
var val = (value).floatValue ();
var scalePixelsPerMicron = (val == 0 ? 0 : 10000 / val);
for (var i = bsSelected.nextSetBit (0); i >= 0 && i < this.atomCount; i = bsSelected.nextSetBit (i + 1)) {
if (this.strings.length <= i) continue;
this.text = this.getLabel (i);
if (this.text == null) {
this.text = J.modelset.Text.newLabel (this.gdata, null, this.strings[i], 0, 0, 0, scalePixelsPerMicron, null);
this.putLabel (i, this.text);
} else {
this.text.setScalePixelsPerMicron (scalePixelsPerMicron);
}}
return;
}if ("label" === propertyName) {
this.setScaling ();
var strLabel = value;
var tokens = (strLabel == null || strLabel.length == 0 ? J.shape.Labels.nullToken : [null]);
for (var i = bsSelected.nextSetBit (0); i >= 0 && i < this.atomCount; i = bsSelected.nextSetBit (i + 1)) this.setLabel (tokens, strLabel, i);
return;
}if ("labels" === propertyName) {
this.setScaling ();
var labels = value;
for (var i = bsSelected.nextSetBit (0), pt = 0; i >= 0 && i < this.atomCount; i = bsSelected.nextSetBit (i + 1)) {
var strLabel = labels.get (pt++);
var tokens = (strLabel == null || strLabel.length == 0 ? J.shape.Labels.nullToken : [null]);
this.setLabel (tokens, strLabel, i);
}
return;
}if ("clearBoxes" === propertyName) {
this.labelBoxes = null;
return;
}if ("translucency" === propertyName || "bgtranslucency" === propertyName) {
return;
}if ("bgcolor" === propertyName) {
this.isActive = true;
if (this.bsBgColixSet == null) this.bsBgColixSet = new JU.BS ();
var bgcolix = J.util.C.getColixO (value);
if (!this.setDefaults) for (var i = bsSelected.nextSetBit (0); i >= 0 && i < this.atomCount; i = bsSelected.nextSetBit (i + 1)) this.setBgcolix (i, bgcolix);
if (this.setDefaults || !this.defaultsOnlyForNone) this.defaultBgcolix = bgcolix;
return;
}if (this.bsFontSet == null) this.bsFontSet = new JU.BS ();
if ("textLabels" === propertyName) {
this.setScaling ();
var labels = value;
for (var i = bsSelected.nextSetBit (0); i >= 0 && i < this.atomCount; i = bsSelected.nextSetBit (i + 1)) this.setTextLabel (i, labels.get (Integer.$valueOf (i)));
return;
}if ("fontsize" === propertyName) {
var fontsize = (value).intValue ();
if (fontsize < 0) {
this.fids = null;
return;
}var fid = this.gdata.getFontFid (fontsize);
if (!this.setDefaults) for (var i = bsSelected.nextSetBit (0); i >= 0 && i < this.atomCount; i = bsSelected.nextSetBit (i + 1)) this.setFont (i, fid);
if (this.setDefaults || !this.defaultsOnlyForNone) this.defaultFontId = fid;
return;
}if ("font" === propertyName) {
var fid = (value).fid;
if (!this.setDefaults) for (var i = bsSelected.nextSetBit (0); i >= 0 && i < this.atomCount; i = bsSelected.nextSetBit (i + 1)) this.setFont (i, fid);
if (this.setDefaults || !this.defaultsOnlyForNone) this.defaultFontId = fid;
return;
}if ("offset" === propertyName || "offsetexact" === propertyName) {
if (!(Clazz.instanceOf (value, Integer))) {
if (!this.setDefaults) for (var i = bsSelected.nextSetBit (0); i >= 0 && i < this.atomCount; i = bsSelected.nextSetBit (i + 1)) this.setPymolOffset (i, value);
return;
}var offset = (value).intValue ();
var isExact = (propertyName === "offsetexact");
if (offset == 0) offset = 32767;
else if (offset == J.shape.Labels.zeroOffset) offset = 0;
if (!this.setDefaults) for (var i = bsSelected.nextSetBit (0); i >= 0 && i < this.atomCount; i = bsSelected.nextSetBit (i + 1)) this.setOffsets (i, offset, isExact);
if (this.setDefaults || !this.defaultsOnlyForNone) this.defaultOffset = offset;
return;
}if ("align" === propertyName) {
var type = value;
var alignment = 1;
if (type.equalsIgnoreCase ("right")) alignment = 3;
else if (type.equalsIgnoreCase ("center")) alignment = 2;
for (var i = bsSelected.nextSetBit (0); i >= 0 && i < this.atomCount; i = bsSelected.nextSetBit (i + 1)) this.setAlignment (i, alignment);
if (this.setDefaults || !this.defaultsOnlyForNone) this.defaultAlignment = alignment;
return;
}if ("pointer" === propertyName) {
var pointer = (value).intValue ();
if (!this.setDefaults) for (var i = bsSelected.nextSetBit (0); i >= 0 && i < this.atomCount; i = bsSelected.nextSetBit (i + 1)) this.setPointer (i, pointer);
if (this.setDefaults || !this.defaultsOnlyForNone) this.defaultPointer = pointer;
return;
}if ("front" === propertyName) {
var TF = (value).booleanValue ();
if (!this.setDefaults) for (var i = bsSelected.nextSetBit (0); i >= 0 && i < this.atomCount; i = bsSelected.nextSetBit (i + 1)) this.setFront (i, TF);
if (this.setDefaults || !this.defaultsOnlyForNone) this.defaultZPos = (TF ? 32 : 0);
return;
}if ("group" === propertyName) {
var TF = (value).booleanValue ();
if (!this.setDefaults) for (var i = bsSelected.nextSetBit (0); i >= 0 && i < this.atomCount; i = bsSelected.nextSetBit (i + 1)) this.setGroup (i, TF);
if (this.setDefaults || !this.defaultsOnlyForNone) this.defaultZPos = (TF ? 16 : 0);
return;
}if ("display" === propertyName || "toggleLabel" === propertyName) {
var mode = ("toggleLabel" === propertyName ? 0 : (value).booleanValue () ? 1 : -1);
if (this.mads == null) this.mads = Clazz.newShortArray (this.atomCount, 0);
var strLabelPDB = null;
var tokensPDB = null;
var strLabelUNK = null;
var tokensUNK = null;
var strLabel;
var tokens;
for (var i = bsSelected.nextSetBit (0); i >= 0 && i < this.atomCount; i = bsSelected.nextSetBit (i + 1)) {
var atom = this.atoms[i];
if (this.formats == null || i >= this.formats.length) this.formats = JU.AU.ensureLengthS (this.formats, i + 1);
if (this.strings != null && this.strings.length > i && this.strings[i] != null) {
this.mads[i] = (mode == 0 && this.mads[i] < 0 || mode == 1 ? 1 : -1);
} else {
if (this.bsSizeSet == null) this.bsSizeSet = new JU.BS ();
this.strings = JU.AU.ensureLengthS (this.strings, i + 1);
if (atom.getGroup3 (false).equals ("UNK")) {
if (strLabelUNK == null) {
strLabelUNK = this.viewer.getStandardLabelFormat (1);
tokensUNK = J.modelset.LabelToken.compile (this.viewer, strLabelUNK, '\0', null);
}strLabel = strLabelUNK;
tokens = tokensUNK;
} else {
if (strLabelPDB == null) {
strLabelPDB = this.viewer.getStandardLabelFormat (2);
tokensPDB = J.modelset.LabelToken.compile (this.viewer, strLabelPDB, '\0', null);
}strLabel = strLabelPDB;
tokens = tokensPDB;
}this.strings[i] = J.modelset.LabelToken.formatLabelAtomArray (this.viewer, atom, tokens, '\0', null);
this.formats[i] = strLabel;
this.bsSizeSet.set (i);
if ((this.bsBgColixSet == null || !this.bsBgColixSet.get (i)) && this.defaultBgcolix != 0) this.setBgcolix (i, this.defaultBgcolix);
this.mads[i] = (mode >= 0 ? 1 : -1);
}atom.setShapeVisibility (this.myVisibilityFlag, this.strings != null && i < this.strings.length && this.strings[i] != null && this.mads[i] >= 0);
}
return;
}if (propertyName.startsWith ("label:")) {
this.setScaling ();
this.setLabel ( new Array (1), propertyName.substring (6), (value).intValue ());
return;
}if (propertyName === "deleteModelAtoms") {
this.labelBoxes = null;
var firstAtomDeleted = ((value)[2])[1];
var nAtomsDeleted = ((value)[2])[2];
this.fids = JU.AU.deleteElements (this.fids, firstAtomDeleted, nAtomsDeleted);
this.bgcolixes = JU.AU.deleteElements (this.bgcolixes, firstAtomDeleted, nAtomsDeleted);
this.offsets = JU.AU.deleteElements (this.offsets, firstAtomDeleted, nAtomsDeleted);
this.formats = JU.AU.deleteElements (this.formats, firstAtomDeleted, nAtomsDeleted);
this.strings = JU.AU.deleteElements (this.strings, firstAtomDeleted, nAtomsDeleted);
J.util.BSUtil.deleteBits (this.bsFontSet, bsSelected);
J.util.BSUtil.deleteBits (this.bsBgColixSet, bsSelected);
}this.setPropAS (propertyName, value, bsSelected);
}, "~S,~O,JU.BS");
$_M(c$, "setPymolOffset",
($fz = function (i, value) {
var text = this.getLabel (i);
if (text == null) {
var fid = (this.bsFontSet != null && this.bsFontSet.get (i) ? this.fids[i] : -1);
if (fid < 0) this.setFont (i, fid = this.defaultFontId);
var font = javajs.awt.Font.getFont3D (fid);
var colix = this.getColix2 (i, this.atoms[i], false);
text = J.modelset.Text.newLabel (this.gdata, font, this.strings[i], colix, this.getColix2 (i, this.atoms[i], true), 0, this.scalePixelsPerMicron, value);
this.setTextLabel (i, text);
} else {
text.pymolOffset = value;
}}, $fz.isPrivate = true, $fz), "~N,~A");
$_M(c$, "setScaling",
($fz = function () {
this.isActive = true;
if (this.bsSizeSet == null) this.bsSizeSet = new JU.BS ();
this.isScaled = this.viewer.getBoolean (603979845);
this.scalePixelsPerMicron = (this.isScaled ? this.viewer.getScalePixelsPerAngstrom (false) * 10000 : 0);
}, $fz.isPrivate = true, $fz));
$_M(c$, "setTextLabel",
($fz = function (i, t) {
if (t == null) return;
var label = t.getText ();
var atom = this.atoms[i];
this.addString (atom, i, label, label);
atom.setShapeVisibility (this.myVisibilityFlag, true);
if (t.colix >= 0) this.setLabelColix (i, t.colix, J.constant.EnumPalette.UNKNOWN.id);
this.setFont (i, t.font.fid);
this.putLabel (i, t);
}, $fz.isPrivate = true, $fz), "~N,J.modelset.Text");
$_M(c$, "setLabel",
($fz = function (temp, strLabel, i) {
var atom = this.atoms[i];
var tokens = temp[0];
if (tokens == null) tokens = temp[0] = J.modelset.LabelToken.compile (this.viewer, strLabel, '\0', null);
var label = (tokens == null ? null : J.modelset.LabelToken.formatLabelAtomArray (this.viewer, atom, tokens, '\0', null));
this.addString (atom, i, label, strLabel);
this.text = this.getLabel (i);
if (this.isScaled) {
this.text = J.modelset.Text.newLabel (this.gdata, null, label, 0, 0, 0, this.scalePixelsPerMicron, null);
this.putLabel (i, this.text);
} else if (this.text != null && label != null) {
this.text.setText (label);
}if (this.defaultOffset != J.shape.Labels.zeroOffset) this.setOffsets (i, this.defaultOffset, false);
if (this.defaultAlignment != 1) this.setAlignment (i, this.defaultAlignment);
if ((this.defaultZPos & 32) != 0) this.setFront (i, true);
else if ((this.defaultZPos & 16) != 0) this.setGroup (i, true);
if (this.defaultPointer != 0) this.setPointer (i, this.defaultPointer);
if (this.defaultColix != 0 || this.defaultPaletteID != 0) this.setLabelColix (i, this.defaultColix, this.defaultPaletteID);
if (this.defaultBgcolix != 0) this.setBgcolix (i, this.defaultBgcolix);
if (this.defaultFontId != this.zeroFontId) this.setFont (i, this.defaultFontId);
}, $fz.isPrivate = true, $fz), "~A,~S,~N");
$_M(c$, "addString",
($fz = function (atom, i, label, strLabel) {
atom.setShapeVisibility (this.myVisibilityFlag, label != null);
if (this.strings == null || i >= this.strings.length) this.strings = JU.AU.ensureLengthS (this.strings, i + 1);
if (this.formats == null || i >= this.formats.length) this.formats = JU.AU.ensureLengthS (this.formats, i + 1);
this.strings[i] = label;
this.formats[i] = (strLabel != null && strLabel.indexOf ("%{") >= 0 ? label : strLabel);
this.bsSizeSet.setBitTo (i, (strLabel != null));
}, $fz.isPrivate = true, $fz), "J.modelset.Atom,~N,~S,~S");
$_V(c$, "getProperty",
function (property, index) {
if (property.equals ("offsets")) return this.offsets;
if (property.equals ("label")) return (this.strings != null && index < this.strings.length && this.strings[index] != null ? this.strings[index] : "");
return null;
}, "~S,~N");
$_M(c$, "putLabel",
function (i, text) {
if (text == null) this.atomLabels.remove (Integer.$valueOf (i));
else this.atomLabels.put (Integer.$valueOf (i), text);
}, "~N,J.modelset.Text");
$_M(c$, "getLabel",
function (i) {
return this.atomLabels.get (Integer.$valueOf (i));
}, "~N");
$_M(c$, "putBox",
function (i, boxXY) {
if (this.labelBoxes == null) this.labelBoxes = new java.util.Hashtable ();
this.labelBoxes.put (Integer.$valueOf (i), boxXY);
}, "~N,~A");
$_M(c$, "getBox",
function (i) {
if (this.labelBoxes == null) return null;
return this.labelBoxes.get (Integer.$valueOf (i));
}, "~N");
$_M(c$, "setLabelColix",
($fz = function (i, colix, pid) {
this.setColixAndPalette (colix, pid, i);
if (this.colixes != null && ((this.text = this.getLabel (i)) != null)) this.text.setColix (this.colixes[i]);
}, $fz.isPrivate = true, $fz), "~N,~N,~N");
$_M(c$, "setBgcolix",
($fz = function (i, bgcolix) {
if (this.bgcolixes == null || i >= this.bgcolixes.length) {
if (bgcolix == 0) return;
this.bgcolixes = JU.AU.ensureLengthShort (this.bgcolixes, i + 1);
}this.bgcolixes[i] = bgcolix;
this.bsBgColixSet.setBitTo (i, bgcolix != 0);
this.text = this.getLabel (i);
if (this.text != null) this.text.setBgColix (bgcolix);
}, $fz.isPrivate = true, $fz), "~N,~N");
$_M(c$, "setOffsets",
($fz = function (i, offset, isExact) {
if (this.offsets == null || i >= this.offsets.length) {
if (offset == 0) return;
this.offsets = JU.AU.ensureLengthI (this.offsets, i + 1);
}this.offsets[i] = (this.offsets[i] & 255) | (offset << 8);
if (isExact) this.offsets[i] |= 128;
this.text = this.getLabel (i);
if (this.text != null) this.text.setOffset (offset);
}, $fz.isPrivate = true, $fz), "~N,~N,~B");
$_M(c$, "setAlignment",
($fz = function (i, alignment) {
if (this.offsets == null || i >= this.offsets.length) {
if (alignment == 1) return;
this.offsets = JU.AU.ensureLengthI (this.offsets, i + 1);
}this.offsets[i] = (this.offsets[i] & -13) | (alignment << 2);
this.text = this.getLabel (i);
if (this.text != null) this.text.setAlignment (alignment);
}, $fz.isPrivate = true, $fz), "~N,~N");
c$.getAlignment = $_M(c$, "getAlignment",
function (offsetFull) {
return (offsetFull & 12) >> 2;
}, "~N");
$_M(c$, "setPointer",
($fz = function (i, pointer) {
if (this.offsets == null || i >= this.offsets.length) {
if (pointer == 0) return;
this.offsets = JU.AU.ensureLengthI (this.offsets, i + 1);
}this.offsets[i] = (this.offsets[i] & -4) + pointer;
this.text = this.getLabel (i);
if (this.text != null) this.text.setPointer (pointer);
}, $fz.isPrivate = true, $fz), "~N,~N");
$_M(c$, "setFront",
($fz = function (i, TF) {
if (this.offsets == null || i >= this.offsets.length) {
if (!TF) return;
this.offsets = JU.AU.ensureLengthI (this.offsets, i + 1);
}this.offsets[i] = (this.offsets[i] & -49) + (TF ? 32 : 0);
}, $fz.isPrivate = true, $fz), "~N,~B");
$_M(c$, "setGroup",
($fz = function (i, TF) {
if (this.offsets == null || i >= this.offsets.length) {
if (!TF) return;
this.offsets = JU.AU.ensureLengthI (this.offsets, i + 1);
}this.offsets[i] = (this.offsets[i] & -49) + (TF ? 16 : 0);
}, $fz.isPrivate = true, $fz), "~N,~B");
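// Note on the packed per-atom "offsets" entries used by the setters above
// and below (inferred from the bit masks in this file): bits 0-1 hold the
// pointer flags, bits 2-3 the alignment (read back in getAlignment), bit 4
// the group flag (16), bit 5 the front flag (32), bit 7 the "offsetexact"
// flag (128), and bits 8 and up the screen offset itself (offset << 8,
// recovered with >> 8 in checkObjectDragged).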
$_M(c$, "setFont",
($fz = function (i, fid) {
if (this.fids == null || i >= this.fids.length) {
if (fid == this.zeroFontId) return;
this.fids = JU.AU.ensureLengthByte (this.fids, i + 1);
}this.fids[i] = fid;
this.bsFontSet.set (i);
this.text = this.getLabel (i);
if (this.text != null) {
this.text.setFontFromFid (fid);
}}, $fz.isPrivate = true, $fz), "~N,~N");
$_V(c$, "setModelClickability",
function () {
if (this.strings == null) return;
for (var i = this.strings.length; --i >= 0; ) {
var label = this.strings[i];
if (label != null && this.modelSet.atoms.length > i && !this.modelSet.isAtomHidden (i)) this.modelSet.atoms[i].setClickable (this.myVisibilityFlag);
}
});
$_V(c$, "getShapeState",
function () {
if (!this.isActive || this.bsSizeSet == null) return "";
return this.viewer.getShapeState (this);
});
$_V(c$, "checkObjectDragged",
function (prevX, prevY, x, y, dragAction, bsVisible) {
if (this.viewer.getPickingMode () != 2 || this.labelBoxes == null) return false;
if (prevX == -2147483648) {
var iAtom = this.findNearestLabel (x, y);
if (iAtom >= 0) {
this.pickedAtom = iAtom;
this.pickedX = x;
this.pickedY = y;
this.pickedOffset = (this.offsets == null || this.pickedAtom >= this.offsets.length ? 0 : this.offsets[this.pickedAtom]) >> 8;
return true;
}return false;
}if (prevX == 2147483647) {
this.pickedAtom = -1;
return false;
}if (this.pickedAtom < 0) return false;
this.move2D (this.pickedAtom, x, y);
return true;
}, "~N,~N,~N,~N,~N,JU.BS");
$_M(c$, "findNearestLabel",
($fz = function (x, y) {
if (this.labelBoxes == null) return -1;
var dmin = 3.4028235E38;
var imin = -1;
var zmin = 3.4028235E38;
for (var entry, $entry = this.labelBoxes.entrySet ().iterator (); $entry.hasNext () && ((entry = $entry.next ()) || true);) {
if (!this.atoms[entry.getKey ().intValue ()].isVisible (this.myVisibilityFlag)) continue;
var boxXY = entry.getValue ();
var dx = x - boxXY[0];
var dy = y - boxXY[1];
if (dx <= 0 || dy <= 0 || dx >= boxXY[2] || dy >= boxXY[3] || boxXY[4] > zmin) continue;
zmin = boxXY[4];
var d = Math.min (Math.abs (dx - boxXY[2] / 2), Math.abs (dy - boxXY[3] / 2));
if (d <= dmin) {
dmin = d;
imin = entry.getKey ().intValue ();
}}
return imin;
}, $fz.isPrivate = true, $fz), "~N,~N");
$_M(c$, "move2D",
($fz = function (pickedAtom, x, y) {
var xOffset = J.viewer.JC.getXOffset (this.pickedOffset);
var yOffset = -J.viewer.JC.getYOffset (this.pickedOffset);
xOffset += x - this.pickedX;
yOffset += this.pickedY - y;
var offset = J.viewer.JC.getOffset (xOffset, yOffset);
if (offset == 0) offset = 32767;
else if (offset == J.shape.Labels.zeroOffset) offset = 0;
this.setOffsets (pickedAtom, offset, true);
}, $fz.isPrivate = true, $fz), "~N,~N,~N");
$_M(c$, "getColix2",
function (i, atom, isBg) {
var colix;
if (isBg) {
colix = (this.bgcolixes == null || i >= this.bgcolixes.length) ? 0 : this.bgcolixes[i];
} else {
colix = (this.colixes == null || i >= this.colixes.length) ? 0 : this.colixes[i];
colix = J.util.C.getColixInherited (colix, atom.getColix ());
if (J.util.C.isColixTranslucent (colix)) colix = J.util.C.getColixTranslucent3 (colix, false, 0);
}return colix;
}, "~N,J.modelset.Atom,~B");
Clazz.defineStatics (c$,
"zeroOffset", 1028);
c$.nullToken = c$.prototype.nullToken = [null];
});
| |
gitInfo.test.ts
|
import * as getCommitInfo from '../git/getCommitAndBranch';
import * as git from '../git/git';
import { setGitInfo } from './gitInfo';
jest.mock('../git/getCommitAndBranch');
jest.mock('../git/git');
const getCommitAndBranch = <jest.MockedFunction<typeof getCommitInfo.default>>getCommitInfo.default;
const getBaselineBuilds = <jest.MockedFunction<typeof git.getBaselineBuilds>>git.getBaselineBuilds;
const getChangedFiles = <jest.MockedFunction<typeof git.getChangedFiles>>git.getChangedFiles;
const getParentCommits = <jest.MockedFunction<typeof git.getParentCommits>>git.getParentCommits;
const getSlug = <jest.MockedFunction<typeof git.getSlug>>git.getSlug;
const getVersion = <jest.MockedFunction<typeof git.getVersion>>git.getVersion;
const log = { info: jest.fn(), warn: jest.fn(), debug: jest.fn() };
const commitInfo = {
commit: '123asdf',
committedAt: 1640131292,
committerName: 'Gert Hengeveld',
committerEmail: '[email protected]',
branch: 'something',
slug: undefined,
isTravisPrBuild: false,
fromCI: false,
ciService: undefined,
};
beforeEach(() => {
getCommitAndBranch.mockResolvedValue(commitInfo);
getParentCommits.mockResolvedValue(['asd2344']);
getBaselineBuilds.mockResolvedValue([]);
getChangedFiles.mockResolvedValue([]);
getVersion.mockResolvedValue('Git v1.0.0');
getSlug.mockResolvedValue('user/repo');
});
describe('setGitInfo', () => {
it('sets the git info on context', async () => {
const ctx = { log, options: {} } as any;
await setGitInfo(ctx, {} as any);
expect(ctx.git).toMatchObject({
commit: '123asdf',
branch: 'something',
parentCommits: ['asd2344'],
version: 'Git v1.0.0',
slug: 'user/repo',
});
});
it('supports overriding the owner name in the slug', async () => {
const ctx = { log, options: { ownerName: 'org' } } as any;
await setGitInfo(ctx, {} as any);
expect(ctx.git).toMatchObject({ slug: 'org/repo' });
});
it('sets changedFiles', async () => {
getBaselineBuilds.mockResolvedValue([{ commit: '012qwes' } as any]);
getChangedFiles.mockResolvedValue(['styles/main.scss', 'lib/utils.js']);
const ctx = { log, options: { onlyChanged: true } } as any;
await setGitInfo(ctx, {} as any);
expect(ctx.git.changedFiles).toEqual(['styles/main.scss', 'lib/utils.js']);
});
it('drops changedFiles when matching --externals', async () => {
getBaselineBuilds.mockResolvedValue([{ commit: '012qwes' } as any]);
getChangedFiles.mockResolvedValue(['styles/main.scss', 'lib/utils.js']);
const ctx = { log, options: { onlyChanged: true, externals: ['**/*.scss'] } } as any;
await setGitInfo(ctx, {} as any);
expect(ctx.git.changedFiles).toBeNull();
});
});
|
shellcmd.py
|
import subprocess as sp
def run(cmd, output=None, stdout=None, status=None):
result = sp.run(
cmd,
stdout = sp.PIPE,
stderr = sp.PIPE,
universal_newlines = True # result byte sequence -> string
)
if status:
return result.returncode
elif result.returncode != 0:
        print(result.stderr)
        result.check_returncode()
if stdout:
return result.stdout
if output:
return result.stdout + result.stderr
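
# A minimal usage sketch of the helper above (hypothetical invocations;
# 'echo' and 'false' are assumed to exist, as on any POSIX system):
if __name__ == '__main__':
    print(run(['echo', 'hello'], stdout=True))  # captured stdout only
    print(run(['echo', 'hello'], output=True))  # stdout + stderr combined
    print(run(['false'], status=True))          # just the return code (1)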
|
where_test.py
|
#------------------------------------------------------
# S.D. Peckham
# March 25, 2009
# Speed tests for different uses of NumPy's "where"
#------------------------------------------------------
import time
import numpy
from numpy import *
def Speed_Test(n):
    a = reshape(arange(n*n, dtype='float64'), (n,n))
nw = 0
# SET_ZERO = False
SET_ZERO = True
#--------------------------
# Standard where use test
#--------------------------
start_time = time.time()
w = where((a % 2) == 1)
nw = w[0].size # (fastest ?)
## nw = size(w) # (This takes lots of time !! Reconfirmed: 7/26/10)
# nw = w.size(0) # (Doesn't work for tuples.)
# nw = w.__len__() # (gives tuple length)
if (SET_ZERO and (nw != 0)):
a[w] = 0.0
run_time = (time.time() - start_time)
    print('size(w) =', nw)
    print('Standard Method run time =', run_time)
    print(' ')
    a = reshape(arange(n*n, dtype='float64'), (n,n))
#--------------------
# Ravel method test
#--------------------
start_time = time.time()
w = where(ravel( (a % 2) == 1 ))[0]
nw = w.size # (These 2 methods take similar time.)
# nw = size(w)
if (SET_ZERO and (nw != 0)):
a = a.flatten() # (makes a copy)
a[w] = 0.0
a = a.reshape((n,n))
run_time = (time.time() - start_time)
    print('size(w) =', nw)
    print('Ravel Method run time =', run_time)
    print(' ')
    a = reshape(arange(n*n, dtype='float64'), (n,n))
#-------------------------------------------------
# Ravel method 2 test (50% faster than Standard)
#-------------------------------------------------
start_time = time.time()
w = where(ravel( (a % 2) == 1 ))[0]
nw = w.size # (These 2 methods take similar time.)
# nw = size(w)
if (SET_ZERO and (nw != 0)):
a = ravel(a) # (just changes "view", no copy made ?)
a[w] = 0.0
a = reshape(a, (n,n))
# a = a.reshape((n,n))
run_time = (time.time() - start_time)
    print('size(w) =', nw)
    print('Ravel Method 2 run time =', run_time)
    print(' ')
    a = reshape(arange(n*n, dtype='float64'), (n,n))
#---------------------
# Ravel method 3 test
#---------------------
start_time = time.time()
a = ravel(a) # (just changes "view", no copy made ?)
# w = where((a % 2) == 1 )
w = where(fmod(a,2) == 1 )
nw = w[0].size
# nw = size(w)
if (SET_ZERO and (nw != 0)):
a[w] = 0.0
a = reshape(a, (n,n))
# a = a.reshape((n,n))
run_time = (time.time() - start_time)
    print('size(w) =', nw)
    print('Ravel Method 3 run time =', run_time)
    print(' ')
    a = reshape(arange(n*n, dtype='float64'), (n,n))
#----------------------
# Flatten method test
#----------------------
start_time = time.time()
w = where((a.flatten() % 2) == 1)[0]
nw = w.size
# nw = size(w)
if (SET_ZERO and (nw != 0)):
a = a.flatten()
a[w] = 0.0
a = a.reshape((n,n))
    run_time = (time.time() - start_time)
    print('size(w) =', nw)
    print('Flatten Method run time =', run_time)
    print(' ')
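
#--------------------------------------------------------------
# (Added sketch, not part of the original benchmark.) On modern
# NumPy the index tuple from "where" can often be skipped in
# favor of direct boolean masking; assuming NumPy >= 1.7:
#--------------------------------------------------------------
def Mask_Test(n):
    a = reshape(arange(n*n, dtype='float64'), (n,n))
    start_time = time.time()
    mask = ((a % 2) == 1)
    nw = int(mask.sum())
    if (nw != 0):
        a[mask] = 0.0
    run_time = (time.time() - start_time)
    print('size(w) =', nw)
    print('Boolean Mask run time =', run_time)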
|
Card.tsx
|
import React, { Component } from 'react';
import { computed } from 'mobx';
import { observer } from 'mobx-react';
import * as Antd from 'antd';
import { ICardConfig } from '../interfaces';
import { fillInFieldSets } from '../utilities/common';
import CardFieldSet from '../building-blocks/CardFieldSet';
interface IProps {
cardConfig: ICardConfig;
children?: any;
isLoading?: boolean;
model: any;
renderTopRight?: () => any;
}
@observer
class Card extends Component<IProps> {
@computed
private get fieldSets () {
return fillInFieldSets(this.props.cardConfig.fieldSets);
}
public render () {
const { cardConfig, renderTopRight, isLoading, model } = this.props;
return (
<Antd.Card title={cardConfig.title} extra={renderTopRight && renderTopRight()} loading={isLoading}>
{this.fieldSets.map((fieldSet, idx) => (
<CardFieldSet
cardConfig={cardConfig}
fieldSet={fieldSet}
idx={idx}
key={idx}
model={model}
/>
))}
{this.props.children}
</Antd.Card>
);
}
}
export default Card;
|
main.rs
|
//! - #![no-std]
//! 禁用标准库
#![no_std]
//! - #![no_main]
//! 不使用 main 函数作为程序入口
#![no_main]
//! 使用汇编
//! - #![feature(llvm_asm)]
//! 内嵌汇编
#![feature(llvm_asm)]
//! - #![feature(global_asm)]
//! 内嵌汇编文件
#![feature(global_asm)]
//! - #![feature(panic_info_message)]
//! panic! 时,获取其中的信息并打印
#![feature(panic_info_message)]
//! - #![feature(alloc_error_handler)]
//! 内存分配错误回调
#![feature(alloc_error_handler)]
// 汇编程序入口
global_asm!(include_str!("entry.asm"));
#[macro_use]
mod console;
mod panic;
mod sbi;
mod interrupt;
mod memory;
extern crate alloc;
/// Rust entry function
#[no_mangle]
pub extern "C" fn rust_main() -> ! {
// Initialization
interrupt::init();
memory::init();
let remap = memory::mapping::MemorySet::new_kernel().unwrap();
remap.activate();
println!("kernel remapped");
panic!()
}
|
traffic_switch_test.go
|
package main
import (
"fmt"
"testing"
"github.com/stretchr/testify/require"
)
// expectActualTrafficWeights waits until both stackset.status and the ingress have the expected actual traffic weight,
// and all stacks have their weights populated correctly
func expectActualTrafficWeights(t *testing.T, stacksetName string, weights map[string]float64) {
err := trafficWeightsUpdatedIngress(t, stacksetName, weightKindActual, weights, nil).await()
require.NoError(t, err)
err = trafficWeightsUpdatedStackset(t, stacksetName, weightKindActual, weights, nil).await()
require.NoError(t, err)
}
// expectStackTrafficWeights waits until the stack has the correct traffic weight values
func expectStackTrafficWeights(t *testing.T, stackName string, actualTrafficWeight, desiredTrafficWeight float64) {
err := stackStatusMatches(t, stackName, expectedStackStatus{
actualTrafficWeight: pfloat64(actualTrafficWeight),
desiredTrafficWeight: pfloat64(desiredTrafficWeight),
}).await()
require.NoError(t, err)
}
func TestTrafficSwitchStackset(t *testing.T) {
t.Parallel()
stacksetName := "switch-traffic-stackset"
firstVersion := "v1"
firstStack := fmt.Sprintf("%s-%s", stacksetName, firstVersion)
updatedVersion := "v2"
updatedStack := fmt.Sprintf("%s-%s", stacksetName, updatedVersion)
factory := NewTestStacksetSpecFactory(stacksetName).Ingress(nil)
spec := factory.Create(firstVersion)
err := createStackSet(stacksetName, 0, spec)
require.NoError(t, err)
_, err = waitForStack(t, stacksetName, firstVersion)
require.NoError(t, err)
spec = factory.Create(updatedVersion)
err = updateStackset(stacksetName, spec)
require.NoError(t, err)
_, err = waitForStack(t, stacksetName, updatedVersion)
require.NoError(t, err)
_, err = waitForIngress(t, stacksetName)
require.NoError(t, err)
initialWeights := map[string]float64{firstStack: 100}
expectActualTrafficWeights(t, stacksetName, initialWeights)
err = trafficWeightsUpdatedStackset(t, stacksetName, weightKindDesired, initialWeights, nil).await()
require.NoError(t, err)
err = ingressTrafficAuthoritative(t, stacksetName, false).await()
require.NoError(t, err)
expectStackTrafficWeights(t, firstStack, 100, 100)
expectStackTrafficWeights(t, updatedStack, 0, 0)
// Switch traffic 50/50
desiredWeights := map[string]float64{firstStack: 50, updatedStack: 50}
err = setDesiredTrafficWeightsStackset(stacksetName, desiredWeights)
require.NoError(t, err)
expectActualTrafficWeights(t, stacksetName, desiredWeights)
err = ingressTrafficAuthoritative(t, stacksetName, false).await()
require.NoError(t, err)
expectStackTrafficWeights(t, firstStack, 50, 50)
expectStackTrafficWeights(t, updatedStack, 50, 50)
// Switch traffic 0/100
newDesiredWeights := map[string]float64{updatedStack: 100}
err = setDesiredTrafficWeightsStackset(stacksetName, newDesiredWeights)
require.NoError(t, err)
expectActualTrafficWeights(t, stacksetName, newDesiredWeights)
err = ingressTrafficAuthoritative(t, stacksetName, false).await()
require.NoError(t, err)
expectStackTrafficWeights(t, firstStack, 0, 0)
expectStackTrafficWeights(t, updatedStack, 100, 100)
}
func TestTrafficSwitchStacksetExternalIngress(t *testing.T) {
t.Parallel()
stacksetName := "switch-traffic-stackset-external"
firstVersion := "v1"
firstStack := fmt.Sprintf("%s-%s", stacksetName, firstVersion)
updatedVersion := "v2"
updatedStack := fmt.Sprintf("%s-%s", stacksetName, updatedVersion)
factory := NewTestStacksetSpecFactory(stacksetName).ExternalIngress()
spec := factory.Create(firstVersion)
err := createStackSet(stacksetName, 0, spec)
require.NoError(t, err)
_, err = waitForStack(t, stacksetName, firstVersion)
require.NoError(t, err)
spec = factory.Create(updatedVersion)
err = updateStackset(stacksetName, spec)
require.NoError(t, err)
_, err = waitForStack(t, stacksetName, updatedVersion)
require.NoError(t, err)
initialWeights := map[string]float64{firstStack: 100}
err = trafficWeightsUpdatedStackset(t, stacksetName, weightKindActual, initialWeights, nil).await()
require.NoError(t, err)
expectStackTrafficWeights(t, firstStack, 100, 100)
expectStackTrafficWeights(t, updatedStack, 0, 0)
// Switch traffic 50/50
desiredWeights := map[string]float64{firstStack: 50, updatedStack: 50}
err = setDesiredTrafficWeightsStackset(stacksetName, desiredWeights)
require.NoError(t, err)
err = trafficWeightsUpdatedStackset(t, stacksetName, weightKindActual, desiredWeights, nil).await()
require.NoError(t, err)
expectStackTrafficWeights(t, firstStack, 50, 50)
expectStackTrafficWeights(t, updatedStack, 50, 50)
// Switch traffic 0/100
newDesiredWeights := map[string]float64{updatedStack: 100}
err = setDesiredTrafficWeightsStackset(stacksetName, newDesiredWeights)
require.NoError(t, err)
err = trafficWeightsUpdatedStackset(t, stacksetName, weightKindActual, newDesiredWeights, nil).await()
require.NoError(t, err)
expectStackTrafficWeights(t, firstStack, 0, 0)
expectStackTrafficWeights(t, updatedStack, 100, 100)
}
|
Lead.py
|
from system.core.model import Model
from flask import jsonify
class Lead(Model):
def __init__(self):
super(Lead, self).__init__()
def get_leads(self, name, early, late, page, sort, order):
query = 'SELECT * FROM leads'
data = {}
prev = False
if name != '':
            query += ' WHERE CONCAT(first_name, " ", last_name) LIKE :name'
            prev = True
            data['name'] = '%' + name + '%'
if early != '':
if prev:
query += ' AND'
else: query += ' WHERE'
query += ' registered_datetime > :start'
prev = True
data['start'] = early
if late != '':
if prev:
query += ' AND'
else: query += ' WHERE'
query += ' registered_datetime < :stop'
data['stop'] = late
if sort != '':
            sort = self.check_sort(sort)
            query += ' ORDER BY ' + sort
if order != '':
if order == 'DESC':
query += ' DESC'
elif order == 'ASC':
query += ' ASC'
pages = self.db.query_db(query, data)
query += ' LIMIT :offset, 10'
data['offset'] = int(page)*10-10
results = self.db.query_db(query, data)
return jsonify({'people': results, 'pages': pages})
    def check_sort(self, sort):
        legal_vals = ['id','first_name','last_name','registered_datetime','email']
        if not sort in legal_vals:
            sort = 'registered_datetime'
        return sort
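
    # A standalone sketch of the parameterized-LIKE pattern used above, with
    # sqlite3 from the standard library (hypothetical table and data; the
    # assumption is that query_db binds :name the way a DB-API driver binds
    # its own placeholders):
    #
    #   import sqlite3
    #   conn = sqlite3.connect(':memory:')
    #   conn.execute('CREATE TABLE leads (first_name TEXT, last_name TEXT)')
    #   conn.execute('INSERT INTO leads VALUES (?, ?)', ('Ada', 'Lovelace'))
    #   pattern = '%' + 'ada'.lower() + '%'
    #   rows = conn.execute(
    #       "SELECT * FROM leads WHERE LOWER(first_name || ' ' || last_name) LIKE ?",
    #       (pattern,),
    #   ).fetchall()  # -> [('Ada', 'Lovelace')]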
|
txn_trace_test.go
|
package internal
import (
"strconv"
"testing"
"time"
"github.com/newrelic/go-agent/internal/cat"
)
func TestTxnTrace(t *testing.T) {
start := time.Date(2014, time.November, 28, 1, 1, 0, 0, time.UTC)
tr := &TxnData{}
tr.TxnTrace.Enabled = true
tr.TxnTrace.StackTraceThreshold = 1 * time.Hour
tr.TxnTrace.SegmentThreshold = 0
t1 := StartSegment(tr, start.Add(1*time.Second))
t2 := StartSegment(tr, start.Add(2*time.Second))
EndDatastoreSegment(EndDatastoreParams{
Tracer: tr,
Start: t2,
Now: start.Add(3 * time.Second),
Product: "MySQL",
Operation: "SELECT",
Collection: "my_table",
ParameterizedQuery: "INSERT INTO users (name, age) VALUES ($1, $2)",
QueryParameters: vetQueryParameters(map[string]interface{}{"zip": 1}),
Database: "my_db",
Host: "db-server-1",
PortPathOrID: "3306",
})
t3 := StartSegment(tr, start.Add(4*time.Second))
EndExternalSegment(tr, t3, start.Add(5*time.Second), parseURL("http://example.com/zip/zap?secret=shhh"), nil)
EndBasicSegment(tr, t1, start.Add(6*time.Second), "t1")
t4 := StartSegment(tr, start.Add(7*time.Second))
t5 := StartSegment(tr, start.Add(8*time.Second))
t6 := StartSegment(tr, start.Add(9*time.Second))
EndBasicSegment(tr, t6, start.Add(10*time.Second), "t6")
EndBasicSegment(tr, t5, start.Add(11*time.Second), "t5")
t7 := StartSegment(tr, start.Add(12*time.Second))
EndDatastoreSegment(EndDatastoreParams{
Tracer: tr,
Start: t7,
Now: start.Add(13 * time.Second),
Product: "MySQL",
Operation: "SELECT",
// no collection
})
t8 := StartSegment(tr, start.Add(14*time.Second))
EndExternalSegment(tr, t8, start.Add(15*time.Second), nil, nil)
EndBasicSegment(tr, t4, start.Add(16*time.Second), "t4")
acfg := CreateAttributeConfig(sampleAttributeConfigInput, true)
attr := NewAttributes(acfg)
attr.Agent.RequestMethod = "GET"
AddUserAttribute(attr, "zap", 123, DestAll)
ht := newHarvestTraces()
ht.regular.addTxnTrace(&HarvestTrace{
TxnEvent: TxnEvent{
Start: start,
Duration: 20 * time.Second,
FinalName: "WebTransaction/Go/hello",
CleanURL: "/url",
Attrs: attr,
},
Trace: tr.TxnTrace,
})
expect := `[
1417136460000000,
20000,
"WebTransaction/Go/hello",
"/url",
[
0,
{},
{},
[
0,
20000,
"ROOT",
{},
[
[
0,
20000,
"WebTransaction/Go/hello",
{},
[
[
1000,
6000,
"Custom/t1",
{},
[
[
2000,
3000,
"Datastore/statement/MySQL/my_table/SELECT",
{
"database_name":"my_db",
"host":"db-server-1",
"port_path_or_id":"3306",
"query":"INSERT INTO users (name, age) VALUES ($1, $2)",
"query_parameters":{
"zip":1
}
},
[]
],
[
4000,
5000,
"External/example.com/all",
{
"uri":"http://example.com/zip/zap"
},
[]
]
]
],
[
7000,
16000,
"Custom/t4",
{},
[
[
8000,
11000,
"Custom/t5",
{},
[
[
9000,
10000,
"Custom/t6",
{},
[]
]
]
],
[
12000,
13000,
"Datastore/operation/MySQL/SELECT",
{
"query":"'SELECT' on 'unknown' using 'MySQL'"
},
[]
],
[
14000,
15000,
"External/unknown/all",
{},
[]
]
]
]
]
]
]
],
{
"agentAttributes":{
"request.method":"GET"
},
"userAttributes":{
"zap":123
},
"intrinsics":{}
}
],
"",
null,
false,
null,
""
]`
expect = CompactJSONString(expect)
js, err := ht.slice()[0].MarshalJSON()
if nil != err {
t.Fatal(err)
}
if string(js) != expect {
t.Error(string(js), expect)
}
}
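// Note: the first two fields of the expected JSON above are the transaction
// start time in microseconds since the Unix epoch (2014-11-28T01:01:00Z ->
// 1417136460000000) and the duration in milliseconds (20s -> 20000).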
func TestTxnTraceNoSegmentsNoAttributes(t *testing.T) {
start := time.Date(2014, time.November, 28, 1, 1, 0, 0, time.UTC)
tr := &TxnData{}
tr.TxnTrace.Enabled = true
tr.TxnTrace.StackTraceThreshold = 1 * time.Hour
tr.TxnTrace.SegmentThreshold = 0
acfg := CreateAttributeConfig(sampleAttributeConfigInput, true)
attr := NewAttributes(acfg)
ht := newHarvestTraces()
ht.regular.addTxnTrace(&HarvestTrace{
TxnEvent: TxnEvent{
Start: start,
Duration: 20 * time.Second,
FinalName: "WebTransaction/Go/hello",
CleanURL: "/url",
Attrs: attr,
},
Trace: tr.TxnTrace,
})
expect := `[
1417136460000000,
20000,
"WebTransaction/Go/hello",
"/url",
[
0,
{},
{},
[
0,
20000,
"ROOT",
{},
[
[
0,
20000,
"WebTransaction/Go/hello",
{},
[]
]
]
],
{
"agentAttributes":{},
"userAttributes":{},
"intrinsics":{}
}
],
"",
null,
false,
null,
""
]`
expect = CompactJSONString(expect)
js, err := ht.slice()[0].MarshalJSON()
if nil != err {
t.Fatal(err)
}
if string(js) != expect {
t.Error(string(js), expect)
}
}
func TestTxnTraceSlowestNodesSaved(t *testing.T) {
start := time.Date(2014, time.November, 28, 1, 1, 0, 0, time.UTC)
tr := &TxnData{}
tr.TxnTrace.Enabled = true
tr.TxnTrace.StackTraceThreshold = 1 * time.Hour
tr.TxnTrace.SegmentThreshold = 0
tr.TxnTrace.maxNodes = 5
durations := []int{5, 4, 6, 3, 7, 2, 8, 1, 9}
now := start
for _, d := range durations {
s := StartSegment(tr, now)
now = now.Add(time.Duration(d) * time.Second)
EndBasicSegment(tr, s, now, strconv.Itoa(d))
}
acfg := CreateAttributeConfig(sampleAttributeConfigInput, true)
attr := NewAttributes(acfg)
ht := newHarvestTraces()
ht.regular.addTxnTrace(&HarvestTrace{
TxnEvent: TxnEvent{
Start: start,
Duration: 123 * time.Second,
FinalName: "WebTransaction/Go/hello",
CleanURL: "/url",
Attrs: attr,
},
Trace: tr.TxnTrace,
})
expect := `[
1417136460000000,
123000,
"WebTransaction/Go/hello",
"/url",
[
0,
{},
{},
[
0,
123000,
"ROOT",
{},
[
[
0,
123000,
"WebTransaction/Go/hello",
{},
[
[
0,
5000,
"Custom/5",
{},
[]
],
[
9000,
15000,
"Custom/6",
{},
[]
],
[
18000,
25000,
"Custom/7",
{},
[]
],
[
27000,
35000,
"Custom/8",
{},
[]
],
[
36000,
45000,
"Custom/9",
{},
[]
]
]
]
]
],
{
"agentAttributes":{},
"userAttributes":{},
"intrinsics":{}
}
],
"",
null,
false,
null,
""
]`
expect = CompactJSONString(expect)
js, err := ht.slice()[0].MarshalJSON()
if nil != err {
t.Fatal(err)
}
if string(js) != expect {
t.Error(string(js), expect)
}
}
func TestTxnTraceSegmentThreshold(t *testing.T) {
start := time.Date(2014, time.November, 28, 1, 1, 0, 0, time.UTC)
tr := &TxnData{}
tr.TxnTrace.Enabled = true
tr.TxnTrace.StackTraceThreshold = 1 * time.Hour
tr.TxnTrace.SegmentThreshold = 7 * time.Second
tr.TxnTrace.maxNodes = 5
durations := []int{5, 4, 6, 3, 7, 2, 8, 1, 9}
now := start
for _, d := range durations {
s := StartSegment(tr, now)
now = now.Add(time.Duration(d) * time.Second)
EndBasicSegment(tr, s, now, strconv.Itoa(d))
}
acfg := CreateAttributeConfig(sampleAttributeConfigInput, true)
attr := NewAttributes(acfg)
ht := newHarvestTraces()
ht.regular.addTxnTrace(&HarvestTrace{
TxnEvent: TxnEvent{
Start: start,
Duration: 123 * time.Second,
FinalName: "WebTransaction/Go/hello",
CleanURL: "/url",
Attrs: attr,
},
Trace: tr.TxnTrace,
})
expect := `[
1417136460000000,
123000,
"WebTransaction/Go/hello",
"/url",
[
0,
{},
{},
[
0,
123000,
"ROOT",
{},
[
[
0,
123000,
"WebTransaction/Go/hello",
{},
[
[
18000,
25000,
"Custom/7",
{},
[]
],
[
27000,
35000,
"Custom/8",
{},
[]
],
[
36000,
45000,
"Custom/9",
{},
[]
]
]
]
]
],
{
"agentAttributes":{},
"userAttributes":{},
"intrinsics":{}
}
],
"",
null,
false,
null,
""
]`
expect = CompactJSONString(expect)
js, err := ht.slice()[0].MarshalJSON()
if nil != err {
t.Fatal(err)
}
if string(js) != expect {
t.Error(string(js), expect)
}
}
func TestEmptyHarvestTraces(t *testing.T) {
start := time.Date(2014, time.November, 28, 1, 1, 0, 0, time.UTC)
ht := newHarvestTraces()
js, err := ht.Data("12345", start)
if nil != err || nil != js {
t.Error(string(js), err)
}
}
func TestLongestTraceSaved(t *testing.T) {
start := time.Date(2014, time.November, 28, 1, 1, 0, 0, time.UTC)
tr := &TxnData{}
tr.TxnTrace.Enabled = true
acfg := CreateAttributeConfig(sampleAttributeConfigInput, true)
attr := NewAttributes(acfg)
ht := newHarvestTraces()
ht.Witness(HarvestTrace{
TxnEvent: TxnEvent{
Start: start,
Duration: 3 * time.Second,
FinalName: "WebTransaction/Go/3",
CleanURL: "/url/3",
Attrs: attr,
},
Trace: tr.TxnTrace,
})
ht.Witness(HarvestTrace{
TxnEvent: TxnEvent{
Start: start,
Duration: 5 * time.Second,
FinalName: "WebTransaction/Go/5",
CleanURL: "/url/5",
Attrs: attr,
},
Trace: tr.TxnTrace,
})
ht.Witness(HarvestTrace{
TxnEvent: TxnEvent{
Start: start,
Duration: 4 * time.Second,
FinalName: "WebTransaction/Go/4",
CleanURL: "/url/4",
Attrs: attr,
},
Trace: tr.TxnTrace,
})
expect := CompactJSONString(`
[
"12345",
[
[
1417136460000000,5000,"WebTransaction/Go/5","/url/5",
[
0,{},{},
[0,5000,"ROOT",{},
[[0,5000,"WebTransaction/Go/5",{},[]]]
],
{
"agentAttributes":{},
"userAttributes":{},
"intrinsics":{}
}
],
"",null,false,null,""
]
]
]`)
js, err := ht.Data("12345", start)
if nil != err || string(js) != expect {
t.Error(err, string(js), expect)
}
}
func TestTxnTraceStackTraceThreshold(t *testing.T) {
start := time.Date(2014, time.November, 28, 1, 1, 0, 0, time.UTC)
tr := &TxnData{}
tr.TxnTrace.Enabled = true
tr.TxnTrace.StackTraceThreshold = 2 * time.Second
tr.TxnTrace.SegmentThreshold = 0
tr.TxnTrace.maxNodes = 5
// below stack trace threshold
t1 := StartSegment(tr, start.Add(1*time.Second))
EndBasicSegment(tr, t1, start.Add(2*time.Second), "t1")
// not above stack trace threshold w/out params
t2 := StartSegment(tr, start.Add(2*time.Second))
EndDatastoreSegment(EndDatastoreParams{
Tracer: tr,
Start: t2,
Now: start.Add(4 * time.Second),
Product: "MySQL",
Collection: "my_table",
Operation: "SELECT",
})
// node above stack trace threshold w/ params
t3 := StartSegment(tr, start.Add(4*time.Second))
EndExternalSegment(tr, t3, start.Add(6*time.Second), parseURL("http://example.com/zip/zap?secret=shhh"), nil)
p := tr.TxnTrace.nodes[0].params
if nil != p {
t.Error(p)
}
p = tr.TxnTrace.nodes[1].params
if nil == p || nil == p.StackTrace || "" != p.CleanURL {
t.Error(p)
}
p = tr.TxnTrace.nodes[2].params
if nil == p || nil == p.StackTrace || "http://example.com/zip/zap" != p.CleanURL {
t.Error(p)
}
}
func TestTxnTraceSynthetics(t *testing.T) {
start := time.Date(2014, time.November, 28, 1, 1, 0, 0, time.UTC)
tr := &TxnData{}
tr.TxnTrace.Enabled = true
acfg := CreateAttributeConfig(sampleAttributeConfigInput, true)
attr := NewAttributes(acfg)
ht := newHarvestTraces()
ht.Witness(HarvestTrace{
TxnEvent: TxnEvent{
Start: start,
Duration: 3 * time.Second,
FinalName: "WebTransaction/Go/3",
CleanURL: "/url/3",
Attrs: attr,
CrossProcess: TxnCrossProcess{
Type: txnCrossProcessSynthetics,
Synthetics: &cat.SyntheticsHeader{
ResourceID: "resource",
},
},
},
Trace: tr.TxnTrace,
})
ht.Witness(HarvestTrace{
TxnEvent: TxnEvent{
Start: start,
Duration: 5 * time.Second,
FinalName: "WebTransaction/Go/5",
CleanURL: "/url/5",
Attrs: attr,
CrossProcess: TxnCrossProcess{
Type: txnCrossProcessSynthetics,
Synthetics: &cat.SyntheticsHeader{
ResourceID: "resource",
},
},
},
Trace: tr.TxnTrace,
})
ht.Witness(HarvestTrace{
TxnEvent: TxnEvent{
Start: start,
Duration: 4 * time.Second,
FinalName: "WebTransaction/Go/4",
CleanURL: "/url/4",
Attrs: attr,
CrossProcess: TxnCrossProcess{
Type: txnCrossProcessSynthetics,
Synthetics: &cat.SyntheticsHeader{
ResourceID: "resource",
},
},
},
Trace: tr.TxnTrace,
})
expect := CompactJSONString(`
[
"12345",
[
[
1417136460000000,3000,"WebTransaction/Go/3","/url/3",
[
0,{},{},
[0,3000,"ROOT",{},
[[0,3000,"WebTransaction/Go/3",{},[]]]
],
{
"agentAttributes":{},
"userAttributes":{},
"intrinsics":{
"synthetics_resource_id": "resource"
}
}
],
"",null,false,null,"resource"
],
[
1417136460000000,5000,"WebTransaction/Go/5","/url/5",
[
0,{},{},
[0,5000,"ROOT",{},
[[0,5000,"WebTransaction/Go/5",{},[]]]
],
{
"agentAttributes":{},
"userAttributes":{},
"intrinsics":{
"synthetics_resource_id": "resource"
}
}
],
"",null,false,null,"resource"
],
[
1417136460000000,4000,"WebTransaction/Go/4","/url/4",
[
0,{},{},
[0,4000,"ROOT",{},
[[0,4000,"WebTransaction/Go/4",{},[]]]
],
{
"agentAttributes":{},
"userAttributes":{},
"intrinsics":{
"synthetics_resource_id": "resource"
}
}
],
"",null,false,null,"resource"
]
]
]`)
js, err := ht.Data("12345", start)
if nil != err || string(js) != expect {
t.Errorf("err=%v; actual=%s; expect=%s", err, string(js), expect)
}
}
|
ty_tykind_usage.rs
|
// compile-flags: -Z unstable-options
#![feature(rustc_private)]
extern crate rustc;
use rustc::ty::{self, Ty, TyKind};
#[deny(rustc::usage_of_ty_tykind)]
fn main() {
let kind = TyKind::Bool; //~ ERROR usage of `ty::TyKind::<kind>`
match kind {
TyKind::Bool => (), //~ ERROR usage of `ty::TyKind::<kind>`
TyKind::Char => (), //~ ERROR usage of `ty::TyKind::<kind>`
TyKind::Int(..) => (), //~ ERROR usage of `ty::TyKind::<kind>`
TyKind::Uint(..) => (), //~ ERROR usage of `ty::TyKind::<kind>`
TyKind::Float(..) => (), //~ ERROR usage of `ty::TyKind::<kind>`
TyKind::Adt(..) => (), //~ ERROR usage of `ty::TyKind::<kind>`
TyKind::Foreign(..) => (), //~ ERROR usage of `ty::TyKind::<kind>`
TyKind::Str => (), //~ ERROR usage of `ty::TyKind::<kind>`
TyKind::Array(..) => (), //~ ERROR usage of `ty::TyKind::<kind>`
TyKind::Slice(..) => (), //~ ERROR usage of `ty::TyKind::<kind>`
TyKind::RawPtr(..) => (), //~ ERROR usage of `ty::TyKind::<kind>`
TyKind::Ref(..) => (), //~ ERROR usage of `ty::TyKind::<kind>`
TyKind::FnDef(..) => (), //~ ERROR usage of `ty::TyKind::<kind>`
TyKind::FnPtr(..) => (), //~ ERROR usage of `ty::TyKind::<kind>`
TyKind::Dynamic(..) => (), //~ ERROR usage of `ty::TyKind::<kind>`
TyKind::Closure(..) => (), //~ ERROR usage of `ty::TyKind::<kind>`
TyKind::Generator(..) => (), //~ ERROR usage of `ty::TyKind::<kind>`
TyKind::GeneratorWitness(..) => (), //~ ERROR usage of `ty::TyKind::<kind>`
TyKind::Never => (), //~ ERROR usage of `ty::TyKind::<kind>`
TyKind::Tuple(..) => (), //~ ERROR usage of `ty::TyKind::<kind>`
TyKind::Projection(..) => (), //~ ERROR usage of `ty::TyKind::<kind>`
TyKind::UnnormalizedProjection(..) => (), //~ ERROR usage of `ty::TyKind::<kind>`
TyKind::Opaque(..) => (), //~ ERROR usage of `ty::TyKind::<kind>`
TyKind::Param(..) => (), //~ ERROR usage of `ty::TyKind::<kind>`
TyKind::Bound(..) => (), //~ ERROR usage of `ty::TyKind::<kind>`
TyKind::Placeholder(..) => (), //~ ERROR usage of `ty::TyKind::<kind>`
TyKind::Infer(..) => (), //~ ERROR usage of `ty::TyKind::<kind>`
TyKind::Error => (), //~ ERROR usage of `ty::TyKind::<kind>`
}
if let ty::Int(int_ty) = kind {}
if let TyKind::Int(int_ty) = kind {} //~ ERROR usage of `ty::TyKind::<kind>`
fn ty_kind(ty_bad: TyKind<'_>, ty_good: Ty<'_>) {} //~ ERROR usage of `ty::TyKind`
}
|
rmt.rs
|
#[doc = r"Register block"]
#[repr(C)]
pub struct RegisterBlock {
#[doc = "0x00 - "]
pub ch0data: crate::Reg<ch0data::CH0DATA_SPEC>,
#[doc = "0x04 - "]
pub ch1data: crate::Reg<ch1data::CH1DATA_SPEC>,
#[doc = "0x08 - "]
pub ch2data: crate::Reg<ch2data::CH2DATA_SPEC>,
#[doc = "0x0c - "]
pub ch3data: crate::Reg<ch3data::CH3DATA_SPEC>,
#[doc = "0x10 - "]
pub ch4data: crate::Reg<ch4data::CH4DATA_SPEC>,
#[doc = "0x14 - "]
pub ch5data: crate::Reg<ch5data::CH5DATA_SPEC>,
#[doc = "0x18 - "]
pub ch6data: crate::Reg<ch6data::CH6DATA_SPEC>,
#[doc = "0x1c - "]
pub ch7data: crate::Reg<ch7data::CH7DATA_SPEC>,
#[doc = "0x20 - "]
pub ch0conf0: crate::Reg<ch0conf0::CH0CONF0_SPEC>,
#[doc = "0x24 - "]
pub ch0conf1: crate::Reg<ch0conf1::CH0CONF1_SPEC>,
#[doc = "0x28 - "]
pub ch1conf0: crate::Reg<ch1conf0::CH1CONF0_SPEC>,
#[doc = "0x2c - "]
pub ch1conf1: crate::Reg<ch1conf1::CH1CONF1_SPEC>,
#[doc = "0x30 - "]
pub ch2conf0: crate::Reg<ch2conf0::CH2CONF0_SPEC>,
#[doc = "0x34 - "]
pub ch2conf1: crate::Reg<ch2conf1::CH2CONF1_SPEC>,
#[doc = "0x38 - "]
pub ch3conf0: crate::Reg<ch3conf0::CH3CONF0_SPEC>,
#[doc = "0x3c - "]
pub ch3conf1: crate::Reg<ch3conf1::CH3CONF1_SPEC>,
#[doc = "0x40 - "]
pub ch4conf0: crate::Reg<ch4conf0::CH4CONF0_SPEC>,
#[doc = "0x44 - "]
pub ch4conf1: crate::Reg<ch4conf1::CH4CONF1_SPEC>,
#[doc = "0x48 - "]
pub ch5conf0: crate::Reg<ch5conf0::CH5CONF0_SPEC>,
#[doc = "0x4c - "]
pub ch5conf1: crate::Reg<ch5conf1::CH5CONF1_SPEC>,
#[doc = "0x50 - "]
pub ch6conf0: crate::Reg<ch6conf0::CH6CONF0_SPEC>,
#[doc = "0x54 - "]
pub ch6conf1: crate::Reg<ch6conf1::CH6CONF1_SPEC>,
#[doc = "0x58 - "]
pub ch7conf0: crate::Reg<ch7conf0::CH7CONF0_SPEC>,
#[doc = "0x5c - "]
pub ch7conf1: crate::Reg<ch7conf1::CH7CONF1_SPEC>,
#[doc = "0x60 - "]
pub ch0status: crate::Reg<ch0status::CH0STATUS_SPEC>,
#[doc = "0x64 - "]
pub ch1status: crate::Reg<ch1status::CH1STATUS_SPEC>,
#[doc = "0x68 - "]
pub ch2status: crate::Reg<ch2status::CH2STATUS_SPEC>,
#[doc = "0x6c - "]
pub ch3status: crate::Reg<ch3status::CH3STATUS_SPEC>,
#[doc = "0x70 - "]
pub ch4status: crate::Reg<ch4status::CH4STATUS_SPEC>,
#[doc = "0x74 - "]
pub ch5status: crate::Reg<ch5status::CH5STATUS_SPEC>,
#[doc = "0x78 - "]
pub ch6status: crate::Reg<ch6status::CH6STATUS_SPEC>,
#[doc = "0x7c - "]
pub ch7status: crate::Reg<ch7status::CH7STATUS_SPEC>,
#[doc = "0x80 - "]
pub ch0addr: crate::Reg<ch0addr::CH0ADDR_SPEC>,
#[doc = "0x84 - "]
pub ch1addr: crate::Reg<ch1addr::CH1ADDR_SPEC>,
#[doc = "0x88 - "]
pub ch2addr: crate::Reg<ch2addr::CH2ADDR_SPEC>,
#[doc = "0x8c - "]
pub ch3addr: crate::Reg<ch3addr::CH3ADDR_SPEC>,
#[doc = "0x90 - "]
pub ch4addr: crate::Reg<ch4addr::CH4ADDR_SPEC>,
#[doc = "0x94 - "]
pub ch5addr: crate::Reg<ch5addr::CH5ADDR_SPEC>,
#[doc = "0x98 - "]
pub ch6addr: crate::Reg<ch6addr::CH6ADDR_SPEC>,
#[doc = "0x9c - "]
pub ch7addr: crate::Reg<ch7addr::CH7ADDR_SPEC>,
#[doc = "0xa0 - "]
pub int_raw: crate::Reg<int_raw::INT_RAW_SPEC>,
#[doc = "0xa4 - "]
pub int_st: crate::Reg<int_st::INT_ST_SPEC>,
#[doc = "0xa8 - "]
pub int_ena: crate::Reg<int_ena::INT_ENA_SPEC>,
#[doc = "0xac - "]
pub int_clr: crate::Reg<int_clr::INT_CLR_SPEC>,
#[doc = "0xb0 - "]
pub ch0carrier_duty: crate::Reg<ch0carrier_duty::CH0CARRIER_DUTY_SPEC>,
#[doc = "0xb4 - "]
pub ch1carrier_duty: crate::Reg<ch1carrier_duty::CH1CARRIER_DUTY_SPEC>,
#[doc = "0xb8 - "]
pub ch2carrier_duty: crate::Reg<ch2carrier_duty::CH2CARRIER_DUTY_SPEC>,
#[doc = "0xbc - "]
pub ch3carrier_duty: crate::Reg<ch3carrier_duty::CH3CARRIER_DUTY_SPEC>,
#[doc = "0xc0 - "]
pub ch4carrier_duty: crate::Reg<ch4carrier_duty::CH4CARRIER_DUTY_SPEC>,
#[doc = "0xc4 - "]
pub ch5carrier_duty: crate::Reg<ch5carrier_duty::CH5CARRIER_DUTY_SPEC>,
#[doc = "0xc8 - "]
pub ch6carrier_duty: crate::Reg<ch6carrier_duty::CH6CARRIER_DUTY_SPEC>,
#[doc = "0xcc - "]
pub ch7carrier_duty: crate::Reg<ch7carrier_duty::CH7CARRIER_DUTY_SPEC>,
#[doc = "0xd0 - "]
pub ch0_tx_lim: crate::Reg<ch0_tx_lim::CH0_TX_LIM_SPEC>,
#[doc = "0xd4 - "]
pub ch1_tx_lim: crate::Reg<ch1_tx_lim::CH1_TX_LIM_SPEC>,
#[doc = "0xd8 - "]
pub ch2_tx_lim: crate::Reg<ch2_tx_lim::CH2_TX_LIM_SPEC>,
#[doc = "0xdc - "]
pub ch3_tx_lim: crate::Reg<ch3_tx_lim::CH3_TX_LIM_SPEC>,
#[doc = "0xe0 - "]
pub ch4_tx_lim: crate::Reg<ch4_tx_lim::CH4_TX_LIM_SPEC>,
#[doc = "0xe4 - "]
pub ch5_tx_lim: crate::Reg<ch5_tx_lim::CH5_TX_LIM_SPEC>,
#[doc = "0xe8 - "]
pub ch6_tx_lim: crate::Reg<ch6_tx_lim::CH6_TX_LIM_SPEC>,
#[doc = "0xec - "]
pub ch7_tx_lim: crate::Reg<ch7_tx_lim::CH7_TX_LIM_SPEC>,
#[doc = "0xf0 - "]
pub apb_conf: crate::Reg<apb_conf::APB_CONF_SPEC>,
_reserved61: [u8; 0x08],
#[doc = "0xfc - "]
pub date: crate::Reg<date::DATE_SPEC>,
}
#[doc = "CH0DATA register accessor: an alias for `Reg<CH0DATA_SPEC>`"]
pub type CH0DATA = crate::Reg<ch0data::CH0DATA_SPEC>;
#[doc = ""]
pub mod ch0data;
#[doc = "CH1DATA register accessor: an alias for `Reg<CH1DATA_SPEC>`"]
pub type CH1DATA = crate::Reg<ch1data::CH1DATA_SPEC>;
#[doc = ""]
pub mod ch1data;
#[doc = "CH2DATA register accessor: an alias for `Reg<CH2DATA_SPEC>`"]
pub type CH2DATA = crate::Reg<ch2data::CH2DATA_SPEC>;
#[doc = ""]
pub mod ch2data;
#[doc = "CH3DATA register accessor: an alias for `Reg<CH3DATA_SPEC>`"]
pub type CH3DATA = crate::Reg<ch3data::CH3DATA_SPEC>;
#[doc = ""]
pub mod ch3data;
#[doc = "CH4DATA register accessor: an alias for `Reg<CH4DATA_SPEC>`"]
pub type CH4DATA = crate::Reg<ch4data::CH4DATA_SPEC>;
#[doc = ""]
pub mod ch4data;
#[doc = "CH5DATA register accessor: an alias for `Reg<CH5DATA_SPEC>`"]
pub type CH5DATA = crate::Reg<ch5data::CH5DATA_SPEC>;
#[doc = ""]
pub mod ch5data;
#[doc = "CH6DATA register accessor: an alias for `Reg<CH6DATA_SPEC>`"]
pub type CH6DATA = crate::Reg<ch6data::CH6DATA_SPEC>;
#[doc = ""]
pub mod ch6data;
#[doc = "CH7DATA register accessor: an alias for `Reg<CH7DATA_SPEC>`"]
pub type CH7DATA = crate::Reg<ch7data::CH7DATA_SPEC>;
#[doc = ""]
pub mod ch7data;
#[doc = "CH0CONF0 register accessor: an alias for `Reg<CH0CONF0_SPEC>`"]
pub type CH0CONF0 = crate::Reg<ch0conf0::CH0CONF0_SPEC>;
#[doc = ""]
pub mod ch0conf0;
#[doc = "CH0CONF1 register accessor: an alias for `Reg<CH0CONF1_SPEC>`"]
pub type CH0CONF1 = crate::Reg<ch0conf1::CH0CONF1_SPEC>;
#[doc = ""]
pub mod ch0conf1;
#[doc = "CH1CONF0 register accessor: an alias for `Reg<CH1CONF0_SPEC>`"]
pub type CH1CONF0 = crate::Reg<ch1conf0::CH1CONF0_SPEC>;
#[doc = ""]
pub mod ch1conf0;
#[doc = "CH1CONF1 register accessor: an alias for `Reg<CH1CONF1_SPEC>`"]
pub type CH1CONF1 = crate::Reg<ch1conf1::CH1CONF1_SPEC>;
#[doc = ""]
pub mod ch1conf1;
#[doc = "CH2CONF0 register accessor: an alias for `Reg<CH2CONF0_SPEC>`"]
pub type CH2CONF0 = crate::Reg<ch2conf0::CH2CONF0_SPEC>;
#[doc = ""]
pub mod ch2conf0;
#[doc = "CH2CONF1 register accessor: an alias for `Reg<CH2CONF1_SPEC>`"]
pub type CH2CONF1 = crate::Reg<ch2conf1::CH2CONF1_SPEC>;
#[doc = ""]
pub mod ch2conf1;
#[doc = "CH3CONF0 register accessor: an alias for `Reg<CH3CONF0_SPEC>`"]
pub type CH3CONF0 = crate::Reg<ch3conf0::CH3CONF0_SPEC>;
#[doc = ""]
pub mod ch3conf0;
#[doc = "CH3CONF1 register accessor: an alias for `Reg<CH3CONF1_SPEC>`"]
pub type CH3CONF1 = crate::Reg<ch3conf1::CH3CONF1_SPEC>;
#[doc = ""]
pub mod ch3conf1;
#[doc = "CH4CONF0 register accessor: an alias for `Reg<CH4CONF0_SPEC>`"]
pub type CH4CONF0 = crate::Reg<ch4conf0::CH4CONF0_SPEC>;
#[doc = ""]
pub mod ch4conf0;
#[doc = "CH4CONF1 register accessor: an alias for `Reg<CH4CONF1_SPEC>`"]
pub type CH4CONF1 = crate::Reg<ch4conf1::CH4CONF1_SPEC>;
#[doc = ""]
pub mod ch4conf1;
#[doc = "CH5CONF0 register accessor: an alias for `Reg<CH5CONF0_SPEC>`"]
pub type CH5CONF0 = crate::Reg<ch5conf0::CH5CONF0_SPEC>;
#[doc = ""]
pub mod ch5conf0;
#[doc = "CH5CONF1 register accessor: an alias for `Reg<CH5CONF1_SPEC>`"]
pub type CH5CONF1 = crate::Reg<ch5conf1::CH5CONF1_SPEC>;
#[doc = ""]
pub mod ch5conf1;
#[doc = "CH6CONF0 register accessor: an alias for `Reg<CH6CONF0_SPEC>`"]
pub type CH6CONF0 = crate::Reg<ch6conf0::CH6CONF0_SPEC>;
#[doc = ""]
pub mod ch6conf0;
#[doc = "CH6CONF1 register accessor: an alias for `Reg<CH6CONF1_SPEC>`"]
pub type CH6CONF1 = crate::Reg<ch6conf1::CH6CONF1_SPEC>;
#[doc = ""]
pub mod ch6conf1;
#[doc = "CH7CONF0 register accessor: an alias for `Reg<CH7CONF0_SPEC>`"]
pub type CH7CONF0 = crate::Reg<ch7conf0::CH7CONF0_SPEC>;
#[doc = ""]
pub mod ch7conf0;
#[doc = "CH7CONF1 register accessor: an alias for `Reg<CH7CONF1_SPEC>`"]
pub type CH7CONF1 = crate::Reg<ch7conf1::CH7CONF1_SPEC>;
#[doc = ""]
pub mod ch7conf1;
#[doc = "CH0STATUS register accessor: an alias for `Reg<CH0STATUS_SPEC>`"]
pub type CH0STATUS = crate::Reg<ch0status::CH0STATUS_SPEC>;
#[doc = ""]
pub mod ch0status;
#[doc = "CH1STATUS register accessor: an alias for `Reg<CH1STATUS_SPEC>`"]
pub type CH1STATUS = crate::Reg<ch1status::CH1STATUS_SPEC>;
#[doc = ""]
pub mod ch1status;
#[doc = "CH2STATUS register accessor: an alias for `Reg<CH2STATUS_SPEC>`"]
pub type CH2STATUS = crate::Reg<ch2status::CH2STATUS_SPEC>;
#[doc = ""]
pub mod ch2status;
#[doc = "CH3STATUS register accessor: an alias for `Reg<CH3STATUS_SPEC>`"]
pub type CH3STATUS = crate::Reg<ch3status::CH3STATUS_SPEC>;
#[doc = ""]
pub mod ch3status;
#[doc = "CH4STATUS register accessor: an alias for `Reg<CH4STATUS_SPEC>`"]
pub type CH4STATUS = crate::Reg<ch4status::CH4STATUS_SPEC>;
#[doc = ""]
pub mod ch4status;
#[doc = "CH5STATUS register accessor: an alias for `Reg<CH5STATUS_SPEC>`"]
pub type CH5STATUS = crate::Reg<ch5status::CH5STATUS_SPEC>;
#[doc = ""]
pub mod ch5status;
#[doc = "CH6STATUS register accessor: an alias for `Reg<CH6STATUS_SPEC>`"]
pub type CH6STATUS = crate::Reg<ch6status::CH6STATUS_SPEC>;
#[doc = ""]
pub mod ch6status;
#[doc = "CH7STATUS register accessor: an alias for `Reg<CH7STATUS_SPEC>`"]
pub type CH7STATUS = crate::Reg<ch7status::CH7STATUS_SPEC>;
#[doc = ""]
pub mod ch7status;
#[doc = "CH0ADDR register accessor: an alias for `Reg<CH0ADDR_SPEC>`"]
pub type CH0ADDR = crate::Reg<ch0addr::CH0ADDR_SPEC>;
#[doc = ""]
pub mod ch0addr;
#[doc = "CH1ADDR register accessor: an alias for `Reg<CH1ADDR_SPEC>`"]
pub type CH1ADDR = crate::Reg<ch1addr::CH1ADDR_SPEC>;
#[doc = ""]
pub mod ch1addr;
#[doc = "CH2ADDR register accessor: an alias for `Reg<CH2ADDR_SPEC>`"]
pub type CH2ADDR = crate::Reg<ch2addr::CH2ADDR_SPEC>;
#[doc = ""]
pub mod ch2addr;
#[doc = "CH3ADDR register accessor: an alias for `Reg<CH3ADDR_SPEC>`"]
pub type CH3ADDR = crate::Reg<ch3addr::CH3ADDR_SPEC>;
#[doc = ""]
pub mod ch3addr;
#[doc = "CH4ADDR register accessor: an alias for `Reg<CH4ADDR_SPEC>`"]
pub type CH4ADDR = crate::Reg<ch4addr::CH4ADDR_SPEC>;
#[doc = ""]
pub mod ch4addr;
#[doc = "CH5ADDR register accessor: an alias for `Reg<CH5ADDR_SPEC>`"]
pub type CH5ADDR = crate::Reg<ch5addr::CH5ADDR_SPEC>;
#[doc = ""]
pub mod ch5addr;
#[doc = "CH6ADDR register accessor: an alias for `Reg<CH6ADDR_SPEC>`"]
pub type CH6ADDR = crate::Reg<ch6addr::CH6ADDR_SPEC>;
#[doc = ""]
pub mod ch6addr;
#[doc = "CH7ADDR register accessor: an alias for `Reg<CH7ADDR_SPEC>`"]
pub type CH7ADDR = crate::Reg<ch7addr::CH7ADDR_SPEC>;
#[doc = ""]
pub mod ch7addr;
#[doc = "INT_RAW register accessor: an alias for `Reg<INT_RAW_SPEC>`"]
pub type INT_RAW = crate::Reg<int_raw::INT_RAW_SPEC>;
#[doc = ""]
pub mod int_raw;
#[doc = "INT_ST register accessor: an alias for `Reg<INT_ST_SPEC>`"]
pub type INT_ST = crate::Reg<int_st::INT_ST_SPEC>;
#[doc = ""]
pub mod int_st;
#[doc = "INT_ENA register accessor: an alias for `Reg<INT_ENA_SPEC>`"]
pub type INT_ENA = crate::Reg<int_ena::INT_ENA_SPEC>;
#[doc = ""]
pub mod int_ena;
#[doc = "INT_CLR register accessor: an alias for `Reg<INT_CLR_SPEC>`"]
pub type INT_CLR = crate::Reg<int_clr::INT_CLR_SPEC>;
#[doc = ""]
pub mod int_clr;
#[doc = "CH0CARRIER_DUTY register accessor: an alias for `Reg<CH0CARRIER_DUTY_SPEC>`"]
pub type CH0CARRIER_DUTY = crate::Reg<ch0carrier_duty::CH0CARRIER_DUTY_SPEC>;
#[doc = ""]
pub mod ch0carrier_duty;
#[doc = "CH1CARRIER_DUTY register accessor: an alias for `Reg<CH1CARRIER_DUTY_SPEC>`"]
pub type CH1CARRIER_DUTY = crate::Reg<ch1carrier_duty::CH1CARRIER_DUTY_SPEC>;
#[doc = ""]
pub mod ch1carrier_duty;
#[doc = "CH2CARRIER_DUTY register accessor: an alias for `Reg<CH2CARRIER_DUTY_SPEC>`"]
pub type CH2CARRIER_DUTY = crate::Reg<ch2carrier_duty::CH2CARRIER_DUTY_SPEC>;
#[doc = ""]
pub mod ch2carrier_duty;
#[doc = "CH3CARRIER_DUTY register accessor: an alias for `Reg<CH3CARRIER_DUTY_SPEC>`"]
pub type CH3CARRIER_DUTY = crate::Reg<ch3carrier_duty::CH3CARRIER_DUTY_SPEC>;
#[doc = ""]
pub mod ch3carrier_duty;
#[doc = "CH4CARRIER_DUTY register accessor: an alias for `Reg<CH4CARRIER_DUTY_SPEC>`"]
pub type CH4CARRIER_DUTY = crate::Reg<ch4carrier_duty::CH4CARRIER_DUTY_SPEC>;
#[doc = ""]
pub mod ch4carrier_duty;
#[doc = "CH5CARRIER_DUTY register accessor: an alias for `Reg<CH5CARRIER_DUTY_SPEC>`"]
pub type CH5CARRIER_DUTY = crate::Reg<ch5carrier_duty::CH5CARRIER_DUTY_SPEC>;
#[doc = ""]
pub mod ch5carrier_duty;
#[doc = "CH6CARRIER_DUTY register accessor: an alias for `Reg<CH6CARRIER_DUTY_SPEC>`"]
pub type CH6CARRIER_DUTY = crate::Reg<ch6carrier_duty::CH6CARRIER_DUTY_SPEC>;
#[doc = ""]
pub mod ch6carrier_duty;
#[doc = "CH7CARRIER_DUTY register accessor: an alias for `Reg<CH7CARRIER_DUTY_SPEC>`"]
pub type CH7CARRIER_DUTY = crate::Reg<ch7carrier_duty::CH7CARRIER_DUTY_SPEC>;
#[doc = ""]
pub mod ch7carrier_duty;
#[doc = "CH0_TX_LIM register accessor: an alias for `Reg<CH0_TX_LIM_SPEC>`"]
pub type CH0_TX_LIM = crate::Reg<ch0_tx_lim::CH0_TX_LIM_SPEC>;
#[doc = ""]
pub mod ch0_tx_lim;
#[doc = "CH1_TX_LIM register accessor: an alias for `Reg<CH1_TX_LIM_SPEC>`"]
pub type CH1_TX_LIM = crate::Reg<ch1_tx_lim::CH1_TX_LIM_SPEC>;
#[doc = ""]
pub mod ch1_tx_lim;
#[doc = "CH2_TX_LIM register accessor: an alias for `Reg<CH2_TX_LIM_SPEC>`"]
pub type CH2_TX_LIM = crate::Reg<ch2_tx_lim::CH2_TX_LIM_SPEC>;
#[doc = ""]
pub mod ch2_tx_lim;
#[doc = "CH3_TX_LIM register accessor: an alias for `Reg<CH3_TX_LIM_SPEC>`"]
pub type CH3_TX_LIM = crate::Reg<ch3_tx_lim::CH3_TX_LIM_SPEC>;
#[doc = ""]
pub mod ch3_tx_lim;
#[doc = "CH4_TX_LIM register accessor: an alias for `Reg<CH4_TX_LIM_SPEC>`"]
pub type CH4_TX_LIM = crate::Reg<ch4_tx_lim::CH4_TX_LIM_SPEC>;
#[doc = ""]
pub mod ch4_tx_lim;
#[doc = "CH5_TX_LIM register accessor: an alias for `Reg<CH5_TX_LIM_SPEC>`"]
pub type CH5_TX_LIM = crate::Reg<ch5_tx_lim::CH5_TX_LIM_SPEC>;
#[doc = ""]
pub mod ch5_tx_lim;
#[doc = "CH6_TX_LIM register accessor: an alias for `Reg<CH6_TX_LIM_SPEC>`"]
pub type CH6_TX_LIM = crate::Reg<ch6_tx_lim::CH6_TX_LIM_SPEC>;
#[doc = ""]
pub mod ch6_tx_lim;
#[doc = "CH7_TX_LIM register accessor: an alias for `Reg<CH7_TX_LIM_SPEC>`"]
pub type CH7_TX_LIM = crate::Reg<ch7_tx_lim::CH7_TX_LIM_SPEC>;
#[doc = ""]
pub mod ch7_tx_lim;
#[doc = "APB_CONF register accessor: an alias for `Reg<APB_CONF_SPEC>`"]
pub type APB_CONF = crate::Reg<apb_conf::APB_CONF_SPEC>;
#[doc = ""]
pub mod apb_conf;
#[doc = "DATE register accessor: an alias for `Reg<DATE_SPEC>`"]
pub type DATE = crate::Reg<date::DATE_SPEC>;
#[doc = ""]
pub mod date;
|
coordinator.rs
|
// Copyright (c) The Libra Core Contributors
// SPDX-License-Identifier: Apache-2.0
use crate::{
counters,
executor_proxy::ExecutorProxyTrait,
peer_manager::{PeerManager, PeerScoreUpdateType},
PeerId, SynchronizerState,
};
use anyhow::{bail, format_err, Result};
use futures::{
channel::{mpsc, oneshot},
stream::{futures_unordered::FuturesUnordered, select_all},
StreamExt,
};
use libra_config::config::{RoleType, StateSyncConfig};
use libra_logger::prelude::*;
use libra_types::crypto_proxies::ValidatorChangeEventWithProof;
use libra_types::{
crypto_proxies::LedgerInfoWithSignatures, transaction::TransactionListWithProof,
};
use network::{
proto::{GetChunkRequest, GetChunkResponse, StateSynchronizerMsg, StateSynchronizerMsg_oneof},
validator_network::{Event, StateSynchronizerEvents, StateSynchronizerSender},
};
use std::{
collections::HashMap,
convert::TryInto,
time::{Duration, SystemTime, UNIX_EPOCH},
};
use tokio::time::interval;
pub(crate) struct SyncRequest {
    // The Result returned to the caller is an Error if the StateSynchronizer failed to
    // reach the target (the LI in storage remains unchanged, as if nothing happened).
pub callback: oneshot::Sender<Result<()>>,
pub target: LedgerInfoWithSignatures,
}
pub(crate) struct EpochRetrievalRequest {
pub start_epoch: u64,
pub callback: oneshot::Sender<Result<ValidatorChangeEventWithProof>>,
}
/// Messages used by the StateSyncClient for communication with the Coordinator.
pub(crate) enum CoordinatorMessage {
// used to initiate new sync
Request(SyncRequest),
// used to notify about new txn commit
Commit,
GetState(oneshot::Sender<SynchronizerState>),
// used to generate epoch proof
GetEpochProof(EpochRetrievalRequest),
}
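// Illustrative only: how a client might drive the coordinator over the mpsc
// channel. `coordinator_sender` and `target` are assumptions on the caller's
// side, not names from this file:
//
//     let (callback, receiver) = oneshot::channel();
//     coordinator_sender
//         .unbounded_send(CoordinatorMessage::Request(SyncRequest { callback, target }))?;
//     receiver.await??; // resolves once storage reaches `target`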
#[derive(Clone, Debug, PartialEq, Eq)]
struct PendingRequestInfo {
expiration_time: SystemTime,
known_version: u64,
request_epoch: u64,
limit: u64,
}
/// Coordination of the synchronization process is driven by SyncCoordinator, whose `start()`
/// function runs an infinite event loop and triggers actions based on external / internal requests.
/// The coordinator can work in two modes:
/// * FullNode: an infinite stream of ChunkRequests is sent to the predefined static peers
/// (the parent is going to reply with a ChunkResponse if its committed version becomes
/// higher within the timeout interval).
/// * Validator: the ChunkRequests are generated on demand for a specific target LedgerInfo to
/// synchronize to.
pub(crate) struct SyncCoordinator<T> {
// used to process client requests
client_events: mpsc::UnboundedReceiver<CoordinatorMessage>,
// Current state of the storage, which includes both the latest committed transaction and the
// latest transaction covered by the LedgerInfo (see `SynchronizerState` documentation).
// The state is updated via syncing with the local storage.
local_state: SynchronizerState,
// duration with the same version before the next attempt to get the next chunk
retry_timeout: Duration,
// config
config: StateSyncConfig,
// role of node
role: RoleType,
// peers used for synchronization
peer_manager: PeerManager,
// Optional sync request to be called when the target sync is reached
sync_request: Option<SyncRequest>,
// queue of incoming long polling requests
    // a peer will be notified about a new chunk of transactions if it becomes available before the expiry time
subscriptions: HashMap<PeerId, PendingRequestInfo>,
executor_proxy: T,
}
impl<T: ExecutorProxyTrait> SyncCoordinator<T> {
pub fn new(
client_events: mpsc::UnboundedReceiver<CoordinatorMessage>,
role: RoleType,
config: StateSyncConfig,
executor_proxy: T,
initial_state: SynchronizerState,
) -> Self {
let upstream_peers = config.upstream_peers.upstream_peers.clone();
let retry_timeout_val = match role {
RoleType::FullNode => config.tick_interval_ms + config.long_poll_timeout_ms,
RoleType::Validator => 2 * config.tick_interval_ms,
};
Self {
client_events,
local_state: initial_state,
retry_timeout: Duration::from_millis(retry_timeout_val),
config,
role,
peer_manager: PeerManager::new(upstream_peers),
subscriptions: HashMap::new(),
sync_request: None,
executor_proxy,
}
}
    /// Main routine: starts the sync coordinator, which listens for CoordinatorMessage events.
pub async fn start(mut self, network: Vec<(StateSynchronizerSender, StateSynchronizerEvents)>) {
let mut interval = interval(Duration::from_millis(self.config.tick_interval_ms)).fuse();
let network_senders: Vec<StateSynchronizerSender> =
network.iter().map(|t| t.0.clone()).collect();
let events: Vec<_> = network
.into_iter()
.enumerate()
.map(|(idx, t)| t.1.map(move |e| (idx, e)))
.collect();
let mut network_events = select_all(events).fuse();
loop {
::futures::select! {
msg = self.client_events.select_next_some() => {
match msg {
CoordinatorMessage::Request(request) => {
if let Err(e) = self.request_sync(request).await {
error!("[state sync] request sync fail: {}", e);
}
}
CoordinatorMessage::Commit => {
if let Err(e) = self.process_commit().await {
error!("[state sync] process commit fail: {}", e);
}
}
CoordinatorMessage::GetState(callback) => {
self.get_state(callback);
}
CoordinatorMessage::GetEpochProof(request) => {
self.get_epoch_proof(request).await;
}
};
},
(idx, network_event) = network_events.select_next_some() => {
match network_event {
Ok(event) => {
match event {
Event::NewPeer(peer_id) => {
debug!("[state sync] new peer {}", peer_id);
self.peer_manager.enable_peer(peer_id, network_senders[idx].clone());
self.check_progress().await;
}
Event::LostPeer(peer_id) => {
debug!("[state sync] lost peer {}", peer_id);
self.peer_manager.disable_peer(&peer_id);
}
Event::Message((peer_id, mut message)) => {
match message.message.unwrap() {
StateSynchronizerMsg_oneof::ChunkRequest(request) => {
if let Err(err) = self.process_chunk_request(peer_id, request).await {
error!("[state sync] failed to serve chunk request from {}, local LI version {}: {}", peer_id, self.local_state.highest_local_li.ledger_info().version(), err);
}
}
StateSynchronizerMsg_oneof::ChunkResponse(response) => {
if let Err(err) = self.process_chunk_response(&peer_id, response).await {
error!("[state sync] failed to process chunk response from {}: {}", peer_id, err);
counters::APPLY_CHUNK_FAILURE.with_label_values(&[&*peer_id.to_string()]).inc();
} else {
self.peer_manager.update_score(&peer_id, PeerScoreUpdateType::Success);
counters::APPLY_CHUNK_SUCCESS.with_label_values(&[&*peer_id.to_string()]).inc();
}
}
}
}
_ => {}
}
},
Err(err) => { error!("[state sync] network error {}", err); },
}
},
_ = interval.select_next_some() => {
self.check_progress().await;
}
}
}
}
/// Sync up coordinator state with the local storage.
async fn sync_state_with_local_storage(&mut self) -> Result<()> {
let new_state = self.executor_proxy.get_local_storage_state().await?;
if new_state.epoch() > self.local_state.epoch() {
debug!(
"[state sync] Trusted epoch moved from {} to {}",
self.local_state.epoch(),
new_state.epoch()
);
}
self.local_state = new_state;
Ok(())
}
/// In case there has been another pending request it's going to be overridden.
/// The caller will be notified about request completion via request.callback oneshot:
/// at that moment it's guaranteed that the highest LI exposed by the storage is equal to the
/// target LI.
/// StateSynchronizer assumes that it's the only one modifying the storage (consensus is not
/// trying to commit transactions concurrently).
async fn request_sync(&mut self, request: SyncRequest) -> Result<()> {
self.sync_state_with_local_storage().await?;
let highest_local_li = self.local_state.highest_local_li.ledger_info();
let target_version = request.target.ledger_info().version();
counters::TARGET_VERSION.set(target_version as i64);
debug!(
"[state sync] sync requested. Known LI: {}, requested_version: {}",
highest_local_li, target_version
);
if target_version <= highest_local_li.version() {
request
.callback
.send(Err(format_err!("Sync request for an old version")))
.map_err(|_| format_err!("Callback error"))?;
bail!(
"[state sync] Sync request for version {} <= known version {}",
target_version,
highest_local_li.version()
);
}
self.peer_manager
.set_peers(request.target.signatures().keys().copied().collect());
self.sync_request = Some(request);
self.send_chunk_request(
self.local_state.highest_version_in_local_storage(),
self.local_state.epoch(),
)
.await
}
/// The function is called after new txns have been applied to the local storage.
/// As a result it might:
/// 1) help remote subscribers with long poll requests, 2) finish local sync request
async fn process_commit(&mut self) -> Result<()> {
// We choose to re-sync the state with the storage as it's the simplest approach:
// in case the performance implications of re-syncing upon every commit are high,
// it's possible to manage some of the highest known versions in memory.
self.sync_state_with_local_storage().await?;
let local_version = self.local_state.highest_version_in_local_storage();
counters::COMMITTED_VERSION.set(local_version as i64);
self.check_subscriptions().await;
self.peer_manager.remove_requests(local_version);
let sync_request_complete = self.sync_request.as_ref().map_or(false, |sync_req| {
// Each `ChunkResponse` is verified to make sure it never goes beyond the requested
// target version, hence, the local version should never go beyond sync req target.
assert!(local_version <= sync_req.target.ledger_info().version());
sync_req.target.ledger_info().version() == local_version
});
if sync_request_complete {
debug!(
"[state sync] synchronization to {} is finished",
local_version
);
if let Some(sync_request) = self.sync_request.take() {
sync_request
.callback
.send(Ok(()))
.map_err(|_| format_err!("Callback error"))?;
}
}
Ok(())
}
fn get_state(&self, callback: oneshot::Sender<SynchronizerState>) {
if callback.send(self.local_state.clone()).is_err() {
error!("[state sync] failed to send internal state");
}
}
/// There are two types of ChunkRequests:
/// 1) Validator chunk requests are for a specific target LI and don't ask for long polling.
/// 2) FullNode chunk requests don't specify a target LI and can allow long polling.
async fn process_chunk_request(
&mut self,
peer_id: PeerId,
mut request: GetChunkRequest,
) -> Result<()> {
if request.timeout > self.config.max_timeout_ms
|| request.limit > self.config.max_chunk_limit
{
bail!(
"[state sync] Request timeout: {}, chunk limit: {}; configured max timeout is {} ms, and chunk limit is {}",
request.timeout,
request.limit,
self.config.max_timeout_ms,
self.config.max_chunk_limit
);
}
let target = request
.ledger_info_with_sigs
.take()
.map(TryInto::try_into)
.transpose()?;
self.sync_state_with_local_storage().await?;
let local_li_version = self.local_state.highest_local_li.ledger_info().version();
debug!(
"[state sync] chunk request: peer_id: {}, request known version: {}, target version: {}, local li version: {}",
peer_id.short_str(),
request.known_version,
target.as_ref().map_or("None".to_string(), |t: &LedgerInfoWithSignatures| t.ledger_info().version().to_string()),
local_li_version,
);
// If there is nothing a node can help with, and the request supports long polling,
// add it to the subscriptions.
if local_li_version <= request.known_version && request.timeout > 0 {
let expiration_time =
SystemTime::now().checked_add(Duration::from_millis(request.timeout));
if let Some(time) = expiration_time {
let request_info = PendingRequestInfo {
expiration_time: time,
known_version: request.known_version,
request_epoch: request.current_epoch,
limit: request.limit,
};
self.subscriptions.insert(peer_id, request_info);
}
return Ok(());
}
// Send the chunk response right away (even if empty: empty response is better than no
// response at all because it triggers another attempt without timing out).
let sender = self
.peer_manager
.get_network_sender(&peer_id)
.ok_or_else(|| format_err!("ChunkRequest from unknown peer {}", peer_id.short_str()))?;
self.deliver_chunk(
peer_id,
request.known_version,
request.current_epoch,
request.limit,
target,
sender,
)
.await
}
/// Generate and send the ChunkResponse to the given peer.
/// The chunk response contains transactions from the local storage with the proofs relative to
/// the given target ledger info.
/// In case target is None, the ledger info is set to the local highest ledger info.
async fn deliver_chunk(
&self,
peer_id: PeerId,
known_version: u64,
request_epoch: u64,
limit: u64,
target: Option<LedgerInfoWithSignatures>,
mut network_sender: StateSynchronizerSender,
) -> Result<()> {
let response_li = self.choose_response_li(known_version, request_epoch, target)?;
let txns = self
.executor_proxy
.get_chunk(known_version, limit, response_li.ledger_info().version())
.await?;
let chunk_response = GetChunkResponse {
ledger_info_with_sigs: Some(response_li.into()),
txn_list_with_proof: Some(txns.into()),
};
let msg = StateSynchronizerMsg {
message: Some(StateSynchronizerMsg_oneof::ChunkResponse(chunk_response)),
};
if network_sender.send_to(peer_id, msg).await.is_err() {
error!("[state sync] failed to send p2p message");
}
Ok(())
}
    /// The choice of the LedgerInfo in the response follows this logic:
/// * response LI is either the requested target or the highest local LI if target is None.
/// * if the response LI would not belong to `request_epoch`, change
/// the response LI to the LI that is terminating `request_epoch`.
fn choose_response_li(
&self,
known_version: u64,
request_epoch: u64,
target: Option<LedgerInfoWithSignatures>,
) -> Result<LedgerInfoWithSignatures> {
let mut target_li = target.unwrap_or_else(|| self.local_state.highest_local_li.clone());
if target_li.ledger_info().epoch() > request_epoch {
let end_of_epoch_li = self
.executor_proxy
.get_epoch_proof(request_epoch)?
.ledger_info_with_sigs
.first()
.ok_or_else(|| {
format_err!(
"[state sync] Fail to retrieve end of epoch LI for epoch {}",
request_epoch
)
})?
.clone();
debug!("[state sync] Chunk response for known_version = {} is limited to the last txn of epoch {} at version {}", known_version, request_epoch, end_of_epoch_li.ledger_info().version());
target_li = end_of_epoch_li;
}
Ok(target_li)
}
/// * Issue a request for the next chunk.
/// * Validate and execute the transactions.
/// * Notify the clients in case a sync request has been completed.
async fn process_chunk_response(
&mut self,
peer_id: &PeerId,
response: GetChunkResponse,
) -> Result<()> {
counters::RESPONSES_RECEIVED
.with_label_values(&[&*peer_id.to_string()])
.inc();
let txn_list_with_proof: TransactionListWithProof = response
.txn_list_with_proof
.ok_or_else(|| format_err!("Missing txn_list_with_proof"))?
.try_into()?;
let known_version = self.local_state.highest_version_in_local_storage();
let chunk_start_version =
txn_list_with_proof
.first_transaction_version
.ok_or_else(|| {
self.peer_manager
.update_score(&peer_id, PeerScoreUpdateType::EmptyChunk);
format_err!("[state sync] Empty chunk from {}", peer_id.short_str())
})?;
if chunk_start_version != known_version + 1 {
// Old / wrong chunk.
self.peer_manager
.update_score(&peer_id, PeerScoreUpdateType::ChunkVersionCannotBeApplied);
bail!(
"[state sync] Non sequential chunk from {}: known_version: {}, received: {}",
peer_id.short_str(),
known_version,
chunk_start_version
);
}
let response_li: LedgerInfoWithSignatures = response
.ledger_info_with_sigs
.ok_or_else(|| format_err!("Missing ledger_info_with_sigs"))?
.try_into()?;
if let Some(sync_req) = self.sync_request.as_ref() {
// Valid responses should not exceed the LI version of the request.
if sync_req.target.ledger_info().version() < response_li.ledger_info().version() {
self.peer_manager
.update_score(peer_id, PeerScoreUpdateType::InvalidChunk);
bail!(
"[state sync] Response from {} has an LI version higher than requested.",
peer_id
);
}
}
let chunk_size = txn_list_with_proof.len() as u64;
// Optimistically fetch the next chunk assuming the current chunk is going to be applied
// successfully.
let new_version = known_version + chunk_size;
let new_epoch = match response_li.ledger_info().next_validator_set() {
// This LI carries the validator set for the next epoch.
Some(_) => response_li.ledger_info().epoch() + 1,
None => response_li.ledger_info().epoch(),
};
self.send_chunk_request(new_version, new_epoch).await?;
self.validate_and_store_chunk(txn_list_with_proof, response_li)
.await
.map_err(|e| {
self.peer_manager
.update_score(peer_id, PeerScoreUpdateType::InvalidChunk);
format_err!("[state sync] failed to apply chunk: {}", e)
})?;
counters::STATE_SYNC_TXN_REPLAYED.inc_by(chunk_size as i64);
debug!(
"[state sync] applied chunk. Previous version: {}, new version: {}, chunk size: {}",
known_version, new_version, chunk_size
);
// The overall chunk processing duration is calculated starting from the very first attempt
// until the commit
if let Some(first_attempt_tst) = self.peer_manager.get_first_request_time(known_version + 1)
{
if let Ok(duration) = SystemTime::now().duration_since(first_attempt_tst) {
counters::SYNC_PROGRESS_DURATION.observe_duration(duration);
}
}
self.process_commit().await
}
async fn validate_and_store_chunk(
&self,
txn_list_with_proof: TransactionListWithProof,
target: LedgerInfoWithSignatures,
) -> Result<()> {
target.verify(self.local_state.verifier())?;
self.executor_proxy
.execute_chunk(txn_list_with_proof, target)
.await?;
Ok(())
}
/// Ensures that StateSynchronizer is making progress:
/// issue a new request if too much time passed since requesting highest_committed_version + 1.
async fn check_progress(&mut self) {
if self.peer_manager.is_empty() {
return;
}
if self.role == RoleType::Validator && self.sync_request.is_none() {
return;
}
let known_version = self.local_state.highest_version_in_local_storage();
let last_request_tst = self
.peer_manager
.get_last_request_time(known_version + 1)
.unwrap_or(UNIX_EPOCH);
// if coordinator didn't make progress by expected time, issue new request
if let Some(tst) = last_request_tst.checked_add(self.retry_timeout) {
if SystemTime::now().duration_since(tst).is_ok() {
self.peer_manager
.process_timeout(known_version + 1, self.role == RoleType::Validator);
if let Err(e) = self
.send_chunk_request(known_version, self.local_state.epoch())
.await
{
error!("[state sync] Failed to send chunk request: {}", e);
}
counters::TIMEOUT.inc();
}
}
}
/// Sends a chunk request with a given `known_version` and `known_epoch`
/// (might be chosen optimistically).
/// The request includes a target for Validator and a non-zero timeout for a FullNode.
async fn send_chunk_request(&mut self, known_version: u64, known_epoch: u64) -> Result<()> {
let (peer_id, mut sender) = self
.peer_manager
.pick_peer()
.ok_or_else(|| format_err!("No peers found for chunk request."))?;
let mut req = GetChunkRequest::default();
req.known_version = known_version;
req.current_epoch = known_epoch;
req.limit = self.config.chunk_limit;
if self.role == RoleType::Validator {
let target = self
.sync_request
.as_ref()
.ok_or_else(|| {
format_err!("[state sync] Validator chunk request without a sync request.")
})?
.target
.clone();
if target.ledger_info().version() <= known_version {
debug!(
"[state sync] Reached version {}, no need to send more requests",
known_version
);
return Ok(());
}
req.ledger_info_with_sigs = Some(target.into());
} else {
req.timeout = self.config.long_poll_timeout_ms;
}
debug!(
"[state sync] request next chunk. peer_id: {}, known_version: {}, timeout: {}",
peer_id.short_str(),
known_version,
req.timeout
);
let msg = StateSynchronizerMsg {
message: Some(StateSynchronizerMsg_oneof::ChunkRequest(req)),
};
self.peer_manager
.process_request(known_version + 1, peer_id);
sender.send_to(peer_id, msg).await?;
counters::REQUESTS_SENT
.with_label_values(&[&*peer_id.to_string()])
.inc();
Ok(())
}
/// The function is called after the local storage is updated with new transactions:
/// it might deliver chunks for the subscribers that have been waiting with the long polls.
///
/// Note that it is possible to help the subscribers only with the transactions that match
/// the highest ledger info in the local storage (some committed transactions are ahead of the
/// latest ledger info and are not going to be used for helping the remote subscribers).
/// The function assumes that the local state has been synced with storage.
async fn check_subscriptions(&mut self) {
let highest_li_version = self.local_state.highest_local_li.ledger_info().version();
let mut ready = vec![];
self.subscriptions.retain(|peer_id, request_info| {
// filter out expired peer requests
if SystemTime::now()
                .duration_since(request_info.expiration_time)
.is_ok()
{
return false;
}
if request_info.known_version < highest_li_version {
ready.push((*peer_id, request_info.clone()));
false
} else {
true
}
});
let mut futures = FuturesUnordered::new();
for (peer_id, request_info) in ready {
if let Some(sender) = self.peer_manager.get_network_sender(&peer_id) {
futures.push(self.deliver_chunk(
peer_id,
request_info.known_version,
request_info.request_epoch,
request_info.limit,
Some(self.local_state.highest_local_li.clone()),
sender,
));
}
}
while let Some(res) = futures.next().await {
if let Err(err) = res {
error!("[state sync] failed to notify subscriber {}", err);
}
}
}
async fn
|
(&self, request: EpochRetrievalRequest) {
if request
.callback
.send(self.executor_proxy.get_epoch_proof(request.start_epoch))
.is_err()
{
error!("[state sync] coordinator failed to send back epoch proof");
}
}
}
|
get_epoch_proof
|
regexp_query.rs
|
use serde::ser::{Serialize, Serializer};
/// You can use the flags parameter to enable more optional operators for Lucene’s regular
/// expression engine.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub enum RegexpFlag {
/// Enables all optional operators.
All,
/// Enables the `~` operator. You can use `~` to negate the shortest following pattern.
/// For example:
///
/// `a~bc # matches 'adc' and 'aec' but not 'abc'`
Complement,
/// Enables the `<>` operators. You can use `<>` to match a numeric range. For example:
///
/// `foo<1-100> # matches 'foo1', 'foo2' ... 'foo99', 'foo100'`
/// `foo<01-100> # matches 'foo01', 'foo02' ... 'foo99', 'foo100'`
Interval,
/// Enables the `&` operator, which acts as an AND operator. The match will succeed if patterns
/// on both the left side AND the right side matches. For example:
///
/// `aaa.+&.+bbb # matches 'aaabbb'`
Intersection,
    /// Enables the `@` operator. You can use `@` to match any entire string.
///
/// You can combine the `@` operator with `&` and `~` operators to create an
/// "everything except" logic. For example:
///
/// `@&~(abc.+) # matches everything except terms beginning with 'abc'`
Anystring,
}
impl From<RegexpFlag> for &'static str {
fn from(value: RegexpFlag) -> Self {
match value {
RegexpFlag::All => "ALL",
RegexpFlag::Complement => "COMPLEMENT",
RegexpFlag::Interval => "INTERVAL",
RegexpFlag::Intersection => "INTERSECTION",
RegexpFlag::Anystring => "ANYSTRING",
}
}
}
impl From<RegexpFlag> for String {
fn fr
|
alue: RegexpFlag) -> Self {
<&'static str>::from(value).to_string()
}
}
impl std::fmt::Display for RegexpFlag {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
<&'static str>::from(*self).fmt(f)
}
}
impl Serialize for RegexpFlag {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
<&'static str>::from(*self).serialize(serializer)
}
}
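// A small illustrative test of the flag-to-keyword mapping above; it relies
// only on the `From` and `Display` impls defined in this file.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn flags_render_as_lucene_keywords() {
        assert_eq!(<&'static str>::from(RegexpFlag::Complement), "COMPLEMENT");
        assert_eq!(String::from(RegexpFlag::Interval), "INTERVAL");
        assert_eq!(RegexpFlag::All.to_string(), "ALL");
    }
}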
|
om(v
|
frustrum.rs
|
use glam::*;
use rtbvh::AABB;
use std::convert::Into;
use std::fmt::Display;
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
pub enum FrustrumResult {
Outside,
Intersect,
Inside,
}
impl FrustrumResult {
pub fn should_render(&self) -> bool {
match self {
FrustrumResult::Outside => false,
FrustrumResult::Intersect => true,
FrustrumResult::Inside => true,
}
}
}
#[derive(Copy, Clone)]
pub struct FrustrumPlane {
pub normal: [f32; 3],
pub d: f32,
}
impl Display for FrustrumPlane {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(
f,
"FrustrumPlane {{ normal: {}, d: {} }}",
Vec3::from(self.normal),
self.d
)
}
}
impl FrustrumPlane {
pub fn from_coefficients(a: f32, b: f32, c: f32, d: f32) -> FrustrumPlane {
let normal = Vec3::from([a, b, c]);
let length = normal.length();
let normal = normal / length;
Self {
normal: normal.into(),
d: d / length,
}
}
pub fn new(v0: Vec3, v1: Vec3, v2: Vec3) -> FrustrumPlane {
let aux1 = v0 - v1;
let aux2 = v2 - v1;
        let normal: Vec3 = aux2.cross(aux1).normalize(); // plane normal needs the cross product; `*` is component-wise in glam
let point = v2;
let d = -(normal.dot(point));
FrustrumPlane {
normal: normal.into(),
d,
}
}
pub fn set_3_points(&mut self, v0: Vec3, v1: Vec3, v2: Vec3) {
let aux1 = v0 - v1;
let aux2 = v2 - v1;
        let normal: Vec3 = aux2.cross(aux1).normalize(); // cross product, as in `new`
let point = v2;
let d = -(normal.dot(point));
self.normal = normal.into();
self.d = d;
}
pub fn set_normal_and_point(&mut self, normal: Vec3, point: Vec3) {
self.normal = normal.into();
self.d = -(normal.dot(point));
}
pub fn set_coefficients(&mut self, a: f32, b: f32, c: f32, d: f32) {
let normal = Vec3::from([a, b, c]);
let length = normal.length();
let normal: Vec3 = normal / length;
self.normal = normal.into();
self.d = d / length;
}
pub fn distance(&self, p: Vec3) -> f32 {
self.d + Vec3::from(self.normal).dot(p)
}
}
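// Sketch of the signed-distance convention used by `distance`: points on the
// side the normal faces yield a positive value (values illustrative only):
//
//     let plane = FrustrumPlane::from_coefficients(0.0, 1.0, 0.0, 0.0); // the plane y = 0
//     assert!(plane.distance(Vec3::new(0.0, 2.0, 0.0)) > 0.0);
//     assert!(plane.distance(Vec3::new(0.0, -2.0, 0.0)) < 0.0);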
impl From<(Vec3, Vec3, Vec3)> for FrustrumPlane {
fn from(vecs: (Vec3, Vec3, Vec3)) -> Self {
FrustrumPlane::new(vecs.0, vecs.1, vecs.2)
}
}
impl From<[Vec3; 3]> for FrustrumPlane {
fn from(vecs: [Vec3; 3]) -> Self {
FrustrumPlane::new(vecs[0], vecs[1], vecs[2])
}
}
impl From<(f32, f32, f32, f32)> for FrustrumPlane {
fn from(coeffs: (f32, f32, f32, f32)) -> Self {
let (a, b, c, d) = coeffs;
Self::from_coefficients(a, b, c, d)
}
}
impl From<[f32; 4]> for FrustrumPlane {
fn from(coeffs: [f32; 4]) -> Self {
let [a, b, c, d] = coeffs;
Self::from_coefficients(a, b, c, d)
}
}
pub struct FrustrumG {
pub planes: [FrustrumPlane; 6],
}
impl Display for FrustrumG {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(
f,
"FrustrumG: {{ planes: [{}, {}, {}, {}, {}, {}] }}",
self.planes[0],
self.planes[1],
self.planes[2],
self.planes[3],
self.planes[4],
self.planes[5],
)
}
}
impl FrustrumG {
pub fn from_matrix(matrix: Mat4) -> FrustrumG {
let ma = matrix.to_cols_array();
let a = ma[3];
let b = ma[7];
let c = ma[11];
let d = ma[15];
// Left
let plane0: FrustrumPlane = (a + ma[0], b + ma[4], c + ma[8], d + ma[12]).into();
// Right
let plane1: FrustrumPlane = (a - ma[0], b - ma[4], c - ma[8], d - ma[12]).into();
// Top
let plane2: FrustrumPlane = (a - ma[1], b - ma[5], c - ma[9], d - ma[13]).into();
// Bottom
let plane3: FrustrumPlane = (a + ma[1], b + ma[5], c + ma[9], d + ma[13]).into();
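        // Near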
let plane4: FrustrumPlane = (a + ma[2], b + ma[6], c + ma[10], d + ma[14]).into();
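        // Far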
let plane5: FrustrumPlane = (a - ma[2], b - ma[6], c - ma[10], d - ma[14]).into();
FrustrumG {
planes: [plane0, plane1, plane2, plane3, plane4, plane5],
}
}
pub fn
|
(camera: &crate::camera::Camera) -> FrustrumG {
let matrix = camera.get_rh_matrix();
Self::from_matrix(matrix)
}
pub fn point_in_frustrum(&self, p: Vec3) -> FrustrumResult {
for plane in &self.planes {
if plane.distance(p) < 0.0 {
return FrustrumResult::Outside;
}
}
FrustrumResult::Inside
}
pub fn sphere_in_frustrum(&self, p: Vec3, radius: f32) -> FrustrumResult {
let mut result = FrustrumResult::Inside;
for plane in &self.planes {
let distance = plane.distance(p);
if distance < -radius {
return FrustrumResult::Outside;
} else {
result = FrustrumResult::Intersect;
}
}
result
}
pub fn aabb_in_frustrum(&self, b: &AABB) -> FrustrumResult {
let mut result = FrustrumResult::Outside;
for plane in &self.planes {
let mut min = [0.0; 3];
let mut max = [0.0; 3];
if plane.normal[0] > 0.0 {
min[0] = b.min[0];
max[0] = b.max[0];
} else {
min[0] = b.max[0];
max[0] = b.min[0];
}
if plane.normal[1] > 0.0 {
min[1] = b.min[1];
max[1] = b.max[1];
} else {
min[1] = b.max[1];
max[1] = b.min[1];
}
if plane.normal[2] > 0.0 {
min[2] = b.min[2];
max[2] = b.max[2];
} else {
min[2] = b.max[2];
max[2] = b.min[2];
}
if plane.distance(min.into()) >= 0.0 || plane.distance(max.into()) >= 0.0 {
result = FrustrumResult::Intersect;
} else {
return FrustrumResult::Outside;
}
}
result
}
}
impl From<&crate::camera::Camera> for FrustrumG {
fn from(camera: &crate::camera::Camera) -> Self {
Self::new(camera)
}
}
impl From<Mat4> for FrustrumG {
fn from(matrix: Mat4) -> Self {
Self::from_matrix(matrix)
}
}
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn frustrum_works() {
use crate::camera::*;
use rtbvh::AABB;
let camera = Camera::zero();
let frustrum: FrustrumG = FrustrumG::from_matrix(camera.get_lh_matrix());
let point_behind = glam::Vec3::new(0.0, 0.0, -1.0);
let point_in_front = glam::Vec3::new(0.0, 0.0, 1.0);
let aabb_in_front = AABB {
min: glam::Vec3::new(0.2, 0.2, 5.0).into(),
max: glam::Vec3::new(0.2, 0.2, 5.0).into(),
};
let aabb_in_back = AABB {
min: glam::Vec3::new(-1.0, 0.0, -2.0).into(),
max: glam::Vec3::new(1.0, 0.0, -2.0).into(),
};
let aabb_half = AABB {
min: glam::Vec3::new(-5.0, 0.0, 2.0).into(),
max: glam::Vec3::new(0.0, 0.0, 2.0).into(),
};
assert_eq!(
FrustrumResult::Outside,
frustrum.point_in_frustrum(point_behind),
);
assert_eq!(
FrustrumResult::Inside,
frustrum.point_in_frustrum(point_in_front),
);
assert_eq!(
FrustrumResult::Intersect,
frustrum.aabb_in_frustrum(&aabb_in_front)
);
assert_eq!(
FrustrumResult::Outside,
frustrum.aabb_in_frustrum(&aabb_in_back),
);
assert_eq!(
FrustrumResult::Intersect,
frustrum.aabb_in_frustrum(&aabb_half),
);
}
}
|
new
|
application.go
|
/*
Copyright 2021 The AtomCI Group Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kuberes
import (
"encoding/base64"
"encoding/json"
"fmt"
"strconv"
"strings"
"github.com/go-atomci/atomci/constant"
"github.com/go-atomci/atomci/internal/core/settings"
"github.com/go-atomci/atomci/internal/dao"
"github.com/go-atomci/atomci/internal/middleware/log"
"github.com/go-atomci/atomci/internal/models"
"github.com/go-atomci/atomci/pkg/kube"
"github.com/go-atomci/atomci/utils/errors"
"github.com/go-atomci/atomci/utils/query"
"github.com/go-atomci/atomci/utils/validate"
k8serrors "k8s.io/apimachinery/pkg/api/errors"
apiv1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/client-go/kubernetes"
"github.com/astaxie/beego/orm"
)
type ContainerParam struct {
Name string `json:"name"`
Image string `json:"image"`
}
type RollingUpdateApp struct {
Name string `json:"name"`
Image string `json:"image"`
}
type AppParam struct {
Name string `json:"name"` //appname
Containers []ContainerParam `json:"containers,omitempty"`
Replicas *intstr.IntOrString `json:"replicas,omitempty"`
}
type VersionWeight struct {
Stage string `json:"stage"`
Version string `json:"version"`
Weight intstr.IntOrString `json:"weight"`
}
type AppItem struct {
models.CaasApplication
ReplicasConstrast string `json:"replicas_constrast,omitempty"`
Status string `json:"status,omitempty"`
Pods string `json:"pods,omitempty"`
CreateAt string `json:"create_at,omitempty"`
UpdateAt string `json:"update_at,omitempty"`
}
type AppPod struct {
Pod `json:",inline"`
Weight int `json:"weight"`
}
type AppDetail struct {
AppItem
Services []*ServiceDetail `json:"services,omitempty"`
Pods []*AppPod `json:"pods,omitempty"`
}
const (
AppKindDaemonSet = "daemonset"
AppKindDeployment = "deployment"
AppKindStatefulSet = "statefulset"
LABLE_APPNAME_KEY = "app"
LABLE_APPVERSION_KEY = "version"
ServiceKind = "service"
ConfigMapKind = "configmap"
SecretKind = "secret"
DescriptionAnnotationKey = "description"
OwnerNameAnnotationKey = "owner_name"
DEFAULT_PROJECT_ID = 0
YamlSeparator = "---\n"
IngApiVersion = "extensions/v1beta1"
)
type PatcherFunction func(app models.CaasApplication)
type NamespaceListFunction func() []string
type ResType string
const (
ResTypeApp ResType = "app"
ResTypePod ResType = "pod"
ResTypeDeploy ResType = "deploy"
ResTypeTemplate ResType = "template"
ResTypeImage ResType = "image"
)
type AppRes struct {
Cluster string
EnvID int64
ProjectID int64
Client kubernetes.Interface
Appmodel *dao.AppModel
}
type AppPodBasicParam struct {
Namespace string `json:"namespace"`
Name string `json:"name"`
PodName string `json:"pod_name"`
}
type AppPodDetail struct {
models.CaasApplication `json:",inline"`
Pod AppPod `json:"pod"`
}
func NewAppRes(cluster string, envID, projectID int64) (*AppRes, error) {
if cluster == "" {
return &AppRes{
Cluster: cluster,
EnvID: envID,
Appmodel: dao.NewAppModel(),
ProjectID: projectID,
}, nil
}
client, err := kube.GetClientset(cluster)
if err != nil {
		// cluster is guaranteed non-empty here (the empty case returned
		// above), so a clientset error is fatal rather than ignored.
		return nil, errors.NewInternalServerError().SetCause(err)
}
return &AppRes{
Cluster: cluster,
EnvID: envID,
Appmodel: dao.NewAppModel(),
ProjectID: projectID,
Client: client,
}, nil
}
// GetAppListByPagination ..
func (ar *AppRes) GetAppListByPagination(filterQuery *query.FilterQuery, projectID int64, cluster string) (*query.QueryResult, error) {
appList := []AppItem{}
res, err := ar.Appmodel.GetAppList(filterQuery, projectID, cluster, "")
if err != nil {
return nil, err
}
list, ok := res.Item.([]models.CaasApplication)
if !ok {
return nil, fmt.Errorf("data type is not right! ")
}
for _, item := range list {
aitem := AppItem{}
aitem.CaasApplication = item
aitem.CreateAt = item.CreateAt.Format("2006-01-02 15:04:05")
aitem.UpdateAt = item.UpdateAt.Format("2006-01-02 15:04:05")
deploymentName := item.Name
pods, status, _ := ar.GetDeployRuntime(item, deploymentName)
aitem.Pods = pods
aitem.Status = status
appList = append(appList, aitem)
}
|
return res, nil
}
func (ar *AppRes) GetDeployRuntime(app models.CaasApplication, deploymentName string) (string, string, error) {
// TODO: current only support deployment
v1Deployment, err := ar.Client.AppsV1().Deployments(app.Namespace).Get(deploymentName, metav1.GetOptions{})
if err != nil {
log.Log.Warn("get deployment error: %s", err.Error())
return "", "", err
}
readyReplicas := v1Deployment.Status.ReadyReplicas
replicas := v1Deployment.Status.Replicas
pods := fmt.Sprintf("%v / %v", readyReplicas, replicas)
status := "NotReady"
if readyReplicas == replicas {
status = "Running"
} else if readyReplicas != 0 {
status = "Warning"
}
return pods, status, nil
}
func (ar *AppRes) GetAppDetail(namespace, name string) (*AppDetail, error) {
app, err := ar.Appmodel.GetAppByName(ar.Cluster, namespace, name)
if err != nil {
return nil, err
}
detail := AppDetail{}
detail.CaasApplication = *app
deploymentName := app.Name
detail.ReplicasConstrast, detail.Status, err = ar.GetDeployRuntime(*app, deploymentName)
if err != nil {
return nil, err
}
detail.CreateAt = app.CreateAt.Format("2006-01-02 15:04:05")
detail.UpdateAt = app.UpdateAt.Format("2006-01-02 15:04:05")
// Pods
detail.Pods, err = ar.getAppPodList(app, deploymentName)
if err != nil {
log.Log.Error("Get Pods information failed: %s", err.Error())
return nil, err
}
nativeAppTemplate := NativeAppTemplate{}
err = json.Unmarshal([]byte(app.Template), &nativeAppTemplate)
if err != nil {
log.Log.Error("app template json unmarshal error: %s", err.Error())
}
appDeploymentName := nativeAppTemplate.Deployment.Name
if svc, err := ar.GetAppServiceDetail(namespace, appDeploymentName, getBestNodeIP(detail.Pods)); err != nil {
log.Log.Warn("get service detail failed: %s", err.Error())
} else {
log.Log.Debug("app: %v's svc len: %v", app.Name, len(svc))
detail.Services = svc
}
return &detail, nil
}
func (ar *AppRes) GetAppServiceDetail(namespace, appDeploymentName, nodeIP string) ([]*ServiceDetail, error) {
return GetAppServices(ar.Client, ar.Cluster, namespace, appDeploymentName, nodeIP)
}
func (ar *AppRes) GetAppPodStatus(namespace, appName, podName string) (interface{}, error) {
_, err := ar.Appmodel.GetAppByName(ar.Cluster, namespace, appName)
if err != nil {
return "", err
}
pod, err := ar.Client.CoreV1().Pods(namespace).Get(podName, metav1.GetOptions{})
if err != nil {
return "", err
}
return pod.Status, nil
}
func (ar *AppRes) GetAppPodLog(namespace, appName, podName, containerName string) (string, error) {
_, err := ar.Appmodel.GetAppByName(ar.Cluster, namespace, appName)
if err != nil {
return "", err
}
tailLines := int64(1000)
body, err := ar.Client.CoreV1().Pods(namespace).GetLogs(podName, &apiv1.PodLogOptions{
Container: containerName,
TailLines: &tailLines,
}).Do().Raw()
if err != nil {
return "", err
}
return string(body), nil
}
func (ar *AppRes) InstallApp(
namespace, tname string,
template Template,
eparam *ExtensionParam) error {
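	// Note: errors from CreateK8sNamespace and CreateRegistrySecret are ignored
	// here; CreateRegistrySecret logs its own failures.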
CreateK8sNamespace(ar.Cluster, namespace)
CreateRegistrySecret(ar.Cluster, namespace, ar.EnvID)
if err := template.Validate(); err != nil {
return errors.NewBadRequest().SetCause(err)
}
if err := template.Default(ar.EnvID).Deploy(ar.ProjectID, ar.EnvID, ar.Cluster, namespace, tname, eparam); err != nil {
return errors.NewInternalServerError().SetCause(err)
}
return nil
}
func (ar *AppRes) UninstallApp(app models.CaasApplication) error {
if app.Template == "" {
return nil
}
template, err := CreateAppTemplateByApp(app)
if err != nil {
return err
}
kr := NewKubeAppRes(ar.Client, ar.Cluster, app.Namespace, app.Kind)
return kr.DeleteAppResource(template)
}
func (ar *AppRes) DeleteApp(namespace, appname string) error {
app, err := ar.Appmodel.GetAppByName(ar.Cluster, namespace, appname)
if err != nil {
if err == orm.ErrNoRows {
return nil
} else {
return err
}
}
err = ar.UninstallApp(*app)
if err != nil {
return err
}
err = ar.Appmodel.DeleteApp(*app)
return err
}
func (ar *AppRes) Restart(namespace, appname string) error {
app, err := ar.Appmodel.GetAppByName(ar.Cluster, namespace, appname)
if err != nil {
if err == orm.ErrNoRows {
log.Log.Warn("%s, %s, %s is not existed", ar.Cluster, namespace, appname)
return nil
}
return err
}
// TODO: refactor
// template, err := CreateAppTemplateByApp(*app)
// if err != nil {
// return err
// }
return NewKubeAppRes(ar.Client, ar.Cluster, namespace, app.Kind).Restart(appname)
}
func (ar *AppRes) ReconfigureApp(app models.CaasApplication, template AppTemplate) (*AppDetail, error) {
kr := NewKubeAppRes(ar.Client, ar.Cluster, app.Namespace, app.Kind)
exist, err := kr.CheckAppIsExisted(app.Name)
if err != nil {
return nil, errors.NewInternalServerError().SetCause(err)
}
if exist {
// TODO: create app template by app bug
oldTpl, err := CreateAppTemplateByApp(app)
if err != nil {
return nil, errors.NewInternalServerError().SetCause(err)
}
//update
err = kr.UpdateAppResource(&app, template, oldTpl, true)
if err != nil {
return nil, errors.NewInternalServerError().SetCause(err)
}
log.Log.Warn("the app is reconfigured, cluster: %s, namespace: %s, appname: %s", ar.Cluster, app.Namespace, app.Name)
} else {
if err := template.UpdateAppObject(&app); err != nil {
return nil, errors.NewBadRequest().SetCause(err)
}
//recreate
err = kr.CreateAppResource(template)
if err != nil {
return nil, errors.NewInternalServerError().SetCause(err)
}
log.Log.Warn("the app is recreated, cluster: %s, namespace: %s, appname: %s", ar.Cluster, app.Namespace, app.Name)
}
// update app info
err = ar.Appmodel.UpdateApp(&app, true)
if err != nil {
return nil, errors.NewInternalServerError().SetCause(err)
}
appDetail, err := ar.GetAppDetail(app.Namespace, app.Name)
// TODO: bug!!! GetAppDetail error
if err != nil {
return nil, errors.NewInternalServerError().SetCause(err)
}
return appDetail, nil
}
func (ar *AppRes) RollingUpdateApp(namespace, appname string, param []ContainerParam) error {
app, err := ar.Appmodel.GetAppByName(ar.Cluster, namespace, appname)
if err != nil {
if err == orm.ErrNoRows {
return errors.NewNotFound().SetCause(err)
} else if err == orm.ErrMultiRows {
return errors.NewConflict().SetCause(err)
} else {
return errors.NewInternalServerError().SetCause(err)
}
}
template, err := CreateAppTemplateByApp(*app)
if err != nil {
return errors.NewInternalServerError().SetCause(err)
}
kr := NewKubeAppRes(ar.Client, ar.Cluster, namespace, app.Kind)
if err = kr.UpdateAppResource(app, template.Image(param), nil, false); err != nil {
return errors.NewInternalServerError().SetCause(err)
}
log.Log.Debug(fmt.Sprintf("new image for %s/%s/%s is %s!", ar.Cluster, namespace, appname, app.Image))
if err = ar.Appmodel.UpdateApp(app, true); err != nil {
return errors.NewInternalServerError().SetCause(err)
}
return nil
}
func (ar *AppRes) ScaleApp(namespace, appname string, replicas int) error {
item, err := ar.Appmodel.GetAppByName(ar.Cluster, namespace, appname)
if err != nil {
return err
}
template, err := CreateAppTemplateByApp(*item)
if err != nil {
return err
}
// TODO: need refactor
kr := NewKubeAppRes(ar.Client, ar.Cluster, namespace, item.Kind)
if err := kr.Scale(item.Name, replicas); err != nil {
return err
}
tplStr, err := template.Replicas(replicas).String()
if err != nil {
return err
}
item.Replicas = replicas
item.Template = tplStr
return ar.Appmodel.UpdateApp(item, true)
}
func (ar *AppRes) SetDeployStatus(namespace, appname, status string) error {
return ar.Appmodel.SetDeployStatus(ar.Cluster, namespace, appname, status)
}
func (ar *AppRes) getAppPodList(app *models.CaasApplication, deploymentName string) ([]*AppPod, error) {
podList, err := GetPods(ar.Client, ar.Cluster, app.Namespace, deploymentName, app.Replicas)
if err != nil {
log.Log.Error("Get Pods information failed: " + err.Error())
return nil, err
}
appPodList := []*AppPod{}
for _, item := range podList {
pod := AppPod{
Weight: 0,
}
pod.Pod = *item
averWeight := models.DEFAULT_WEIGHT
if pod.Status == string(apiv1.PodRunning) {
pod.Weight = averWeight
}
appPodList = append(appPodList, &pod)
}
return appPodList, nil
}
func (ar *AppRes) SetLabels(namespace, name string, labels map[string]string) error {
app, err := ar.Appmodel.GetAppByName(ar.Cluster, namespace, name)
if err != nil {
if err == orm.ErrNoRows {
log.Log.Warn(fmt.Sprintf("application(%s/%s/%s) is not existed!", ar.Cluster, namespace, name))
return nil
}
return err
}
if err := validate.ValidateLabels(constant.K8S_RESOURCE_TYPE_APP, labels); err != nil {
return err
}
labelStr, err := json.Marshal(labels)
if err != nil {
return err
}
if string(labelStr) != app.Labels {
return ar.Appmodel.SetLabels(ar.Cluster, namespace, name, string(labelStr))
}
return nil
}
func CreateK8sNamespace(cluster, namespace string) error {
client, err := kube.GetClientset(cluster)
if err != nil {
return err
}
_, err = client.CoreV1().Namespaces().Create(&apiv1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: namespace,
Labels: map[string]string{
"name": namespace,
},
},
})
if err != nil {
return err
}
return nil
}
func CreateRegistrySecret(cluster, namespace string, envID int64) error {
client, err := kube.GetClientset(cluster)
if err != nil {
log.Log.Warning(fmt.Sprintf("create registry secret failed: %v", err.Error()))
return err
}
// TODO: refactor code combine
projectEnv, err := dao.NewProjectModel().GetProjectEnvByID(envID)
if err != nil {
log.Log.Error("when create registry secret get project env by id: %v, error: %s", envID, err.Error())
return err
}
integrateSettingRegistry, err := settings.NewSettingManager().GetIntegrateSettingByID(projectEnv.Registry)
if err != nil {
log.Log.Error("when create registry secret get integrate setting by id: %v, error: %s", projectEnv.Registry, err.Error())
return err
}
var registryAddr, registryUser, registryPassword, registryAuth string
if registryConf, ok := integrateSettingRegistry.Config.(*settings.RegistryConfig); ok {
registryAddr = registryConf.URL
registryPassword = registryConf.Password
registryUser = registryConf.User
registryAuth = base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf("%v:%v", registryConf.User, registryConf.Password)))
} else {
log.Log.Error("parse integrate setting registry config error")
return fmt.Errorf("parse integrate setting registry config error")
}
registrySecretName := fmt.Sprintf("registry-%x", integrateSettingRegistry.ID)
registryInfo := make(map[string]interface{})
registryInfo[registryAddr] = map[string]string{
"username": registryUser,
"password": registryPassword,
"auth": registryAuth,
}
auth, _ := json.Marshal(registryInfo)
registrySec, err := client.CoreV1().Secrets(namespace).Get(registrySecretName, metav1.GetOptions{})
if err != nil {
if !k8serrors.IsNotFound(err) {
return err
}
_, err = client.CoreV1().Secrets(namespace).Create(&apiv1.Secret{
ObjectMeta: metav1.ObjectMeta{
Namespace: namespace,
Name: registrySecretName,
},
Type: apiv1.SecretTypeDockercfg,
Data: map[string][]byte{
".dockercfg": auth,
},
})
} else {
if string(registrySec.Data[".dockercfg"]) == string(auth) {
return nil
}
registrySec.Data = map[string][]byte{".dockercfg": auth}
_, err = client.CoreV1().Secrets(namespace).Update(registrySec)
}
if err != nil {
log.Log.Warning(fmt.Sprintf("set registry secret failed: %v", err.Error()))
}
return err
}
func getKubeResNumber(res string) (int64, error) {
	// Note: Go's `^` is bitwise XOR, not exponentiation, so the unit tables
	// below use shifts and explicit powers of ten. Values are normalized to
	// the "mi"/"m" base unit; "ki" and "k" truncate to 0 under integer math,
	// matching the original 1/(2^10) and 1/(10^3) entries.
	bind := map[string]int64{"ki": 0, "mi": 1, "gi": 1 << 10, "ti": 1 << 20, "pi": 1 << 30, "ei": 1 << 40}
	ints := map[string]int64{"k": 0, "m": 1, "g": 1_000, "t": 1_000_000, "p": 1_000_000_000, "e": 1_000_000_000_000}
	// plain numbers default to unit "g"
	dest := strings.TrimSpace(strings.ToLower(res))
	for key, value := range bind {
		if strings.HasSuffix(dest, key) {
			// TrimSuffix removes exactly the matched unit; TrimRight would
			// strip any trailing run of the unit's characters.
			nb, err := strconv.Atoi(strings.TrimSuffix(dest, key))
			if err != nil {
				return 0, err
			}
			return int64(nb) * value, nil
		}
	}
	for key, value := range ints {
		if strings.HasSuffix(dest, key) {
			nb, err := strconv.Atoi(strings.TrimSuffix(dest, key))
			if err != nil {
				return 0, err
			}
			return int64(nb) * value, nil
		}
	}
	nb, err := strconv.Atoi(dest)
	if err != nil {
		return 0, err
	}
	return int64(nb) * 1_000, nil
}
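// Illustrative conversions under the normalization above:
//
//	getKubeResNumber("2gi") // 2 * 1024      = 2048
//	getKubeResNumber("3t")  // 3 * 1_000_000 = 3000000
//	getKubeResNumber("512") // plain numbers default to "g": 512 * 1_000 = 512000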
func getBestNodeIP(pods []*AppPod) string {
if len(pods) > 0 {
return pods[0].NodeIP
}
return ""
}
type AppEvent struct {
EventLevel string `json:"event_level"`
EventObject string `json:"event_object"`
EventType string `json:"event_type"`
EventMessage string `json:"event_message"`
EventTime string `json:"event_time"`
}
func (ar *AppRes) GetAppEvent(namespace, appName string) ([]AppEvent, error) {
appEvents := []AppEvent{}
appResourceName := appName
// TODO: current only support deployment
eventList, err := GetEventList(ar.Client, ar.Cluster, namespace, appResourceName)
if err != nil {
return nil, err
}
for _, ievent := range eventList {
appEvents = append(appEvents, AppEvent{
EventLevel: ievent.EventType,
EventObject: ievent.ObjectName,
EventType: ievent.Reason,
EventMessage: ievent.Message,
EventTime: ievent.LastTimestamp.Format("2006-01-02 15:04:05"),
})
}
return appEvents, nil
}
|
res.Item = appList
|
generate_instruction_default_field.py
|
FIELDS = {
'FADD': 6,
'FSUB': 6,
'FMUL': 6,
'FDIV': 6,
'NUM': 0, 'CHAR': 1, 'HLT': 2,
'SLA': 0, 'SRA': 1, 'SLAX': 2, 'SRAX': 3, 'SLC': 4, 'SRC': 5,
'STJ': 2,
'JMP': 0, 'JSJ': 1, 'JOV': 2, 'JNOV': 3, 'JL': 4, 'JE': 5, 'JG': 6, 'JGE': 7, 'JNE': 8, 'JLE': 9,
'JAN': 0, 'JAZ': 1, 'JAP': 2, 'JANN': 3, 'JANZ': 4, 'JANP': 5,
'J1N': 0, 'J1Z': 1, 'J1P': 2, 'J1NN': 3, 'J1NZ': 4, 'J1NP': 5,
'J2N': 0, 'J2Z': 1, 'J2P': 2, 'J2NN': 3, 'J2NZ': 4, 'J2NP': 5,
'J3N': 0, 'J3Z': 1, 'J3P': 2, 'J3NN': 3, 'J3NZ': 4, 'J3NP': 5,
'J4N': 0, 'J4Z': 1, 'J4P': 2, 'J4NN': 3, 'J4NZ': 4, 'J4NP': 5,
'J5N': 0, 'J5Z': 1, 'J5P': 2, 'J5NN': 3, 'J5NZ': 4, 'J5NP': 5,
'J6N': 0, 'J6Z': 1, 'J6P': 2, 'J6NN': 3, 'J6NZ': 4, 'J6NP': 5,
'JXN': 0, 'JXZ': 1, 'JXP': 2, 'JXNN': 3, 'JXNZ': 4, 'JXNP': 5,
'INCA': 0, 'DECA': 1, 'ENTA': 2, 'ENNA': 3,
'INC1': 0, 'DEC1': 1, 'ENT1': 2, 'ENN1': 3,
'INC2': 0, 'DEC2': 1, 'ENT2': 2, 'ENN2': 3,
'INC3': 0, 'DEC3': 1, 'ENT3': 2, 'ENN3': 3,
'INC4': 0, 'DEC4': 1, 'ENT4': 2, 'ENN4': 3,
'INC5': 0, 'DEC5': 1, 'ENT5': 2, 'ENN5': 3,
'INC6': 0, 'DEC6': 1, 'ENT6': 2, 'ENN6': 3,
'INCX': 0, 'DECX': 1, 'ENTX': 2, 'ENNX': 3,
'FCMP': 6,
}
class
|
(object):
def __init__(self):
self.head = {}
def add_word(self, word, field):
head = self.head
for ch in word:
if ch not in head:
head[ch] = [-1, {}]
if ch == '#':
head[ch][0] = field
head = head[ch][1]
def print_cases(self):
def _print(head, indent, index, field):
if len(head) == 0:
print(' ' * indent + 'return {};'.format(field))
return False
print(' ' * indent + 'switch (charAt({})) {{'.format(index))
for ch in sorted(head.keys()):
print(' ' * indent + 'case \'{}\':'.format(ch))
if _print(head[ch][1], indent + 4, index + 1, head[ch][0]):
print(' ' * (indent + 4) + 'break;')
print(' ' * indent + '}')
return True
_print(self.head, indent=4, index=0, field=-1)
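# Emitted shape for a trie holding the single word 'HLT#' -> 2
# (illustrative; inner levels elided):
#
#     switch (charAt(0)) {
#     case 'H':
#         switch (charAt(1)) {
#         ...
#         }
#         break;
#     }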
if __name__ == '__main__':
trie = Trie()
for op, field in FIELDS.items():
trie.add_word(op + '#', field)
trie.print_cases()
|
Trie
|
orderedmap.go
|
package orderedmap
import (
"bytes"
"encoding/json"
"fmt"
"reflect"
)
// UnmarshalJSON unmarshals a JSON object into a slice of key/value structs,
// preserving key order.
func UnmarshalJSON(b []byte, i interface{}) error {
xs := reflect.ValueOf(i).Elem()
d := json.NewDecoder(bytes.NewReader(b))
tok, err := d.Token()
if err != nil {
return err
}
if tok != json.Delim('{') {
return fmt.Errorf("unexpected token %v", tok)
}
for {
tok, err = d.Token()
if err != nil {
return err
}
if tok == json.Delim('}') {
break
}
k, ok := tok.(string)
if !ok {
return fmt.Errorf("unexpected token %v", tok)
}
kv := reflect.New(xs.Type().Elem()).Elem()
kv.Field(0).SetString(k)
err = d.Decode(kv.Field(1).Addr().Interface())
if err != nil {
return err
}
xs = reflect.Append(xs, kv)
}
reflect.ValueOf(i).Elem().Set(xs)
return nil
}
// MarshalJSON marshals a slice of key/value structs into a JSON object,
// preserving key order.
func MarshalJSON(i interface{}) ([]byte, error) {
if i == nil
|
buf := &bytes.Buffer{}
buf.WriteByte('{')
xs := reflect.ValueOf(i)
for i := 0; i < xs.Len(); i++ {
b, err := json.Marshal(xs.Index(i).Field(0).String())
if err != nil {
return nil, err
}
buf.Write(b)
buf.WriteByte(':')
b, err = json.Marshal(xs.Index(i).Field(1).Interface())
if err != nil {
return nil, err
}
buf.Write(b)
if i < xs.Len()-1 {
buf.WriteByte(',')
}
}
buf.WriteByte('}')
return buf.Bytes(), nil
}
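// Illustrative usage sketch; the KV type below is hypothetical, not part of
// this package:
//
//	type KV struct {
//		Key   string
//		Value int
//	}
//	var kvs []KV
//	_ = UnmarshalJSON([]byte(`{"b":2,"a":1}`), &kvs) // kvs == [{b 2} {a 1}]
//	out, _ := MarshalJSON(kvs)                       // out == {"b":2,"a":1}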
|
{
return []byte("null"), nil
}
|
events_trace_result.py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .events_result_data import EventsResultData
class EventsTraceResult(EventsResultData):
|
"""A trace result.
All required parameters must be populated in order to send to Azure.
:param id: The unique ID for this event.
:type id: str
:param count: Count of the event
:type count: long
:param timestamp: Timestamp of the event
:type timestamp: datetime
:param custom_dimensions: Custom dimensions of the event
:type custom_dimensions:
~azure.applicationinsights.models.EventsResultDataCustomDimensions
:param custom_measurements: Custom measurements of the event
:type custom_measurements:
~azure.applicationinsights.models.EventsResultDataCustomMeasurements
:param operation: Operation info of the event
:type operation: ~azure.applicationinsights.models.EventsOperationInfo
:param session: Session info of the event
:type session: ~azure.applicationinsights.models.EventsSessionInfo
:param user: User info of the event
:type user: ~azure.applicationinsights.models.EventsUserInfo
:param cloud: Cloud info of the event
:type cloud: ~azure.applicationinsights.models.EventsCloudInfo
:param ai: AI info of the event
:type ai: ~azure.applicationinsights.models.EventsAiInfo
:param application: Application info of the event
:type application: ~azure.applicationinsights.models.EventsApplicationInfo
:param client: Client info of the event
:type client: ~azure.applicationinsights.models.EventsClientInfo
:param type: Required. Constant filled by server.
:type type: str
:param trace:
:type trace: ~azure.applicationinsights.models.EventsTraceInfo
"""
_validation = {
'type': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'count': {'key': 'count', 'type': 'long'},
'timestamp': {'key': 'timestamp', 'type': 'iso-8601'},
'custom_dimensions': {'key': 'customDimensions', 'type': 'EventsResultDataCustomDimensions'},
'custom_measurements': {'key': 'customMeasurements', 'type': 'EventsResultDataCustomMeasurements'},
'operation': {'key': 'operation', 'type': 'EventsOperationInfo'},
'session': {'key': 'session', 'type': 'EventsSessionInfo'},
'user': {'key': 'user', 'type': 'EventsUserInfo'},
'cloud': {'key': 'cloud', 'type': 'EventsCloudInfo'},
'ai': {'key': 'ai', 'type': 'EventsAiInfo'},
'application': {'key': 'application', 'type': 'EventsApplicationInfo'},
'client': {'key': 'client', 'type': 'EventsClientInfo'},
'type': {'key': 'type', 'type': 'str'},
'trace': {'key': 'trace', 'type': 'EventsTraceInfo'},
}
def __init__(self, **kwargs):
super(EventsTraceResult, self).__init__(**kwargs)
self.trace = kwargs.get('trace', None)
self.type = 'trace'
|
|
index.js
|
import {CalendarCreator} from "./CalendarCreator.js";
(function (){
let calendar = new CalendarCreator();
calendar.createCalendars(20, -12);
|
})();
|
console.log(calendar);
|
dialogflow_generated_dialogflowcx_v3_deployments_list_deployments_async.py
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for ListDeployments
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-dialogflowcx
# [START dialogflow_generated_dialogflowcx_v3_Deployments_ListDeployments_async]
from google.cloud import dialogflowcx_v3
async def
|
():
# Create a client
client = dialogflowcx_v3.DeploymentsAsyncClient()
# Initialize request argument(s)
request = dialogflowcx_v3.ListDeploymentsRequest(
parent="parent_value",
)
# Make the request
    page_result = await client.list_deployments(request=request)
async for response in page_result:
print(response)
# [END dialogflow_generated_dialogflowcx_v3_Deployments_ListDeployments_async]
|
sample_list_deployments
|
_parser.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, unused-argument
"""A parser for Relay's text format."""
from __future__ import absolute_import
import sys
from ast import literal_eval
from collections import deque
import tvm
from . import module
from .base import Span, SourceName
from . import expr
from . import ty
from . import op
PYTHON_VERSION = sys.version_info.major
try:
from .grammar.py3.RelayVisitor import RelayVisitor
from .grammar.py3.RelayParser import RelayParser
from .grammar.py3.RelayLexer import RelayLexer
except ImportError:
raise Exception("Couldn't find ANTLR parser. Try building with USE_ANTLR=ON.")
try:
from antlr4 import InputStream, CommonTokenStream
from antlr4.error.ErrorListener import ErrorListener
except ImportError:
raise Exception("Couldn't find ANTLR runtime." +
"Try running `pip{version} install antlr4-python{version}-runtime`."
.format(version=PYTHON_VERSION))
sys.setrecursionlimit(10000)
class ParseError(Exception):
"""Exception type for parse errors."""
def __init__(self, message):
# type: (str) -> None
super(ParseError, self).__init__()
self.message = message
def __repr__(self):
return "ParseError({})".format(self.message)
def __str__(self):
return repr(self)
class OpWrapper:
"""Overload the __call__ for op."""
pass
class ExprOp(OpWrapper):
"""Call an expr. The default, but does not handle attrs well."""
def __init__(self, operator):
self.operator = operator
def __call__(self, args, attrs, type_args):
try:
return expr.Call(self.operator, args, attrs, type_args)
except Exception:
raise Exception("Operator {} is not registered. It's attributes are {}"
.format(self.operator, attrs))
class FuncOp(OpWrapper):
"""Convert the attrs, call the python function with the attrs passed in as keyword arguments.
Tvm should provide this in the future, as this is pretty similar to what op.get is providing.
"""
def __init__(self, operator):
self.operator = operator
def convert(self, v):
if isinstance(v, tuple):
return tuple([self.convert(x) for x in v])
if isinstance(v, expr.Constant):
return v.data.asnumpy().item()
if isinstance(v, str):
return v
raise Exception("unsupported attribute value: {}".format(v))
def __call__(self, args, attrs, type_args):
if attrs is None:
attrs = {}
x = self.operator(*args, **{k: self.convert(v) for k, v in attrs.items()})
if isinstance(x, expr.TupleWrapper):
x = x.astuple()
return x
BINARY_OPS = {
RelayParser.MUL: op.multiply,
RelayParser.DIV: op.divide,
RelayParser.ADD: op.add,
RelayParser.SUB: op.subtract,
RelayParser.LT: op.less,
RelayParser.GT: op.greater,
RelayParser.LE: op.less_equal,
RelayParser.GE: op.greater_equal,
RelayParser.EQ: op.equal,
RelayParser.NE: op.not_equal,
}
FUNC_OPS = {
"nn.conv2d": op.nn.conv2d,
"nn.batch_norm": op.nn.batch_norm,
"nn.dense": op.nn.dense,
"nn.bias_add": op.nn.bias_add,
"nn.max_pool2d": op.nn.max_pool2d,
"nn.global_max_pool2d": op.nn.global_max_pool2d,
"nn.avg_pool2d": op.nn.avg_pool2d,
"nn.global_avg_pool2d": op.nn.global_avg_pool2d,
"nn.softmax": op.nn.softmax,
"reshape": op.reshape,
"nn.conv2d_transpose": op.nn.conv2d_transpose,
"concatenate": op.concatenate,
"nn.dropout": op.nn.dropout_raw,
"zeros": op.zeros,
"split": op.split,
"cast": op.cast
}
TYPE_PREFIXES = [
"int",
"uint",
"float",
"bool",
]
T = ty.TypeVar("T")
# Scope = Deque[Tuple[str, T]]
# Scopes = Deque[Scope[T]]
def lookup(scopes, name):
# type: (Scopes[T], str) -> Optional[T]
"""Look up `name` in `scopes`."""
for scope in scopes:
for key, val in scope:
if key == name:
return val
return None
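# Example (illustrative, not from the source): with
# scopes = deque([deque([("x", inner)]), deque([("x", outer)])]),
# lookup(scopes, "x") returns `inner` -- inner scopes shadow outer ones.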
def spanify(f):
"""A decorator which attaches span information
to the value returned by calling `f`.
Intended for use with the below AST visiting
methods. The idea is that after we do the work
of constructing the AST we attach Span information.
"""
def _wrapper(*args, **kwargs):
# Assumes 0th arg is self and gets source_name from object.
sn = args[0].source_name
# Assumes 1st arg is an ANTLR parser context.
ctx = args[1]
ast = f(*args, **kwargs)
line, col = ctx.getSourceInterval()
sp = Span(sn, line, col)
if isinstance(ast, tvm.relay.expr.TupleWrapper):
ast = ast.astuple()
ast.set_span(sp)
return ast
return _wrapper
# TODO(@jmp): Use https://stackoverflow.com/q/13889941
# to figure out how to get ANTLR4 to be more unhappy about syntax errors
class ParseTreeToRelayIR(RelayVisitor):
"""Parse Relay text format into Relay IR."""
def __init__(self, source_name):
# type: (str) -> None
self.source_name = source_name
self.module = module.Module({}) # type: module.Module
# Adding an empty scope allows naked lets without pain.
self.var_scopes = deque([deque()]) # type: Scopes[expr.Var]
self.global_var_scope = deque() # type: Scope[expr.GlobalVar]
self.type_param_scopes = deque([deque()]) # type: Scopes[ty.TypeVar]
self.graph_expr = [] # type: List[expr.Expr]
super(ParseTreeToRelayIR, self).__init__()
def enter_var_scope(self):
# type: () -> None
"""Enter a new Var scope so it can be popped off later."""
self.var_scopes.appendleft(deque())
def exit_var_scope(self):
# type: () -> Scope[expr.Var]
"""Pop off the current Var scope and return it."""
return self.var_scopes.popleft()
def mk_var(self, name, type_):
# type: (str, ty.Type) -> expr.Var
"""Create a new Var and add it to the Var scope."""
var = expr.Var(name, type_)
self.var_scopes[0].appendleft((name, var))
return var
def mk_global_var(self, name):
# type: (str) -> expr.GlobalVar
"""Create a new GlobalVar and add it to the GlobalVar scope."""
var = expr.GlobalVar(name)
self.global_var_scope.append((name, var))
return var
def enter_type_param_scope(self):
# type: () -> None
"""Enter a new TypeVar scope so it can be popped off later."""
self.type_param_scopes.appendleft(deque())
def exit_type_param_scope(self):
# type: () -> Scope[ty.TypeVar]
"""Pop off the current TypeVar scope and return it."""
return self.type_param_scopes.popleft()
def mk_typ(self, name, kind):
# type: (str, ty.Kind) -> ty.TypeVar
"""Create a new TypeVar and add it to the TypeVar scope."""
typ = ty.TypeVar(name, kind)
self.type_param_scopes[0].appendleft((name, typ))
return typ
def visitProjection(self, ctx):
return expr.TupleGetItem(self.visit(ctx.expr()), self.visit(ctx.NAT()))
def visitTerminal(self, node):
# type: (TerminalNode) -> Union[expr.Expr, int, float]
"""Visit lexer tokens that aren't ignored or visited by other functions."""
node_type = node.getSymbol().type
node_text = node.getText()
name = node_text[1:]
# variables
if node_type == RelayLexer.GLOBAL_VAR:
return lookup(deque([self.global_var_scope]), node_text[1:])
if node_type == RelayLexer.LOCAL_VAR:
# Remove the leading '%' and lookup the name.
var = lookup(self.var_scopes, name)
if var is None:
raise ParseError("Couldn't resolve `{}`.".format(name))
return var
if node_type == RelayLexer.GRAPH_VAR:
try:
return self.graph_expr[int(name)]
except IndexError:
raise ParseError("Couldn't resolve `{}`".format(name))
# data types
if node_type == RelayLexer.NAT:
return int(node_text)
if node_type == RelayLexer.FLOAT:
return float(node_text[:-1])
if node_type == RelayLexer.BOOL_LIT:
if node_text == "True":
return True
if node_text == "False":
return False
raise ParseError("Unrecognized BOOL_LIT: `{}`".format(node_text))
if node_type == RelayLexer.QUOTED_STRING:
return literal_eval(node_text)
raise ParseError("todo: `{}`".format(node_text))
def visit_list(self, ctx_list):
# type: (List[ParserRuleContext]) -> List[Any]
""""Visit a list of contexts."""
assert isinstance(ctx_list, list)
return [self.visit(ctx) for ctx in ctx_list]
def getType_(self, ctx):
# type: (Optional[RelayParser.Type_Context]) -> Optional[ty.Type]
"""Return a (possibly None) Relay type."""
if ctx is None:
return None
return self.visit(ctx)
def visitProg(self, ctx):
# type: (RelayParser.ProgContext) -> Union[expr.Expr, module.Module]
self.meta = None
if ctx.METADATA():
header, data = str(ctx.METADATA()).split('\n', 1)
assert header == "METADATA:"
self.meta = tvm.load_json(data)
if ctx.defn():
self.visit_list(ctx.defn())
return self.module
if ctx.expr():
return self.visit(ctx.expr())
return self.module
# Exprs
def visitOpIdent(self, ctx):
# type: (RelayParser.OpIdentContext) -> op.Op
op_name = ctx.CNAME().getText()
if op_name in FUNC_OPS:
return FuncOp(FUNC_OPS[op_name])
return ExprOp(op.get(op_name))
# pass through
def visitParen(self, ctx):
# type: (RelayParser.ParenContext) -> expr.Expr
return self.visit(ctx.expr())
# pass through
def visitBody(self, ctx):
# type: (RelayParser.BodyContext) -> expr.Expr
return self.visit(ctx.expr())
def visitScalarFloat(self, ctx):
# type: (RelayParser.ScalarFloatContext) -> expr.Constant
return expr.const(self.visit(ctx.FLOAT()))
def visitScalarInt(self, ctx):
# type: (RelayParser.ScalarIntContext) -> expr.Constant
return expr.const(self.visit(ctx.NAT()))
def visitScalarBool(self, ctx):
# type: (RelayParser.ScalarBoolContext) -> expr.Constant
return expr.const(self.visit(ctx.BOOL_LIT()))
def visitNeg(self, ctx):
# type: (RelayParser.NegContext) -> Union[expr.Constant, expr.Call]
val = self.visit(ctx.expr())
if isinstance(val, expr.Constant) and val.data.asnumpy().ndim == 0:
# fold Neg in for scalars
return expr.const(-val.data.asnumpy().item())
return op.negative(val)
def visitTuple(self, ctx):
# type: (RelayParser.TupleContext) -> expr.Tuple
tup = self.visit_list(ctx.expr())
return expr.Tuple(tup)
def visitLet(self, ctx):
# type: (RelayParser.SeqContext) -> expr.Let
"""Desugar various sequence constructs to Relay Let nodes."""
if ctx.var() is None:
# anonymous identity
ident = "_"
type_ = None
var = self.mk_var(ident, type_)
else:
var = self.visitVar(ctx.var())
self.enter_var_scope()
value = self.visit(ctx.expr(0))
self.exit_var_scope()
body = self.visit(ctx.expr(1))
return expr.Let(var, value, body)
def visitBinOp(self, ctx):
# type: (RelayParser.BinOpContext) -> expr.Call
"""Desugar binary operators."""
arg0, arg1 = self.visit_list(ctx.expr())
relay_op = BINARY_OPS.get(ctx.op.type)
if relay_op is None:
raise ParseError("Unimplemented binary op.")
return relay_op(arg0, arg1)
@spanify
def visitVar(self, ctx):
# type: (RelayParser.VarContext) -> expr.Var
"""Visit a single variable."""
ident = ctx.LOCAL_VAR()
if ident is None:
raise ParseError("Only local ids may be used in vars.")
type_ = self.getType_(ctx.type_())
return self.mk_var(ident.getText()[1:], type_)
def visitVarList(self, ctx):
# type: (RelayParser.VarListContext) -> List[expr.Var]
return self.visit_list(ctx.var())
# TODO: support a larger class of values than just Relay exprs
def visitAttr(self, ctx):
# type: (RelayParser.AttrContext) -> Tuple[str, expr.Expr]
return (ctx.CNAME().getText(), self.visit(ctx.expr()))
def visitArgNoAttr(self, ctx):
return (self.visit_list(ctx.varList().var()), None)
def visitAttrSeq(self, ctx):
# type: (RelayParser.AttrListContext) -> Dict[str, expr.Expr]
return dict(self.visit_list(ctx.attr()))
def visitArgWithAttr(self, ctx):
return (self.visit_list(ctx.var()), self.visitAttrSeq(ctx.attrSeq()))
def visitArgList(self,
ctx # type: RelayParser.ArgListContext
):
# type: (...) -> Tuple[Optional[List[expr.Var]], Optional[Dict[str, expr.Expr]]]
var_list = self.visit(ctx.varList()) if ctx.varList() else None
attr_list = self.visit(ctx.attrList()) if ctx.attrList() else None
return (var_list, attr_list)
def visitMeta(self, ctx):
type_key = str(ctx.CNAME())
index = int(self.visit(ctx.NAT()))
return self.meta[type_key][index]
def mk_func(self, ctx):
# type: (Union[RelayParser.FuncContext, RelayParser.DefnContext]) -> expr.Function
"""Construct a function from either a Func or Defn."""
# Enter var scope early to put params in scope.
self.enter_var_scope()
# Capture type params in params.
self.enter_type_param_scope()
type_params = ctx.typeParamList()
if type_params is not None:
type_params = type_params.ident()
assert type_params
for ty_param in type_params:
name = ty_param.getText()
self.mk_typ(name, ty.Kind.Type)
var_list, attr_list = self.visit(ctx.argList())
if var_list is None:
var_list = []
ret_type = self.getType_(ctx.type_())
body = self.visit(ctx.body())
# NB(@jroesch): you must stay in the type parameter scope until
# after you exit the body, you can reference the type parameters
# of your parent scopes.
type_params = list(self.exit_type_param_scope())
if type_params:
_, type_params = zip(*type_params)
self.exit_var_scope()
attrs = tvm.make.node("DictAttrs", **attr_list) if attr_list is not None else None
return expr.Function(var_list, body, ret_type, type_params, attrs)
@spanify
def visitFunc(self, ctx):
# type: (RelayParser.FuncContext) -> expr.Function
return self.mk_func(ctx)
# TODO: how to set spans for definitions?
# @spanify
def visitDefn(self, ctx):
# type: (RelayParser.DefnContext) -> None
ident = ctx.ident().GLOBAL_VAR()
if ident is None:
raise ParseError("Only global ids may be used in `def`s.")
ident_name = ident.getText()[1:]
ident = self.mk_global_var(ident_name)
self.module[ident] = self.mk_func(ctx)
def visitCallNoAttr(self, ctx):
return (self.visit_list(ctx.exprList().expr()), None)
def visitCallWithAttr(self, ctx):
return (self.visit_list(ctx.expr()), self.visit(ctx.attrSeq()))
def call(self, func, args, attrs, type_args):
if isinstance(func, OpWrapper):
return func(args, attrs, type_args)
return expr.Call(func, args, attrs, type_args)
@spanify
def visitCall(self, ctx):
# type: (RelayParser.CallContext) -> expr.Call
func = self.visit(ctx.expr())
args, attrs = self.visit(ctx.callList())
return self.call(func, args, attrs, [])
@spanify
def visitIfElse(self, ctx):
# type: (RelayParser.IfElseContext) -> expr.If
"""Construct a Relay If node. Creates a new scope for each branch."""
cond = self.visit(ctx.expr())
self.enter_var_scope()
true_branch = self.visit(ctx.body(0))
self.exit_var_scope()
self.enter_var_scope()
false_branch = self.visit(ctx.body(1))
self.exit_var_scope()
return expr.If(cond, true_branch, false_branch)
@spanify
def visitGraph(self, ctx):
# type: (RelayParser.GraphContext) -> expr.Expr
"""Visit a graph variable assignment."""
graph_nid = int(ctx.GRAPH_VAR().getText()[1:])
self.enter_var_scope()
value = self.visit(ctx.expr(0))
self.exit_var_scope()
if graph_nid != len(self.graph_expr):
raise ParseError(
"Expected new graph variable to be `%{}`,".format(len(self.graph_expr)) + \
"but got `%{}`".format(graph_nid))
self.graph_expr.append(value)
kont = self.visit(ctx.expr(1))
return kont
# Types
# pylint: disable=unused-argument
def visitIncompleteType(self, ctx):
# type: (RelayParser.IncompleteTypeContext) -> None
return None
def visitTypeIdent(self, ctx):
# type: (RelayParser.TypeIdentContext) -> Union[ty.TensorType, str]
"""Handle a type identifier."""
type_ident = ctx.CNAME().getText()
# Look through all type prefixes for a match
for type_prefix in TYPE_PREFIXES:
if type_ident.startswith(type_prefix):
return ty.scalar_type(type_ident)
type_param = lookup(self.type_param_scopes, type_ident)
if type_param is not None:
return type_param
raise ParseError("Unknown builtin type: {}".format(type_ident))
# def visitCallType(self, ctx):
# # type: (RelayParser.CallTypeContext) -> Union[expr.Expr, ty.TensorType]
# ident_type = ctx.identType().CNAME().getText()
# args = self.visit_list(ctx.type_())
# if not args:
# raise ParseError("Type-level functions must have arguments!")
# func_type = TYPE_FUNCS.get(ident_type)(args)
# if func_type is None:
# raise ParseError("Unknown type-level function: `{}`".format(ident_type))
|
# type: (RelayParser.ParensShapeContext) -> int
return self.visit(ctx.shape())
def visitShapeList(self, ctx):
# type: (RelayParser.ShapeListContext) -> List[int]
return self.visit_list(ctx.shape())
def visitTensor(self, ctx):
return tuple(self.visit_list(ctx.expr()))
def visitTensorType(self, ctx):
# type: (RelayParser.TensorTypeContext) -> ty.TensorType
"""Create a simple tensor type. No generics."""
shape = self.visit(ctx.shapeList())
dtype = self.visit(ctx.type_())
if not isinstance(dtype, ty.TensorType):
raise ParseError("Expected dtype to be a Relay base type.")
dtype = dtype.dtype
return ty.TensorType(shape, dtype)
def visitTupleType(self, ctx):
# type: (RelayParser.TupleTypeContext) -> ty.TupleType
return ty.TupleType(self.visit_list(ctx.type_()))
def visitFuncType(self, ctx):
# type: (RelayParser.FuncTypeContext) -> ty.FuncType
types = self.visit_list(ctx.type_())
arg_types = types[:-1]
ret_type = types[-1]
return ty.FuncType(arg_types, ret_type, [], None)
def make_parser(data):
# type: (str) -> RelayParser
"""Construct a RelayParser a given data stream."""
input_stream = InputStream(data)
lexer = RelayLexer(input_stream)
lexer.addErrorListener(StrictErrorListener(data))
token_stream = CommonTokenStream(lexer)
p = RelayParser(token_stream)
p.addErrorListener(StrictErrorListener(data))
return p
__source_name_counter__ = 0
class StrictErrorListener(ErrorListener):
"""This ErrorListener fail eagerly on all error, and report the program."""
def __init__(self, text):
self.text = text
def syntaxError(self, recognizer, offendingSymbol, line, column, msg, e):
raise Exception("Syntax Error in:\n" + self.text)
def reportAmbiguity(self, recognizer, dfa, startIndex, stopIndex, exact, ambigAlts, configs):
raise Exception("Ambiguity Error in:\n" + self.text)
def reportAttemptingFullContext(self,
recognizer,
dfa,
startIndex,
stopIndex,
conflictingAlts,
configs):
raise Exception("Attempting Full Context in:\n" + self.text)
def reportContextSensitivity(self, recognizer, dfa, startIndex, stopIndex, prediction, configs):
raise Exception("Context Sensitivity in:\n" + self.text)
def fromtext(data, source_name=None):
# type: (str, str) -> Union[expr.Expr, module.Module]
"""Parse a Relay program."""
if data == "":
raise ParseError("Cannot parse the empty string.")
global __source_name_counter__
if source_name is None:
source_name = "source_file{0}".format(__source_name_counter__)
if isinstance(source_name, str):
source_name = SourceName(source_name)
tree = make_parser(data).prog()
return ParseTreeToRelayIR(source_name).visit(tree)
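# Illustrative usage (assumes a TVM build with USE_ANTLR=ON; the program text
# below is a sketch of the grammar, not taken from the source):
# f = fromtext("fn (%x: Tensor[(10, 10), float32]) { %x }")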
|
# else:
# return func_type
def visitParensShape(self, ctx):
|
sortPosts.ts
|
import dayjs from 'dayjs';
import { Post, SortedPosts } from '../types/types';
/**
* Sort given posts by weekday and hour
* @param {Post[]} posts
* @returns {SortedPosts}
*/
const sortPosts = (posts: Post[]): SortedPosts => {
// Create an array for sorted posts
const sorted: SortedPosts = new Array<number>(7).fill(0).map(
() => new Array<number>(24).fill(0).map(() => [] as Post[]),
);
// Iterate posts and sort by weekday and hour
posts.forEach((post) => {
const time = dayjs.unix(post.created_utc);
const weekday = time.day();
const hour = time.hour();
sorted[weekday][hour].push(post);
});
return sorted;
};
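// Illustrative usage: `sortPosts(posts)[1][9]` holds every post created on a
// Monday (dayjs `day()` returns 1 for Monday) during the 09:00 hour.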
|
export default sortPosts;
| |
ui.tree_view.search.js
|
import $ from '../../core/renderer';
import registerComponent from '../../core/component_registrator';
import searchBoxMixin from '../widget/ui.search_box_mixin';
|
// STYLE treeView
const WIDGET_CLASS = 'dx-treeview';
const NODE_CONTAINER_CLASS = `${WIDGET_CLASS}-node-container`;
const TreeViewSearch = TreeViewBase.inherit(searchBoxMixin).inherit({
_addWidgetPrefix: function(className) {
return `${WIDGET_CLASS}-${className}`;
},
_optionChanged: function(args) {
switch(args.name) {
case 'searchValue':
if(this._showCheckboxes() && this._isRecursiveSelection()) {
this._removeSelection();
}
this._initDataAdapter();
this._updateSearch();
this._repaintContainer();
this.option('focusedElement', null);
break;
case 'searchExpr':
this._initDataAdapter();
this.repaint();
break;
case 'searchMode':
this.option('expandNodesRecursive') ? this._updateDataAdapter() : this._initDataAdapter();
this.repaint();
break;
default:
this.callBase(args);
}
},
_updateDataAdapter: function() {
this._setOptionWithoutOptionChange('expandNodesRecursive', false);
this._initDataAdapter();
this._setOptionWithoutOptionChange('expandNodesRecursive', true);
},
_getDataAdapterOptions: function() {
return extend(this.callBase(), {
searchValue: this.option('searchValue'),
searchMode: this.option('searchMode') || 'contains',
searchExpr: this.option('searchExpr')
});
},
_updateSearch: function() {
if(this._searchEditor) {
const editorOptions = this._getSearchEditorOptions();
this._searchEditor.option(editorOptions);
}
},
_repaintContainer: function() {
const $container = this.$element().find(`.${NODE_CONTAINER_CLASS}`).first();
let rootNodes;
if($container.length) {
$container.empty();
rootNodes = this._dataAdapter.getRootNodes();
this._renderEmptyMessage(rootNodes);
this._renderItems($container, rootNodes);
this._fireContentReadyAction();
}
},
_itemContainer: function(isSearchMode) {
if(this._scrollable && isSearchMode) {
return $(this._scrollable.content());
}
return this.callBase();
},
_addWidgetClass: function() {
this.$element().addClass(this._widgetClass());
},
_clean: function() {
this.callBase();
this._removeSearchBox();
}
});
registerComponent('dxTreeView', TreeViewSearch);
export default TreeViewSearch;
|
import { extend } from '../../core/utils/extend';
import TreeViewBase from './ui.tree_view.base';
|
00.24.js
|
macDetailCallback("8cba25000000/24",[{"d":"2019-09-05","t":"add","s":"ieee-oui.csv","a":"No.5,Huitai Road,Huinan High-Tech Park,Huiao Highway Huizhou Guangdong CN 516025","c":"CN","o":"UNIONMAN TECHNOLOGY CO.,LTD"}]);
|
||
forms.py
|
from django import forms
from django.contrib.auth.models import User
from newsletter.models import Report
from newsletter.models import SiteUser
from django.contrib.auth.models import Group, Permission
class ReportForm(forms.ModelForm):
COUNTRIES = (('US', 'United States'),
('CA', 'Canada'),
('GB', 'Great Britain'),
|
GROUPS_CHOICE = ()
def __init__(self, *args, **kwargs):
super(ReportForm, self).__init__(*args, **kwargs)
self.GROUPS_CHOICE = self.GROUPS_CHOICE + (('None', 'None'),)
for group in Group.objects.all():
self.GROUPS_CHOICE = self.GROUPS_CHOICE + ((group.name, group.name),)
self.fields['group'].choices = self.GROUPS_CHOICE
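# Note: the choices are rebuilt on every form instantiation, so groups created
# after the server starts still show up in the dropdown.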
company_name = forms.CharField(required=True, label="Enter Company Name")
ceo_name = forms.CharField(required=True, label="Enter CEO Name")
industry = forms.CharField(required=True, label="Enter Industry")
company_Phone = forms.CharField(required=True, label="Enter Company Phone Number")
company_location = forms.CharField(required=True, label="Enter Company Location")
company_email = forms.EmailField(required=True, label="Enter Company Email")
company_country = forms.ChoiceField(required=True, choices=COUNTRIES, label="Enter Company Country")
sector = forms.CharField(required=True, label="Enter Company Sector")
group = forms.ChoiceField(label="Which Group Should This Report Be Associated With?", required=True,
choices=GROUPS_CHOICE)
is_private = forms.ChoiceField(label="Is This Private?", required=True, choices=OPTIONS)
projects = forms.CharField(required=True, label="Enter Project Name(s) Separate by Commas")
#is_encrypted = forms.ChoiceField(label="Is The File Encrypted?", required=True, choices=OPTIONS)
class Meta:
model = Report
fields = ("projects", "industry", "company_name", "sector", "ceo_name", "company_Phone",
"company_email", "company_location", "company_country", "is_private") # , #"content", "is_encrypted")
class FileAddForm(forms.Form):
YESNO = (('Y', 'Yes'),
('N', 'No'))
content = forms.FileField(label="Upload a file here",
widget=forms.FileInput(attrs={'multiple': False, 'type': 'file', 'class': 'button'}),
required=False)
encrypted = forms.ChoiceField(label="Is The File Encrypted?", required=True, choices=YESNO)
class Meta:
model = Report
fields = ("content", "encrypted")
|
('MX', 'Mexico'),)
OPTIONS = (('Y', 'Yes'),
('N', 'No'))
|
progressive_iile_render.py
|
import os
import subprocess
import time
# =============================================================================
# Constants and settings
# Each has:
# - filepath
# - directSpp
inputFiles = [
# ["/home/gj/git/pbrt-v3-scenes/white-room/whiteroom-daytime.pbrt", 16],
["/home/gj/git/pbrt-v3-scenes-extra/veach-ajar/scene.pbrt", 2],
# ["/home/gj/git/pbrt-v3-custom-scenes/mbed1/scene.pbrt", 64]
]
outputDir = "/home/gj/git/pbrt-v3-IISPT/tmpiile"
maxSpp = 256
# =============================================================================
# Directories configuration
toolsDir = os.path.abspath(os.path.dirname(__file__))
rootDir = os.path.dirname(toolsDir)
binDir = os.path.join(rootDir, "bin")
pbrtPath = os.path.join(binDir, "pbrt")
# =============================================================================
# Function definitions
def runProcess(cmd):
print(">>> {}".format(cmd))
|
subprocess.call(cmd, shell=False)
def processFileAtQuality(fdata, spp):
fpath, directSpp = fdata
# Generate output file name
fdir = os.path.dirname(fpath)
sceneName = os.path.basename(fdir)
outFileName = "{}_{}.pfm".format(sceneName, spp)
outFilePath = os.path.join(outputDir, outFileName)
statFileName = "{}_{}.txt".format(sceneName, spp)
statFilePath = os.path.join(outputDir, statFileName)
# Skip if already processed
if os.path.exists(statFilePath):
return
# Change working directory
os.chdir(fdir)
# Start timer
timeStart = time.time()
# Start process
cmd = []
cmd.append(pbrtPath)
cmd.append(fpath)
cmd.append(outFilePath)
cmd.append("--iileIndirect={}".format(spp))
cmd.append("--iileDirect={}".format(directSpp))
runProcess(cmd)
# End timer
timeEnd = time.time()
secondsElapsed = timeEnd - timeStart
secondsElapsed = int(secondsElapsed)
# Record on file
statFile = open(statFilePath, "w")
statFile.write("{}\n".format(secondsElapsed))
statFile.close()
def processFile(fdata):
spp = 0
while spp <= maxSpp:
processFileAtQuality(fdata, spp)
if spp == 0:
spp = 1
else:
spp *= 2
def main():
for fdata in inputFiles:
processFile(fdata)
# =============================================================================
# Main
main()
| |
float.go
|
package main
func
|
() {
// float to int
for _, f32 := range []float32{123.456, -123.456} {
println(int8(f32))
println(int16(f32))
println(int32(f32))
println(int64(f32))
println(uint8(f32))
println(uint16(f32))
println(uint32(f32))
println(uint64(f32))
}
for _, f64 := range []float64{123.456, -123.456} {
println(int8(f64))
println(int16(f64))
println(int32(f64))
println(int64(f64))
println(uint8(f64))
println(uint16(f64))
println(uint32(f64))
println(uint64(f64))
}
// int to float
var i8 int8 = 123
println(float32(i8))
println(float64(i8))
var ui8 uint8 = 123
println(float32(ui8))
println(float64(ui8))
var i16 int16 = 12345
println(float32(i16))
println(float64(i16))
var ui16 uint16 = 12345
println(float32(ui16))
println(float64(ui16))
var i32 int32 = 123456
println(float32(i32))
println(float64(i32))
var ui32 uint32 = 123456
println(float32(ui32))
println(float64(ui32))
var i64 int64 = 12345678910
println(float32(i64))
println(float64(i64))
var ui64 uint64 = 12345678910
println(float32(ui64))
println(float64(ui64))
}
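// Note: Go's float-to-integer conversions truncate toward zero, so
// int8(123.456) yields 123; when the value cannot be represented in the
// target type (e.g. uint8(-123.456)), the spec leaves the result
// implementation-dependent.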
|
main
|
clock.py
|
# Simple analogue clock in Python 3
import turtle
import time
wndw = turtle.Screen()
wndw.bgcolor("black")
wndw.setup(width=600, height=600)
wndw.title("Analogue Clock")
wndw.tracer(0)
# Create the drawing pen
pen = turtle.Turtle()
pen.hideturtle()
pen.speed(0)
pen.pensize(3)
def
|
(hr, mn, sec, pen):
# Draw clock face
pen.up()
pen.goto(0, 210)
pen.setheading(180)
pen.color("green")
pen.pendown()
pen.circle(210)
# Draw hour hashes
pen.up()
pen.goto(0, 0)
pen.setheading(90)
for _ in range(12):
pen.fd(190)
pen.pendown()
pen.fd(20)
pen.penup()
pen.goto(0, 0)
pen.rt(30)
# Draw the hands
# Each tuple in list hands describes the color, the length
# and the divisor for the angle
hands = [("white", 80, 12), ("blue", 150, 60), ("red", 110, 60)]
time_set = (hr, mn, sec)
for hand, time_part in zip(hands, time_set):
angle = (time_part / hand[2]) * 360
pen.penup()
pen.goto(0, 0)
pen.color(hand[0])
pen.setheading(90)
pen.rt(angle)
pen.pendown()
pen.fd(hand[1])
while True:
hr = int(time.strftime("%I"))
mn = int(time.strftime("%M"))
sec = int(time.strftime("%S"))
draw_clock(hr, mn, sec, pen)
wndw.update()
time.sleep(1)
pen.clear()
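# Note: wndw.tracer(0) above disables automatic redraws, so each pass draws the
# full clock off-screen and wndw.update() flips it in one frame; pen.clear()
# then wipes the canvas before the next second is drawn.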
wndw.mainloop()
|
draw_clock
|
HarvestMoon.tsx
|
import {Trans} from '@lingui/react'
import {DataLink} from 'components/ui/DbLink'
import {Event} from 'event'
import {Analyser} from 'parser/core/Analyser'
import {filter} from 'parser/core/filter'
import {dependency} from 'parser/core/Injectable'
import Checklist, {Requirement, Rule} from 'parser/core/modules/Checklist'
import {Data} from 'parser/core/modules/Data'
import {Invulnerability} from 'parser/core/modules/Invulnerability'
import {UnableToAct} from 'parser/core/modules/UnableToAct'
import React from 'react'
const SOULSOW_BUFFER = 1000
export class
|
extends Analyser {
static override handle = 'harvestMoon'
@dependency private checklist!: Checklist
@dependency private data!: Data
@dependency private invulnerability!: Invulnerability
@dependency private unableToAct!: UnableToAct
private moons = 0
override initialise() {
super.initialise()
this.addEventHook(
filter<Event>()
.source(this.parser.actor.id)
.type('action')
.action(this.data.actions.HARVEST_MOON.id),
() => this.moons++
)
this.addEventHook('complete', this.onComplete)
}
private getExpectedUses(): number {
const ADJUSTED_CAST = this.data.actions.SOULSOW.castTime + SOULSOW_BUFFER
const invulnWindows = this.invulnerability.getWindows().filter((window) => window.end - window.start >= ADJUSTED_CAST)
if (this.unableToAct.getDuration({start: this.parser.pull.timestamp, end: this.parser.pull.timestamp + this.parser.pull.duration}) > 0) {
return invulnWindows.filter(window => this.canChargeMoon(window)).length + 1
}
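// The +1 is presumably the baseline charge available at the start of the
// fight, since SOULSOW can be cast out of combat before the pull.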
return invulnWindows.length + 1
}
private canChargeMoon(inputWindow: {start: number, end: number}): boolean {
const ADJUSTED_CAST_TIME = this.data.actions.SOULSOW.castTime + SOULSOW_BUFFER
// Find the earliest unable to act window that falls within the input window
const unableToActWindow = this.unableToAct.getWindows(inputWindow)[0]
// If there are no unable to act windows left before the end of the input window, this UTA will be undefined
// Check if our input window is long enough
if (unableToActWindow == null) {
return inputWindow.end - inputWindow.start >= ADJUSTED_CAST_TIME
}
// Even if the input window was too short, check if there's a large enough time between the input window and UTA
if (unableToActWindow.start - inputWindow.start >= ADJUSTED_CAST_TIME) {
return true
}
// Recurse the method, shrinking the window to the space between the end of the unable to act window and the end of the input window
return this.canChargeMoon({start: unableToActWindow.end, end: inputWindow.end})
}
private onComplete() {
const expectedUses = this.getExpectedUses()
const harvestsUsedPercent = this.getUsedPercentage(expectedUses)
this.checklist.add(new Rule({
name: <Trans id="rpr.harvestmoon.checklist.title">
Use <DataLink action="HARVEST_MOON" />
</Trans>,
description: <Trans id="rpr.harvestmoon.checklist.description">
<DataLink action="HARVEST_MOON"/> is one of your highest damage abilities.
Aim to use it before the end of the fight, and before any downtime long enough to recharge it.
</Trans>,
requirements: [
new Requirement({
name: <Trans id="rpr.harvestmoon.checklist.requirement.name">
<DataLink action="HARVEST_MOON" /> uses
</Trans>,
overrideDisplay: `${this.moons} / ${expectedUses} (${harvestsUsedPercent}%)`,
percent: harvestsUsedPercent,
}),
],
target: 100,
}))
}
private getUsedPercentage(expected: number): string {
return (this.moons / expected * 100).toFixed(2)
}
}
|
HarvestMoon
|
tests.rs
|
use super::*;
use linkerd_app_core::{
dns, errors::HttpError, identity as id, profiles, proxy::http, svc::NewService, tls, Error,
NameAddr, NameMatch,
};
use linkerd_app_test as support;
use std::str::FromStr;
use tower::util::ServiceExt;
use tower_test::mock;
#[tokio::test]
async fn gateway() {
assert_eq!(
Test::default().run().await.unwrap().status(),
http::StatusCode::NO_CONTENT
);
}
#[tokio::test]
async fn bad_domain() {
let test = Test {
suffix: "bad.example.com",
..Default::default()
};
let status = test
.run()
.await
.unwrap_err()
.downcast_ref::<HttpError>()
.unwrap()
.status();
assert_eq!(status, http::StatusCode::NOT_FOUND);
}
#[tokio::test]
async fn no_identity() {
let test = Test {
client_id: None,
..Default::default()
};
let status = test
.run()
.await
.unwrap_err()
.downcast_ref::<HttpError>()
.unwrap()
.status();
assert_eq!(status, http::StatusCode::FORBIDDEN);
}
#[tokio::test]
async fn forward_loop() {
let test = Test {
orig_fwd: Some(
"by=gateway.id.test;for=client.id.test;host=dst.test.example.com:4321;proto=https",
),
..Default::default()
};
let status = test
.run()
.await
.unwrap_err()
.downcast_ref::<HttpError>()
.unwrap()
.status();
assert_eq!(status, http::StatusCode::LOOP_DETECTED);
}
struct Test {
suffix: &'static str,
target: NameAddr,
client_id: Option<tls::ClientId>,
orig_fwd: Option<&'static str>,
}
impl Default for Test {
fn default() -> Self {
Self {
suffix: "test.example.com",
target: NameAddr::from_str("dst.test.example.com:4321").unwrap(),
client_id: Some(tls::ClientId::from_str("client.id.test").unwrap()),
orig_fwd: None,
}
}
}
impl Test {
async fn run(self) -> Result<http::Response<http::BoxBody>, Error> {
let Self {
suffix,
target,
client_id,
orig_fwd,
} = self;
let (outbound, mut handle) =
mock::pair::<http::Request<http::BoxBody>, http::Response<http::BoxBody>>();
let new = NewGateway::new(
|
move |_: outbound::http::Logical| outbound.clone(),
Some(tls::LocalId(id::Name::from_str("gateway.id.test").unwrap())),
);
let allow = NameMatch::new(Some(dns::Suffix::from_str(suffix).unwrap()));
let profile = if allow.matches(target.name()) {
Some(support::profile::only(profiles::Profile {
name: Some(target.name().clone()),
..profiles::Profile::default()
}))
} else {
None
};
let t = HttpTarget {
target: target.clone(),
version: http::Version::Http1,
};
let gateway = svc::stack(new)
.check_new_service::<gateway::Target, http::Request<http::BoxBody>>()
.new_service((profile, t));
let bg = tokio::spawn(async move {
handle.allow(1);
let (req, rsp) = handle.next_request().await.unwrap();
assert_eq!(
req.headers().get(http::header::FORWARDED).unwrap(),
"by=gateway.id.test;for=client.id.test;host=dst.test.example.com:4321;proto=https"
);
rsp.send_response(
http::Response::builder()
.status(http::StatusCode::NO_CONTENT)
.body(Default::default())
.unwrap(),
);
});
let req = http::Request::builder().uri(format!("http://{}", target));
let mut req = orig_fwd
.into_iter()
.fold(req, |req, fwd| req.header(http::header::FORWARDED, fwd))
.body(Default::default())
.unwrap();
if let Some(id) = client_id {
req.extensions_mut().insert(id);
}
let rsp = gateway.oneshot(req).await?;
bg.await?;
Ok(rsp)
}
}
| |
schedule.rs
|
use crate::ComponentTypeId;
use hibitset::{BitSet, BitSetLike};
use std::collections::{HashMap, HashSet};
trait Accessor {
fn reads(&self) -> &[ComponentTypeId];
fn writes(&self) -> &[ComponentTypeId];
}
trait Barrier: Ord + std::hash::Hash + Clone + std::fmt::Debug {}
trait JobDescriptor: std::fmt::Debug {
type Accessor: Accessor;
type Barrier: Barrier;
fn accessor(&self) -> &Self::Accessor;
fn run_after(&self) -> Option<Self::Barrier>;
fn finish_before(&self) -> Option<Self::Barrier>;
}
#[derive(Debug)]
enum Node<'a, J: JobDescriptor> {
Job(&'a J),
Barrier(J::Barrier),
Root,
}
#[derive(Debug)]
struct DispatchState<'a, J: JobDescriptor> {
sorted_jobs: Vec<&'a Node<'a, J>>,
jobs_completed: BitSet,
jobs_scheduled: BitSet,
job_deps: Vec<BitSet>,
}
enum ScheduleResult<'a, J: JobDescriptor> {
Schedule(&'a Node<'a, J>, usize),
WaitingForJob,
Done,
}
impl<'a, J: JobDescriptor> DispatchState<'a, J> {
pub fn next_job(&mut self) -> ScheduleResult<'a, J> {
println!("scheduling with completed {:#?}", self.jobs_completed);
let mut waiting = false;
for i in 0..self.sorted_jobs.len() {
if !self.jobs_scheduled.contains(i as u32) {
waiting = true;
let deps = &self.job_deps[i];
// first AND between deps and jobs_completed to retain only the relevant bits,
// then XOR between deps and the result to check if there's a difference
if (deps ^ (deps & &self.jobs_completed))
.iter()
.next()
.is_none()
// .is_empty() is buggy for BitSetXOR/BitSetAnd
{
self.jobs_scheduled.add(i as u32);
return ScheduleResult::Schedule(self.sorted_jobs[i], i);
}
}
}
if waiting {
ScheduleResult::WaitingForJob
} else {
ScheduleResult::Done
}
}
pub fn complete_job(&mut self, job_idx: usize) {
self.jobs_completed.add(job_idx as u32);
}
pub fn reset(&mut self) {
self.jobs_completed.clear();
self.jobs_scheduled.clear();
}
}
type JobGraph<'a, J> = petgraph::graph::Graph<Node<'a, J>, ()>;
fn build_dispatch_state<'a, T: JobDescriptor>(graph: &'a JobGraph<'a, T>) -> DispatchState<'a, T> {
use petgraph::visit::EdgeRef;
// topologically sort graph to optimize iteration for unscheduled jobs
let mut sorted_nodes =
petgraph::algo::toposort(&graph, None).expect("failed to sort job graph");
sorted_nodes.reverse();
// extract a bitset for each node that defines their dependencies in terms of indices into sorted_nodes
let job_deps = sorted_nodes
.iter()
.map(|n| {
let dep_indices = graph
.edges_directed(*n, petgraph::Direction::Outgoing)
.filter_map(|e| sorted_nodes.iter().position(|n| *n == e.target()));
let mut bitset = BitSet::new();
for idx in dep_indices {
bitset.add(idx as u32);
}
bitset
})
.collect();
let sorted_jobs: Vec<_> = sorted_nodes.into_iter().map(|n| &graph[n]).collect();
DispatchState {
jobs_completed: BitSet::with_capacity(sorted_jobs.len() as u32),
jobs_scheduled: BitSet::with_capacity(sorted_jobs.len() as u32),
sorted_jobs,
job_deps,
}
}
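// Illustrative example (not from the source): with sorted_jobs = [A, B, C]
// where C depends on A and B, job_deps[2] has bits {0, 1} set, so next_job()
// only schedules C once jobs_completed contains both of those bits.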
fn generate_job_graph<'a, T: JobDescriptor>(jobs: &'a [T]) -> JobGraph<'a, T> {
// ensure job barrier relationships make sense
for j in jobs {
if let Some(a) = j.run_after() {
if let Some(b) = j.finish_before() {
assert!(
a < b,
"Invalid job ordering: finish_before is before run_after for job {:?}",
j
);
}
}
}
// find all used barriers and sort them
let mut barriers = HashSet::new();
for j in jobs {
if let Some(b) = j.run_after() {
barriers.insert(b);
}
if let Some(b) = j.finish_before() {
barriers.insert(b);
}
}
let mut barriers: Vec<T::Barrier> = barriers.into_iter().collect();
barriers.sort();
// sort jobs by barrier order using a stable sort to retain registration order
let mut sorted_jobs: Vec<&T> = jobs.iter().collect();
sorted_jobs.sort_by(|x, y| {
use std::cmp::Ordering;
let x_first = x.run_after().or_else(|| x.finish_before());
let y_first = y.run_after().or_else(|| y.finish_before());
if x_first.is_none() && y_first.is_some() {
Ordering::Less
} else if x_first.is_some() && y_first.is_none() {
Ordering::Greater
} else if x_first.is_none() && y_first.is_none() {
Ordering::Equal
} else {
x_first.unwrap().cmp(&y_first.unwrap())
}
});
let mut g = JobGraph::<T>::new();
let root_node = g.add_node(Node::Root);
// Create nodes for barriers and connect them
let mut barrier_nodes = HashMap::new();
barrier_nodes.insert(None, root_node);
let mut prev_node = root_node;
for b in barriers {
let node = g.add_node(Node::Barrier(b.clone()));
barrier_nodes.insert(Some(b), node);
g.add_edge(node, prev_node, ());
prev_node = node;
}
// Create nodes for jobs and create edges for resource modifications
let mut last_mutated: HashMap<ComponentTypeId, petgraph::graph::NodeIndex> = HashMap::new();
for j in sorted_jobs {
let job_node = g.add_node(Node::Job(j));
g.add_edge(job_node, barrier_nodes[&j.run_after()], ());
if j.finish_before().is_some() {
g.add_edge(barrier_nodes[&j.finish_before()], job_node, ());
}
let accessor = j.accessor();
for read in accessor.reads() {
if let Some(n) = last_mutated.get(read) {
g.add_edge(job_node, *n, ());
}
}
for write in accessor.writes() {
if let Some(n) = last_mutated.get(write) {
g.add_edge(job_node, *n, ());
}
last_mutated.insert(*write, job_node);
}
}
g
}
#[cfg(test)]
mod tests {
use super::*;
#[derive(Debug, Clone)]
struct TestAccessor {
reads: Vec<ComponentTypeId>,
writes: Vec<ComponentTypeId>,
}
impl Accessor for TestAccessor {
fn reads(&self) -> &[ComponentTypeId] {
&self.reads
}
fn writes(&self) -> &[ComponentTypeId] {
&self.writes
}
}
type TestBarrier = u32;
impl Barrier for TestBarrier {}
#[derive(Debug)]
struct TestJob {
id: u32,
accessor: TestAccessor,
run_after: Option<TestBarrier>,
finish_before: Option<TestBarrier>,
}
impl JobDescriptor for TestJob {
type Accessor = TestAccessor;
type Barrier = TestBarrier;
fn accessor(&self) -> &Self::Accessor {
&self.accessor
}
fn
|
(&self) -> Option<Self::Barrier> {
self.run_after
}
fn finish_before(&self) -> Option<Self::Barrier> {
self.finish_before
}
}
fn type_id(id: u32) -> ComponentTypeId {
ComponentTypeId(std::any::TypeId::of::<u32>(), id)
}
fn accessor(reads: Vec<ComponentTypeId>, writes: Vec<ComponentTypeId>) -> TestAccessor {
TestAccessor { reads, writes }
}
fn generate_test_jobs() -> Vec<TestJob> {
let job1 = TestJob {
id: 1,
accessor: accessor(vec![], vec![type_id(3)]),
run_after: None,
finish_before: None,
};
let job2 = TestJob {
id: 2,
accessor: accessor(vec![type_id(3)], vec![type_id(4)]),
run_after: Some(2),
finish_before: None,
};
let job3 = TestJob {
id: 3,
accessor: accessor(vec![type_id(3)], vec![type_id(5)]),
run_after: Some(2),
finish_before: None,
};
let job4 = TestJob {
id: 4,
accessor: accessor(vec![], vec![type_id(3)]),
run_after: Some(1),
finish_before: None,
};
let job5 = TestJob {
id: 5,
accessor: accessor(vec![], vec![type_id(3)]),
run_after: Some(0),
finish_before: Some(2),
};
let job6 = TestJob {
id: 6,
accessor: accessor(vec![], vec![type_id(3)]),
run_after: Some(1),
finish_before: Some(2),
};
vec![job6, job5, job4, job3, job2, job1]
}
#[test]
fn generate_graph() {
let jobs = generate_test_jobs();
let graph = generate_job_graph(&jobs);
dbg!(&graph);
let ordering: Vec<_> = petgraph::algo::toposort(&graph, None)
.unwrap()
.into_iter()
.map(|n| &graph[n])
.collect();
dbg!(ordering);
}
#[test]
fn dispatch_state() {
let jobs = generate_test_jobs();
let graph = generate_job_graph(&jobs);
let dispatch_state = build_dispatch_state(&graph);
for (idx, dep_list) in dispatch_state.job_deps.iter().enumerate() {
println!(
"deps for job {:#?}: {:#?}",
dispatch_state.sorted_jobs[idx],
dep_list
.into_iter()
.map(|d| dispatch_state.sorted_jobs[d as usize])
.collect::<Vec<_>>()
);
}
}
#[test]
fn dispatch_schedule() {
let jobs = generate_test_jobs();
let graph = generate_job_graph(&jobs);
let mut dispatch_state = build_dispatch_state(&graph);
let mut schedule_order = Vec::new();
let mut complete_queue = Vec::new();
loop {
match dispatch_state.next_job() {
ScheduleResult::Done => break,
ScheduleResult::WaitingForJob => {
if let Some(job) = complete_queue.pop() {
dispatch_state.complete_job(job);
} else {
panic!("Waiting for job while scheduling");
}
}
ScheduleResult::Schedule(job, idx) => {
println!("schedule {:#?}", job);
schedule_order.push(job);
complete_queue.push(idx);
dispatch_state.complete_job(idx)
}
}
}
dbg!(schedule_order);
}
}
|
run_after
|
upload-file-object.py
|
from common.methods import set_progress
from resourcehandlers.aws.models import AWSHandler
def run(job, resource, **kwargs):
|
set_progress("Connecting to AWS s3 cloud")
aws = AWSHandler.objects.get(id=resource.aws_rh_id)
wrapper = aws.get_api_wrapper()
set_progress("This resource belongs to {}".format(aws))
file = "{{ file }}"
key_name = "{{ name }}"
s3 = wrapper.get_boto3_resource(
aws.serviceaccount,
aws.servicepasswd,
None,
service_name='s3'
)
try:
set_progress('uploading file from "{}"'.format(file))
s3.Bucket(resource.s3_bucket_name).upload_file(file, key_name)
except Exception as e:
return "FAILURE", str(e), ""
return "SUCCESS", "The file has been successfully uploaded to '{}' bucket".format(resource.s3_bucket_name), ""
|
|
scaling_mechanism_description_py3.py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ScalingMechanismDescription(Model):
"""Describes the mechanism for performing a scaling operation.
You probably want to use the sub-classes and not this class directly. Known
sub-classes are: PartitionInstanceCountScaleMechanism,
AddRemoveIncrementalNamedPartitionScalingMechanism
All required parameters must be populated in order to send to Azure.
:param kind: Required. Constant filled by server.
:type kind: str
"""
_validation = {
'kind': {'required': True},
}
_attribute_map = {
'kind': {'key': 'Kind', 'type': 'str'},
}
_subtype_map = {
'kind': {'PartitionInstanceCount': 'PartitionInstanceCountScaleMechanism', 'AddRemoveIncrementalNamedPartition': 'AddRemoveIncrementalNamedPartitionScalingMechanism'}
}
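# Note: msrest uses _subtype_map to dispatch deserialization on the "Kind"
# discriminator to the matching sub-class.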
def __init__(self, **kwargs) -> None:
|
super(ScalingMechanismDescription, self).__init__(**kwargs)
self.kind = None
|
|
forms.py
|
from django import forms
from django.forms.widgets import PasswordInput
from modules.common.id_choicefield import IdentificationField
class
|
(forms.Form):
error_messages = {
'password_mismatch': (
'The password confirmation did not match the password you chose.'
),
}
party_name = forms.CharField(label="Name of the contesting party")
cname = forms.CharField(label="Candidate's Name")
age = forms.IntegerField(min_value=0, label="Candidate's Age")
citype = IdentificationField(label="Identity Proof of the Candidate")
cidno = forms.CharField(label="Passport / ID Number")
party_manifesto = forms.CharField(
widget=forms.Textarea,
required=False
)
party_symbol = forms.ImageField(
required=False,
help_text="The maximum size permitted is 2.5 MB"
)
cpd1 = forms.CharField(
widget=PasswordInput,
label="Enter your password",
strip=False,
)
cpd2 = forms.CharField(
widget=PasswordInput,
label="Confirm Password",
strip=False,
help_text=("Enter the same password as before, for verification")
)
show_profile = forms.ChoiceField(
choices=[
(True, "Show Profile to public"),
(False, "Hide profile from public")
],
help_text="The Election Commission can override this setting."
)
class PartyEditForm(forms.Form):
party_name = forms.CharField()
cpass = forms.CharField(
widget=PasswordInput,
label="Enter your password",
strip=False,
)
|
PartyForm
|
jobs.py
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import getpass
import logging
import multiprocessing
import os
import signal
import sys
import threading
import time
from collections import defaultdict
from time import sleep
import six
from past.builtins import basestring
from sqlalchemy import (Column, Index, Integer, String, and_, func, not_, or_)
from sqlalchemy.exc import OperationalError
from sqlalchemy.orm.session import make_transient
from airflow import configuration as conf
from airflow import executors, models, settings
from airflow.exceptions import AirflowException
from airflow.models import DAG, DagRun
from airflow.models.dagpickle import DagPickle
from airflow.settings import Stats
from airflow.task.task_runner import get_task_runner
from airflow.ti_deps.dep_context import DepContext, QUEUE_DEPS, RUN_DEPS
from airflow.utils import asciiart, helpers, timezone
from airflow.utils.configuration import tmp_configuration_copy
from airflow.utils.dag_processing import (AbstractDagFileProcessor,
DagFileProcessorAgent,
SimpleDag,
SimpleDagBag,
SimpleTaskInstance,
list_py_file_paths)
from airflow.utils.db import create_session, provide_session
from airflow.utils.email import get_email_address_list, send_email
from airflow.utils.log.logging_mixin import LoggingMixin, StreamLogWriter, set_context
from airflow.utils.net import get_hostname
from airflow.utils.sqlalchemy import UtcDateTime
from airflow.utils.state import State
Base = models.base.Base
ID_LEN = models.ID_LEN
class BaseJob(Base, LoggingMixin):
"""
Abstract class to be derived for jobs. Jobs are processing items with state
and duration that aren't task instances. For instance, a BackfillJob is
a collection of task instance runs, but should have its own state, start
and end time.
"""
__tablename__ = "job"
id = Column(Integer, primary_key=True)
dag_id = Column(String(ID_LEN),)
state = Column(String(20))
job_type = Column(String(30))
start_date = Column(UtcDateTime())
end_date = Column(UtcDateTime())
latest_heartbeat = Column(UtcDateTime())
executor_class = Column(String(500))
hostname = Column(String(500))
unixname = Column(String(1000))
__mapper_args__ = {
'polymorphic_on': job_type,
'polymorphic_identity': 'BaseJob'
}
__table_args__ = (
Index('job_type_heart', job_type, latest_heartbeat),
Index('idx_job_state_heartbeat', state, latest_heartbeat),
)
def __init__(
self,
executor=executors.GetDefaultExecutor(),
heartrate=conf.getfloat('scheduler', 'JOB_HEARTBEAT_SEC'),
*args, **kwargs):
self.hostname = get_hostname()
self.executor = executor
self.executor_class = executor.__class__.__name__
self.start_date = timezone.utcnow()
self.latest_heartbeat = timezone.utcnow()
self.heartrate = heartrate
self.unixname = getpass.getuser()
self.max_tis_per_query = conf.getint('scheduler', 'max_tis_per_query')
super(BaseJob, self).__init__(*args, **kwargs)
def is_alive(self):
return (
(timezone.utcnow() - self.latest_heartbeat).seconds <
(conf.getint('scheduler', 'JOB_HEARTBEAT_SEC') * 2.1)
)
@provide_session
def kill(self, session=None):
job = session.query(BaseJob).filter(BaseJob.id == self.id).first()
job.end_date = timezone.utcnow()
try:
self.on_kill()
except Exception as e:
self.log.error('on_kill() method failed: {}'.format(e))
session.merge(job)
session.commit()
raise AirflowException("Job shut down externally.")
def on_kill(self):
"""
Will be called when an external kill command is received
"""
pass
def heartbeat_callback(self, session=None):
pass
def heartbeat(self):
"""
Heartbeats update the job's entry in the database with a timestamp
for the latest_heartbeat and allows for the job to be killed
externally. This makes it possible to monitor, at the system level, which
jobs are actually active.
For instance, an old heartbeat for SchedulerJob would mean something
is wrong.
This also allows for any job to be killed externally, regardless
of who is running it or on which machine it is running.
Note that if your heartbeat is set to 60 seconds and you call this
method after 10 seconds of processing since the last heartbeat, it
will sleep 50 seconds to complete the 60 seconds and keep a steady
heart rate. If you go over 60 seconds before calling it, it won't
sleep at all.
"""
try:
with create_session() as session:
job = session.query(BaseJob).filter_by(id=self.id).one()
make_transient(job)
session.commit()
if job.state == State.SHUTDOWN:
self.kill()
# Figure out how long to sleep for
sleep_for = 0
if job.latest_heartbeat:
sleep_for = max(
0,
self.heartrate - (timezone.utcnow() -
job.latest_heartbeat).total_seconds())
sleep(sleep_for)
# Update last heartbeat time
with create_session() as session:
job = session.query(BaseJob).filter(BaseJob.id == self.id).first()
job.latest_heartbeat = timezone.utcnow()
session.merge(job)
session.commit()
self.heartbeat_callback(session=session)
self.log.debug('[heartbeat]')
except OperationalError as e:
self.log.error("Scheduler heartbeat got an exception: %s", str(e))
def run(self):
Stats.incr(self.__class__.__name__.lower() + '_start', 1, 1)
# Adding an entry in the DB
with create_session() as session:
self.state = State.RUNNING
session.add(self)
session.commit()
id_ = self.id
make_transient(self)
self.id = id_
try:
self._execute()
# In case of max runs or max duration
self.state = State.SUCCESS
except SystemExit:
# In case of ^C or SIGTERM
self.state = State.SUCCESS
except Exception:
self.state = State.FAILED
raise
finally:
self.end_date = timezone.utcnow()
session.merge(self)
session.commit()
Stats.incr(self.__class__.__name__.lower() + '_end', 1, 1)
def _execute(self):
raise NotImplementedError("This method needs to be overridden")
@provide_session
def reset_state_for_orphaned_tasks(self, filter_by_dag_run=None, session=None):
"""
This function checks if there are any tasks in the dagrun (or all)
that have a scheduled state but are not known by the
executor. If it finds those it will reset the state to None
so they will get picked up again.
Resets are issued in batches of at most max_tis_per_query for performance,
since the queries are made in sequence.
:param filter_by_dag_run: the dag_run we want to process, None if all
:type filter_by_dag_run: models.DagRun
:return: the TIs reset (in expired SQLAlchemy state)
:rtype: List(TaskInstance)
"""
queued_tis = self.executor.queued_tasks
# also consider running as the state might not have changed in the db yet
running_tis = self.executor.running
resettable_states = [State.SCHEDULED, State.QUEUED]
TI = models.TaskInstance
DR = models.DagRun
if filter_by_dag_run is None:
resettable_tis = (
session
.query(TI)
.join(
DR,
and_(
TI.dag_id == DR.dag_id,
TI.execution_date == DR.execution_date))
.filter(
DR.state == State.RUNNING,
DR.run_id.notlike(BackfillJob.ID_PREFIX + '%'),
TI.state.in_(resettable_states))).all()
else:
resettable_tis = filter_by_dag_run.get_task_instances(state=resettable_states,
session=session)
tis_to_reset = []
# Can't use an update here since it doesn't support joins
for ti in resettable_tis:
if ti.key not in queued_tis and ti.key not in running_tis:
tis_to_reset.append(ti)
if len(tis_to_reset) == 0:
return []
def query(result, items):
filter_for_tis = ([and_(TI.dag_id == ti.dag_id,
TI.task_id == ti.task_id,
TI.execution_date == ti.execution_date)
for ti in items])
reset_tis = (
session
.query(TI)
.filter(or_(*filter_for_tis), TI.state.in_(resettable_states))
.with_for_update()
.all())
for ti in reset_tis:
ti.state = State.NONE
session.merge(ti)
return result + reset_tis
reset_tis = helpers.reduce_in_chunks(query,
tis_to_reset,
[],
self.max_tis_per_query)
task_instance_str = '\n\t'.join(
["{}".format(x) for x in reset_tis])
session.commit()
self.log.info(
"Reset the following %s TaskInstances:\n\t%s",
len(reset_tis), task_instance_str
)
return reset_tis
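# Note: reduce_in_chunks above batches the OR-ed TaskInstance filter into
# groups of at most max_tis_per_query, so a large backlog never produces an
# unbounded WHERE clause.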
class DagFileProcessor(AbstractDagFileProcessor, LoggingMixin):
"""Helps call SchedulerJob.process_file() in a separate process."""
# Counter that increments every time an instance of this class is created
class_creation_counter = 0
def __init__(self, file_path, pickle_dags, dag_id_white_list, zombies):
"""
:param file_path: a Python file containing Airflow DAG definitions
:type file_path: unicode
:param pickle_dags: whether to serialize the DAG objects to the DB
:type pickle_dags: bool
:param dag_id_white_list: If specified, only look at these DAG IDs
:type dag_id_white_list: list[unicode]
:param zombies: zombie task instances to kill
:type zombies: list[SimpleTaskInstance]
"""
self._file_path = file_path
# Queue that's used to pass results from the child process.
self._result_queue = multiprocessing.Queue()
# The process that was launched to process the given file.
self._process = None
self._dag_id_white_list = dag_id_white_list
self._pickle_dags = pickle_dags
self._zombies = zombies
# The result of Scheduler.process_file(file_path).
self._result = None
# Whether the process is done running.
self._done = False
# When the process started.
self._start_time = None
# This ID is used to uniquely name the process / thread that's launched
# by this processor instance
self._instance_id = DagFileProcessor.class_creation_counter
DagFileProcessor.class_creation_counter += 1
@property
def file_path(self):
return self._file_path
@staticmethod
def _launch_process(result_queue,
file_path,
pickle_dags,
dag_id_white_list,
thread_name,
zombies):
"""
Launch a process to process the given file.
:param result_queue: the queue to use for passing back the result
:type result_queue: multiprocessing.Queue
:param file_path: the file to process
:type file_path: unicode
:param pickle_dags: whether to pickle the DAGs found in the file and
save them to the DB
:type pickle_dags: bool
:param dag_id_white_list: if specified, only examine DAG ID's that are
in this list
:type dag_id_white_list: list[unicode]
:param thread_name: the name to use for the process that is launched
:type thread_name: unicode
:return: the process that was launched
:rtype: multiprocessing.Process
:param zombies: zombie task instances to kill
:type zombies: list[SimpleTaskInstance]
"""
def helper():
# This helper runs in the newly created process
log = logging.getLogger("airflow.processor")
stdout = StreamLogWriter(log, logging.INFO)
stderr = StreamLogWriter(log, logging.WARN)
set_context(log, file_path)
try:
# redirect stdout/stderr to log
sys.stdout = stdout
sys.stderr = stderr
# Re-configure the ORM engine as there are issues with multiple processes
settings.configure_orm()
# Change the thread name to differentiate log lines. This is
# really a separate process, but changing the name of the
# process doesn't work, so changing the thread name instead.
threading.current_thread().name = thread_name
start_time = time.time()
log.info("Started process (PID=%s) to work on %s",
os.getpid(), file_path)
scheduler_job = SchedulerJob(dag_ids=dag_id_white_list, log=log)
result = scheduler_job.process_file(file_path,
zombies,
pickle_dags)
result_queue.put(result)
end_time = time.time()
log.info(
"Processing %s took %.3f seconds", file_path, end_time - start_time
)
except Exception:
# Log exceptions through the logging framework.
log.exception("Got an exception! Propagating...")
raise
finally:
sys.stdout = sys.__stdout__
sys.stderr = sys.__stderr__
# We re-initialized the ORM within this Process above so we need to
# tear it down manually here
settings.dispose_orm()
p = multiprocessing.Process(target=helper,
args=(),
name="{}-Process".format(thread_name))
p.start()
return p
def start(self):
"""
Launch the process and start processing the DAG.
"""
self._process = DagFileProcessor._launch_process(
self._result_queue,
self.file_path,
self._pickle_dags,
self._dag_id_white_list,
"DagFileProcessor{}".format(self._instance_id),
self._zombies)
self._start_time = timezone.utcnow()
def terminate(self, sigkill=False):
"""
Terminate (and then kill) the process launched to process the file.
:param sigkill: whether to issue a SIGKILL if SIGTERM doesn't work.
:type sigkill: bool
"""
if self._process is None:
raise AirflowException("Tried to call stop before starting!")
# The queue will likely get corrupted, so remove the reference
self._result_queue = None
self._process.terminate()
# Arbitrarily wait 5s for the process to die
self._process.join(5)
if sigkill and self._process.is_alive():
self.log.warning("Killing PID %s", self._process.pid)
os.kill(self._process.pid, signal.SIGKILL)
@property
def pid(self):
"""
:return: the PID of the process launched to process the given file
:rtype: int
"""
if self._process is None:
raise AirflowException("Tried to get PID before starting!")
return self._process.pid
@property
def exit_code(self):
"""
After the process is finished, this can be called to get the return code
:return: the exit code of the process
:rtype: int
"""
if not self._done:
raise AirflowException("Tried to call retcode before process was finished!")
return self._process.exitcode
@property
def done(self):
"""
Check if the process launched to process this file is done.
:return: whether the process is finished running
:rtype: bool
"""
if self._process is None:
raise AirflowException("Tried to see if it's done before starting!")
if self._done:
return True
# In case result queue is corrupted.
if self._result_queue and not self._result_queue.empty():
self._result = self._result_queue.get_nowait()
self._done = True
self.log.debug("Waiting for %s", self._process)
self._process.join()
return True
# Potential error case when process dies
if self._result_queue and not self._process.is_alive():
self._done = True
# Get the object from the queue or else join() can hang.
if not self._result_queue.empty():
self._result = self._result_queue.get_nowait()
self.log.debug("Waiting for %s", self._process)
self._process.join()
return True
return False
@property
def result(self):
"""
:return: result of running SchedulerJob.process_file()
        :rtype: list[SimpleDag]
"""
if not self.done:
raise AirflowException("Tried to get the result before it's done!")
return self._result
@property
def start_time(self):
"""
:return: when this started to process the file
:rtype: datetime
"""
if self._start_time is None:
raise AirflowException("Tried to get start time before it started!")
return self._start_time
class SchedulerJob(BaseJob):
"""
This SchedulerJob runs for a specific time interval and schedules the jobs
that are ready to run. It figures out the latest runs for each
task and sees if the dependencies for the next schedules are met.
If so, it creates appropriate TaskInstances and sends run commands to the
executor. It does this for each task in each DAG and repeats.
"""
__mapper_args__ = {
'polymorphic_identity': 'SchedulerJob'
}
def __init__(
self,
dag_id=None,
dag_ids=None,
subdir=settings.DAGS_FOLDER,
num_runs=-1,
processor_poll_interval=1.0,
run_duration=None,
do_pickle=False,
log=None,
*args, **kwargs):
"""
:param dag_id: if specified, only schedule tasks with this DAG ID
:type dag_id: unicode
:param dag_ids: if specified, only schedule tasks with these DAG IDs
:type dag_ids: list[unicode]
:param subdir: directory containing Python files with Airflow DAG
definitions, or a specific path to a file
:type subdir: unicode
:param num_runs: The number of times to try to schedule each DAG file.
-1 for unlimited within the run_duration.
:type num_runs: int
:param processor_poll_interval: The number of seconds to wait between
polls of running processors
        :type processor_poll_interval: float
:param run_duration: how long to run (in seconds) before exiting
:type run_duration: int
:param do_pickle: once a DAG object is obtained by executing the Python
file, whether to serialize the DAG object to the DB
:type do_pickle: bool
"""
# for BaseJob compatibility
self.dag_id = dag_id
self.dag_ids = [dag_id] if dag_id else []
if dag_ids:
self.dag_ids.extend(dag_ids)
self.subdir = subdir
self.num_runs = num_runs
self.run_duration = run_duration
self._processor_poll_interval = processor_poll_interval
self.do_pickle = do_pickle
super(SchedulerJob, self).__init__(*args, **kwargs)
self.heartrate = conf.getint('scheduler', 'SCHEDULER_HEARTBEAT_SEC')
self.max_threads = conf.getint('scheduler', 'max_threads')
if log:
self._log = log
self.using_sqlite = False
if 'sqlite' in conf.get('core', 'sql_alchemy_conn'):
self.using_sqlite = True
self.max_tis_per_query = conf.getint('scheduler', 'max_tis_per_query')
if run_duration is None:
self.run_duration = conf.getint('scheduler',
'run_duration')
self.processor_agent = None
self._last_loop = False
signal.signal(signal.SIGINT, self._exit_gracefully)
signal.signal(signal.SIGTERM, self._exit_gracefully)
def _exit_gracefully(self, signum, frame):
"""
Helper method to clean up processor_agent to avoid leaving orphan processes.
"""
self.log.info("Exiting gracefully upon receiving signal {}".format(signum))
if self.processor_agent:
self.processor_agent.end()
sys.exit(os.EX_OK)
@provide_session
def manage_slas(self, dag, session=None):
"""
        Find all tasks that have SLAs defined and send alert emails
        where needed. New SLA misses are also recorded in the database.
        We assume that the scheduler runs often, so we only check for
        tasks that should have succeeded in the past hour.
"""
        if not any([task.sla for task in dag.tasks]):
self.log.info(
"Skipping SLA check for %s because no tasks in DAG have SLAs",
dag
)
return
TI = models.TaskInstance
sq = (
session
.query(
TI.task_id,
func.max(TI.execution_date).label('max_ti'))
.with_hint(TI, 'USE INDEX (PRIMARY)', dialect_name='mysql')
.filter(TI.dag_id == dag.dag_id)
.filter(or_(
TI.state == State.SUCCESS,
TI.state == State.SKIPPED))
.filter(TI.task_id.in_(dag.task_ids))
.group_by(TI.task_id).subquery('sq')
)
max_tis = session.query(TI).filter(
TI.dag_id == dag.dag_id,
TI.task_id == sq.c.task_id,
TI.execution_date == sq.c.max_ti,
).all()
ts = timezone.utcnow()
SlaMiss = models.SlaMiss
for ti in max_tis:
task = dag.get_task(ti.task_id)
dttm = ti.execution_date
if task.sla:
dttm = dag.following_schedule(dttm)
while dttm < timezone.utcnow():
following_schedule = dag.following_schedule(dttm)
if following_schedule + task.sla < timezone.utcnow():
session.merge(models.SlaMiss(
task_id=ti.task_id,
dag_id=ti.dag_id,
execution_date=dttm,
timestamp=ts))
dttm = dag.following_schedule(dttm)
session.commit()
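        # Worked example (illustrative): for a @daily DAG whose latest
        # successful execution_date is 2019-01-01 and a task with
        # sla=timedelta(hours=1), dttm starts at the following schedule,
        # 2019-01-02. Its following schedule is 2019-01-03, so once
        # utcnow() passes 2019-01-03 01:00 an SlaMiss row is merged for
        # execution_date 2019-01-02, and dttm advances one schedule per
        # iteration until it catches up to now.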
slas = (
session
.query(SlaMiss)
.filter(SlaMiss.notification_sent == False) # noqa: E712
.filter(SlaMiss.dag_id == dag.dag_id)
.all()
)
if slas:
sla_dates = [sla.execution_date for sla in slas]
qry = (
session
.query(TI)
.filter(TI.state != State.SUCCESS)
.filter(TI.execution_date.in_(sla_dates))
.filter(TI.dag_id == dag.dag_id)
.all()
)
blocking_tis = []
for ti in qry:
if ti.task_id in dag.task_ids:
ti.task = dag.get_task(ti.task_id)
blocking_tis.append(ti)
else:
session.delete(ti)
session.commit()
task_list = "\n".join([
sla.task_id + ' on ' + sla.execution_date.isoformat()
for sla in slas])
blocking_task_list = "\n".join([
ti.task_id + ' on ' + ti.execution_date.isoformat()
for ti in blocking_tis])
# Track whether email or any alert notification sent
# We consider email or the alert callback as notifications
email_sent = False
notification_sent = False
if dag.sla_miss_callback:
# Execute the alert callback
self.log.info(' --------------> ABOUT TO CALL SLA MISS CALL BACK ')
try:
dag.sla_miss_callback(dag, task_list, blocking_task_list, slas,
blocking_tis)
notification_sent = True
except Exception:
self.log.exception("Could not call sla_miss_callback for DAG %s",
dag.dag_id)
email_content = """\
Here's a list of tasks that missed their SLAs:
            <pre><code>{task_list}\n</code></pre>
            Blocking tasks:
            <pre><code>{blocking_task_list}\n{bug}</code></pre>
""".format(bug=asciiart.bug, **locals())
emails = set()
for task in dag.tasks:
if task.email:
if isinstance(task.email, basestring):
emails |= set(get_email_address_list(task.email))
elif isinstance(task.email, (list, tuple)):
emails |= set(task.email)
if emails and len(slas):
try:
send_email(
emails,
"[airflow] SLA miss on DAG=" + dag.dag_id,
email_content)
email_sent = True
notification_sent = True
except Exception:
self.log.exception("Could not send SLA Miss email notification for"
" DAG %s", dag.dag_id)
# If we sent any notification, update the sla_miss table
if notification_sent:
for sla in slas:
if email_sent:
sla.email_sent = True
sla.notification_sent = True
session.merge(sla)
session.commit()
@staticmethod
def update_import_errors(session, dagbag):
"""
        For the DAGs in the given DagBag, record any associated import errors and clear
errors for files that no longer have them. These are usually displayed through the
Airflow UI so that users know that there are issues parsing DAGs.
:param session: session for ORM operations
:type session: sqlalchemy.orm.session.Session
:param dagbag: DagBag containing DAGs with import errors
        :type dagbag: models.DagBag
"""
# Clear the errors of the processed files
for dagbag_file in dagbag.file_last_changed:
session.query(models.ImportError).filter(
models.ImportError.filename == dagbag_file
).delete()
# Add the errors of the processed files
for filename, stacktrace in six.iteritems(dagbag.import_errors):
session.add(models.ImportError(
filename=filename,
stacktrace=stacktrace))
session.commit()
@provide_session
def create_dag_run(self, dag, session=None):
"""
This method checks whether a new DagRun needs to be created
for a DAG based on scheduling interval.
Returns DagRun if one is scheduled. Otherwise returns None.
"""
if dag.schedule_interval and conf.getboolean('scheduler', 'USE_JOB_SCHEDULE'):
active_runs = DagRun.find(
dag_id=dag.dag_id,
state=State.RUNNING,
external_trigger=False,
session=session
)
# return if already reached maximum active runs and no timeout setting
if len(active_runs) >= dag.max_active_runs and not dag.dagrun_timeout:
return
timedout_runs = 0
for dr in active_runs:
if (
dr.start_date and dag.dagrun_timeout and
dr.start_date < timezone.utcnow() - dag.dagrun_timeout):
dr.state = State.FAILED
dr.end_date = timezone.utcnow()
dag.handle_callback(dr, success=False, reason='dagrun_timeout',
session=session)
timedout_runs += 1
session.commit()
if len(active_runs) - timedout_runs >= dag.max_active_runs:
return
# this query should be replaced by find dagrun
qry = (
session.query(func.max(DagRun.execution_date))
.filter_by(dag_id=dag.dag_id)
.filter(or_(
DagRun.external_trigger == False, # noqa: E712
# add % as a wildcard for the like query
DagRun.run_id.like(DagRun.ID_PREFIX + '%')
))
)
last_scheduled_run = qry.scalar()
# don't schedule @once again
if dag.schedule_interval == '@once' and last_scheduled_run:
return None
            # don't do scheduler catchup for DAGs that don't have catchup = True
if not (dag.catchup or dag.schedule_interval == '@once'):
# The logic is that we move start_date up until
# one period before, so that timezone.utcnow() is AFTER
# the period end, and the job can be created...
now = timezone.utcnow()
next_start = dag.following_schedule(now)
last_start = dag.previous_schedule(now)
if next_start <= now:
new_start = last_start
else:
new_start = dag.previous_schedule(last_start)
if dag.start_date:
if new_start >= dag.start_date:
dag.start_date = new_start
else:
dag.start_date = new_start
next_run_date = None
if not last_scheduled_run:
# First run
task_start_dates = [t.start_date for t in dag.tasks]
if task_start_dates:
next_run_date = dag.normalize_schedule(min(task_start_dates))
self.log.debug(
"Next run date based on tasks %s",
next_run_date
)
else:
next_run_date = dag.following_schedule(last_scheduled_run)
# make sure backfills are also considered
last_run = dag.get_last_dagrun(session=session)
if last_run and next_run_date:
while next_run_date <= last_run.execution_date:
next_run_date = dag.following_schedule(next_run_date)
# don't ever schedule prior to the dag's start_date
if dag.start_date:
next_run_date = (dag.start_date if not next_run_date
else max(next_run_date, dag.start_date))
if next_run_date == dag.start_date:
next_run_date = dag.normalize_schedule(dag.start_date)
self.log.debug(
"Dag start date: %s. Next run date: %s",
dag.start_date, next_run_date
)
# don't ever schedule in the future
if next_run_date > timezone.utcnow():
return
# this structure is necessary to avoid a TypeError from concatenating
# NoneType
if dag.schedule_interval == '@once':
period_end = next_run_date
elif next_run_date:
period_end = dag.following_schedule(next_run_date)
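            # Illustrative: for a @daily DAG with next_run_date 2019-01-02,
            # period_end is 2019-01-03, so the run for 2019-01-02 is only
            # created below once utcnow() has passed 2019-01-03, i.e. after
            # the data interval it covers has fully elapsed.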
# Don't schedule a dag beyond its end_date (as specified by the dag param)
if next_run_date and dag.end_date and next_run_date > dag.end_date:
return
# Don't schedule a dag beyond its end_date (as specified by the task params)
# Get the min task end date, which may come from the dag.default_args
            min_task_end_date = None
task_end_dates = [t.end_date for t in dag.tasks if t.end_date]
if task_end_dates:
min_task_end_date = min(task_end_dates)
if next_run_date and min_task_end_date and next_run_date > min_task_end_date:
return
if next_run_date and period_end and period_end <= timezone.utcnow():
next_run = dag.create_dagrun(
run_id=DagRun.ID_PREFIX + next_run_date.isoformat(),
execution_date=next_run_date,
start_date=timezone.utcnow(),
state=State.RUNNING,
external_trigger=False
)
return next_run
@provide_session
def _process_task_instances(self, dag, queue, session=None):
"""
This method schedules the tasks for a single DAG by looking at the
active DAG runs and adding task instances that should run to the
queue.
"""
# update the state of the previously active dag runs
dag_runs = DagRun.find(dag_id=dag.dag_id, state=State.RUNNING, session=session)
active_dag_runs = []
for run in dag_runs:
self.log.info("Examining DAG run %s", run)
# don't consider runs that are executed in the future
if run.execution_date > timezone.utcnow():
self.log.error(
"Execution date is in future: %s",
run.execution_date
)
continue
if len(active_dag_runs) >= dag.max_active_runs:
self.log.info("Active dag runs > max_active_run.")
continue
# skip backfill dagruns for now as long as they are not really scheduled
if run.is_backfill:
continue
# todo: run.dag is transient but needs to be set
run.dag = dag
# todo: preferably the integrity check happens at dag collection time
run.verify_integrity(session=session)
run.update_state(session=session)
if run.state == State.RUNNING:
make_transient(run)
active_dag_runs.append(run)
for run in active_dag_runs:
self.log.debug("Examining active DAG run: %s", run)
            # this needs a fresh session; sometimes TIs get detached
tis = run.get_task_instances(state=(State.NONE,
State.UP_FOR_RETRY))
            # This loop is quite slow as it uses are_dependencies_met for
            # every task (in ti.is_runnable). That check is also done in
            # update_state above, which has already examined these tasks.
for ti in tis:
task = dag.get_task(ti.task_id)
# fixme: ti.task is transient but needs to be set
ti.task = task
# future: remove adhoc
if task.adhoc:
continue
if ti.are_dependencies_met(
dep_context=DepContext(flag_upstream_failed=True),
session=session):
self.log.debug('Queuing task: %s', ti)
queue.append(ti.key)
@provide_session
def _change_state_for_tis_without_dagrun(self,
simple_dag_bag,
old_states,
new_state,
session=None):
"""
For all DAG IDs in the SimpleDagBag, look for task instances in the
old_states and set them to new_state if the corresponding DagRun
does not exist or exists but is not in the running state. This
normally should not happen, but it can if the state of DagRuns are
changed manually.
:param old_states: examine TaskInstances in this state
        :type old_states: list[State]
:param new_state: set TaskInstances to this state
:type new_state: State
:param simple_dag_bag: TaskInstances associated with DAGs in the
simple_dag_bag and with states in the old_state will be examined
:type simple_dag_bag: SimpleDagBag
"""
tis_changed = 0
query = session \
.query(models.TaskInstance) \
.outerjoin(models.DagRun, and_(
models.TaskInstance.dag_id == models.DagRun.dag_id,
models.TaskInstance.execution_date == models.DagRun.execution_date)) \
.filter(models.TaskInstance.dag_id.in_(simple_dag_bag.dag_ids)) \
.filter(models.TaskInstance.state.in_(old_states)) \
.filter(or_(
models.DagRun.state != State.RUNNING,
models.DagRun.state.is_(None)))
if self.using_sqlite:
tis_to_change = query \
.with_for_update() \
.all()
for ti in tis_to_change:
ti.set_state(new_state, session=session)
tis_changed += 1
else:
subq = query.subquery()
tis_changed = session \
.query(models.TaskInstance) \
.filter(and_(
models.TaskInstance.dag_id == subq.c.dag_id,
models.TaskInstance.task_id == subq.c.task_id,
models.TaskInstance.execution_date ==
subq.c.execution_date)) \
.update({models.TaskInstance.state: new_state},
synchronize_session=False)
session.commit()
if tis_changed > 0:
self.log.warning(
"Set %s task instances to state=%s as their associated DagRun was not in RUNNING state",
tis_changed, new_state
)
@provide_session
def __get_task_concurrency_map(self, states, session=None):
"""
        Returns a map from (dag_id, task_id) to the count of task instances
        in the given states.
:param states: List of states to query for
:type states: List[State]
:return: A map from (dag_id, task_id) to count of tasks in states
        :rtype: Dict[Tuple[String, String], Int]
"""
TI = models.TaskInstance
ti_concurrency_query = (
session
.query(TI.task_id, TI.dag_id, func.count('*'))
.filter(TI.state.in_(states))
.group_by(TI.task_id, TI.dag_id)
).all()
task_map = defaultdict(int)
for result in ti_concurrency_query:
task_id, dag_id, count = result
task_map[(dag_id, task_id)] = count
return task_map
@provide_session
def _find_executable_task_instances(self, simple_dag_bag, states, session=None):
"""
Finds TIs that are ready for execution with respect to pool limits,
dag concurrency, executor state, and priority.
:param simple_dag_bag: TaskInstances associated with DAGs in the
simple_dag_bag will be fetched from the DB and executed
:type simple_dag_bag: SimpleDagBag
:param states: Execute TaskInstances in these states
:type states: Tuple[State]
:return: List[TaskInstance]
"""
executable_tis = []
        # Get all task instances associated with scheduled
        # DagRuns which are not backfilled, in the given states,
        # and whose dag is not paused
TI = models.TaskInstance
DR = models.DagRun
DM = models.DagModel
ti_query = (
session
.query(TI)
.filter(TI.dag_id.in_(simple_dag_bag.dag_ids))
.outerjoin(
DR,
and_(DR.dag_id == TI.dag_id, DR.execution_date == TI.execution_date)
)
.filter(or_(DR.run_id == None, # noqa: E711
not_(DR.run_id.like(BackfillJob.ID_PREFIX + '%'))))
.outerjoin(DM, DM.dag_id == TI.dag_id)
.filter(or_(DM.dag_id == None, # noqa: E711
not_(DM.is_paused)))
)
if None in states:
ti_query = ti_query.filter(
or_(TI.state == None, TI.state.in_(states)) # noqa: E711
)
else:
ti_query = ti_query.filter(TI.state.in_(states))
task_instances_to_examine = ti_query.all()
if len(task_instances_to_examine) == 0:
self.log.debug("No tasks to consider for execution.")
return executable_tis
# Put one task instance on each line
task_instance_str = "\n\t".join(
["{}".format(x) for x in task_instances_to_examine])
self.log.info("{} tasks up for execution:\n\t{}"
.format(len(task_instances_to_examine),
task_instance_str))
# Get the pool settings
pools = {p.pool: p for p in session.query(models.Pool).all()}
pool_to_task_instances = defaultdict(list)
for task_instance in task_instances_to_examine:
pool_to_task_instances[task_instance.pool].append(task_instance)
states_to_count_as_running = [State.RUNNING, State.QUEUED]
task_concurrency_map = self.__get_task_concurrency_map(
states=states_to_count_as_running, session=session)
# Go through each pool, and queue up a task for execution if there are
# any open slots in the pool.
for pool, task_instances in pool_to_task_instances.items():
if not pool:
# Arbitrary:
# If queued outside of a pool, trigger no more than
# non_pooled_task_slot_count per run
open_slots = conf.getint('core', 'non_pooled_task_slot_count')
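                # Illustrative: with the common default of 128 for
                # non_pooled_task_slot_count (the actual value comes from
                # airflow.cfg), at most 128 unpooled task instances are
                # queued per scheduling round.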
else:
if pool not in pools:
self.log.warning(
"Tasks using non-existent pool '%s' will not be scheduled",
pool
)
open_slots = 0
else:
open_slots = pools[pool].open_slots(session=session)
num_queued = len(task_instances)
self.log.info(
"Figuring out tasks to run in Pool(name={pool}) with {open_slots} "
"open slots and {num_queued} task instances in queue".format(
**locals()
)
)
priority_sorted_task_instances = sorted(
task_instances, key=lambda ti: (-ti.priority_weight, ti.execution_date))
            # Map of DAG ID to the number of its tasks currently running or
            # queued, used to enforce the DAG's concurrency limit
dag_id_to_possibly_running_task_count = {}
for task_instance in priority_sorted_task_instances:
if open_slots <= 0:
self.log.info(
"Not scheduling since there are %s open slots in pool %s",
open_slots, pool
)
# Can't schedule any more since there are no more open slots.
break
# Check to make sure that the task concurrency of the DAG hasn't been
# reached.
dag_id = task_instance.dag_id
simple_dag = simple_dag_bag.get_dag(dag_id)
if dag_id not in dag_id_to_possibly_running_task_count:
dag_id_to_possibly_running_task_count[dag_id] = \
DAG.get_num_task_instances(
dag_id,
simple_dag_bag.get_dag(dag_id).task_ids,
states=states_to_count_as_running,
session=session)
current_task_concurrency = dag_id_to_possibly_running_task_count[dag_id]
task_concurrency_limit = simple_dag_bag.get_dag(dag_id).concurrency
self.log.info(
"DAG %s has %s/%s running and queued tasks",
dag_id, current_task_concurrency, task_concurrency_limit
)
if current_task_concurrency >= task_concurrency_limit:
self.log.info(
"Not executing %s since the number of tasks running or queued "
"from DAG %s is >= to the DAG's task concurrency limit of %s",
task_instance, dag_id, task_concurrency_limit
)
continue
task_concurrency = simple_dag.get_task_special_arg(
task_instance.task_id,
'task_concurrency')
if task_concurrency is not None:
num_running = task_concurrency_map[
(task_instance.dag_id, task_instance.task_id)
]
if num_running >= task_concurrency:
self.log.info("Not executing %s since the task concurrency for"
" this task has been reached.", task_instance)
continue
else:
task_concurrency_map[(task_instance.dag_id, task_instance.task_id)] += 1
if self.executor.has_task(task_instance):
self.log.debug(
"Not handling task %s as the executor reports it is running",
task_instance.key
)
continue
executable_tis.append(task_instance)
open_slots -= 1
dag_id_to_possibly_running_task_count[dag_id] += 1
task_instance_str = "\n\t".join(
["{}".format(x) for x in executable_tis])
self.log.info(
"Setting the following tasks to queued state:\n\t%s", task_instance_str)
        # so these don't expire on commit
for ti in executable_tis:
copy_dag_id = ti.dag_id
copy_execution_date = ti.execution_date
copy_task_id = ti.task_id
make_transient(ti)
ti.dag_id = copy_dag_id
ti.execution_date = copy_execution_date
ti.task_id = copy_task_id
return executable_tis
@provide_session
def _change_state_for_executable_task_instances(self, task_instances,
acceptable_states, session=None):
"""
Changes the state of task instances in the list with one of the given states
to QUEUED atomically, and returns the TIs changed in SimpleTaskInstance format.
:param task_instances: TaskInstances to change the state of
:type task_instances: List[TaskInstance]
:param acceptable_states: Filters the TaskInstances updated to be in these states
:type acceptable_states: Iterable[State]
:return: List[SimpleTaskInstance]
"""
if len(task_instances) == 0:
session.commit()
return []
TI = models.TaskInstance
filter_for_ti_state_change = (
[and_(
TI.dag_id == ti.dag_id,
TI.task_id == ti.task_id,
TI.execution_date == ti.execution_date)
for ti in task_instances])
ti_query = (
session
.query(TI)
.filter(or_(*filter_for_ti_state_change)))
if None in acceptable_states:
ti_query = ti_query.filter(
or_(TI.state == None, TI.state.in_(acceptable_states)) # noqa: E711
)
else:
ti_query = ti_query.filter(TI.state.in_(acceptable_states))
tis_to_set_to_queued = (
ti_query
.with_for_update()
.all())
if len(tis_to_set_to_queued) == 0:
self.log.info("No tasks were able to have their state changed to queued.")
session.commit()
return []
# set TIs to queued state
for task_instance in tis_to_set_to_queued:
task_instance.state = State.QUEUED
task_instance.queued_dttm = (timezone.utcnow()
if not task_instance.queued_dttm
else task_instance.queued_dttm)
session.merge(task_instance)
# Generate a list of SimpleTaskInstance for the use of queuing
# them in the executor.
simple_task_instances = [SimpleTaskInstance(ti) for ti in
tis_to_set_to_queued]
task_instance_str = "\n\t".join(
["{}".format(x) for x in tis_to_set_to_queued])
session.commit()
self.log.info("Setting the following {} tasks to queued state:\n\t{}"
.format(len(tis_to_set_to_queued), task_instance_str))
return simple_task_instances
def _enqueue_task_instances_with_queued_state(self, simple_dag_bag,
simple_task_instances):
"""
Takes task_instances, which should have been set to queued, and enqueues them
with the executor.
:param simple_task_instances: TaskInstances to enqueue
:type simple_task_instances: List[SimpleTaskInstance]
        :param simple_dag_bag: Should contain all of the task_instances' dags
:type simple_dag_bag: SimpleDagBag
"""
TI = models.TaskInstance
# actually enqueue them
for simple_task_instance in simple_task_instances:
simple_dag = simple_dag_bag.get_dag(simple_task_instance.dag_id)
command = TI.generate_command(
simple_task_instance.dag_id,
simple_task_instance.task_id,
simple_task_instance.execution_date,
local=True,
mark_success=False,
ignore_all_deps=False,
ignore_depends_on_past=False,
ignore_task_deps=False,
ignore_ti_state=False,
pool=simple_task_instance.pool,
file_path=simple_dag.full_filepath,
pickle_id=simple_dag.pickle_id)
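            # The generated command corresponds roughly to an
            #   airflow run <dag_id> <task_id> <execution_date> --local ...
            # invocation; the exact form and flags depend on the arguments
            # above and on the Airflow version.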
priority = simple_task_instance.priority_weight
queue = simple_task_instance.queue
self.log.info(
"Sending %s to executor with priority %s and queue %s",
simple_task_instance.key, priority, queue
)
self.executor.queue_command(
simple_task_instance,
command,
priority=priority,
queue=queue)
@provide_session
def _execute_task_instances(self,
simple_dag_bag,
states,
session=None):
"""
Attempts to execute TaskInstances that should be executed by the scheduler.
There are three steps:
        1. Pick TIs by priority with the constraint that they are in the expected states
        and that we do not exceed max_active_runs or pool limits.
2. Change the state for the TIs above atomically.
3. Enqueue the TIs in the executor.
:param simple_dag_bag: TaskInstances associated with DAGs in the
simple_dag_bag will be fetched from the DB and executed
:type simple_dag_bag: SimpleDagBag
:param states: Execute TaskInstances in these states
:type states: Tuple[State]
        :return: Number of task instances whose state was changed.
"""
executable_tis = self._find_executable_task_instances(simple_dag_bag, states,
session=session)
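        # helpers.reduce_in_chunks below behaves like functools.reduce over
        # fixed-size slices of executable_tis. Sketch of the intended
        # batching: with max_tis_per_query=3 and 7 executable TIs, query()
        # runs on chunks of 3, 3 and 1 TIs, accumulating the number of TIs
        # whose state was actually changed.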
def query(result, items):
simple_tis_with_state_changed = \
self._change_state_for_executable_task_instances(items,
states,
session=session)
self._enqueue_task_instances_with_queued_state(
simple_dag_bag,
simple_tis_with_state_changed)
session.commit()
return result + len(simple_tis_with_state_changed)
return helpers.reduce_in_chunks(query, executable_tis, 0, self.max_tis_per_query)
@provide_session
def _change_state_for_tasks_failed_to_execute(self, session):
"""
If there are tasks left over in the executor,
we set them back to SCHEDULED to avoid creating hanging tasks.
:param session: session for ORM operations
"""
if self.executor.queued_tasks:
TI = models.TaskInstance
filter_for_ti_state_change = (
[and_(
TI.dag_id == dag_id,
TI.task_id == task_id,
TI.execution_date == execution_date,
                    # TI.try_number returns the raw try_number + 1 when the
                    # TI is not running, so subtract 1 to match the DB record.
TI._try_number == try_number - 1,
TI.state == State.QUEUED)
for dag_id, task_id, execution_date, try_number
in self.executor.queued_tasks.keys()])
ti_query = (session.query(TI)
.filter(or_(*filter_for_ti_state_change)))
tis_to_set_to_scheduled = (ti_query
.with_for_update()
.all())
if len(tis_to_set_to_scheduled) == 0:
session.commit()
return
            # set TIs back to scheduled state
for task_instance in tis_to_set_to_scheduled:
task_instance.state = State.SCHEDULED
task_instance_str = "\n\t".join(
["{}".format(x) for x in tis_to_set_to_scheduled])
session.commit()
self.log.info("Set the following tasks to scheduled state:\n\t{}"
.format(task_instance_str))
def _process_dags(self, dagbag, dags, tis_out):
"""
Iterates over the dags and processes them. Processing includes:
1. Create appropriate DagRun(s) in the DB.
2. Create appropriate TaskInstance(s) in the DB.
3. Send emails for tasks that have missed SLAs.
:param dagbag: a collection of DAGs to process
:type dagbag: models.DagBag
:param dags: the DAGs from the DagBag to process
        :type dags: list[DAG]
        :param tis_out: a list to which the keys of generated TaskInstances
            are appended
        :type tis_out: list[tuple]
:return: None
"""
        for dag in dags:
            dag_id = dag.dag_id
            dag = dagbag.get_dag(dag_id)
            if not dag:
                self.log.error("DAG ID %s was not found in the DagBag", dag_id)
                continue
            if dag.is_paused:
                self.log.info("Not processing DAG %s since it's paused", dag_id)
                continue
self.log.info("Processing %s", dag.dag_id)
dag_run = self.create_dag_run(dag)
if dag_run:
self.log.info("Created %s", dag_run)
self._process_task_instances(dag, tis_out)
self.manage_slas(dag)
@provide_session
def _process_executor_events(self, simple_dag_bag, session=None):
"""
Respond to executor events.
"""
# TODO: this shares quite a lot of code with _manage_executor_state
TI = models.TaskInstance
for key, state in list(self.executor.get_event_buffer(simple_dag_bag.dag_ids)
.items()):
dag_id, task_id, execution_date, try_number = key
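            # An event buffer key is a task instance key, e.g.
            # (illustrative): ('example_dag', 'extract',
            #                  datetime(2019, 1, 1), 1)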
self.log.info(
"Executor reports %s.%s execution_date=%s as %s for try_number %s",
dag_id, task_id, execution_date, state, try_number
)
if state == State.FAILED or state == State.SUCCESS:
qry = session.query(TI).filter(TI.dag_id == dag_id,
TI.task_id == task_id,
TI.execution_date == execution_date)
ti = qry.first()
                if not ti:
                    self.log.warning(
                        "TaskInstance %s.%s (execution_date=%s) went missing "
                        "from the database", dag_id, task_id, execution_date)
                    continue
# TODO: should we fail RUNNING as well, as we do in Backfills?
if ti.try_number == try_number and ti.state == State.QUEUED:
msg = ("Executor reports task instance {} finished ({}) "
"although the task says its {}. Was the task "
"killed externally?".format(ti, state, ti.state))
self.log.error(msg)
try:
simple_dag = simple_dag_bag.get_dag(dag_id)
dagbag = models.DagBag(simple_dag.full_filepath)
dag = dagbag.get_dag(dag_id)
ti.task = dag.get_task(task_id)
ti.handle_failure(msg)
except Exception:
self.log.error("Cannot load the dag bag to handle failure for %s"
". Setting task to FAILED without callbacks or "
"retries. Do you have enough resources?", ti)
ti.state = State.FAILED
session.merge(ti)
session.commit()
def _execute(self):
self.log.info("Starting the scheduler")
# DAGs can be pickled for easier remote execution by some executors
pickle_dags = False
if self.do_pickle and self.executor.__class__ not in \
(executors.LocalExecutor, executors.SequentialExecutor):
pickle_dags = True
self.log.info("Running execute loop for %s seconds", self.run_duration)
self.log.info("Processing each file at most %s times", self.num_runs)
# Build up a list of Python files that could contain DAGs
self.log.info("Searching for files in %s", self.subdir)
known_file_paths = list_py_file_paths(self.subdir)
self.log.info("There are %s files in %s", len(known_file_paths), self.subdir)
def processor_factory(file_path, zombies):
return DagFileProcessor(file_path,
pickle_dags,
self.dag_ids,
zombies)
# When using sqlite, we do not use async_mode
# so the scheduler job and DAG parser don't access the DB at the same time.
async_mode = not self.using_sqlite
self.processor_agent = DagFileProcessorAgent(self.subdir,
known_file_paths,
self.num_runs,
processor_factory,
async_mode)
try:
self._execute_helper()
except Exception:
self.log.exception("Exception when executing execute_helper")
finally:
self.processor_agent.end()
self.log.info("Exited execute loop")
def _execute_helper(self):
"""
The actual scheduler loop. The main steps in the loop are:
#. Harvest DAG parsing results through DagFileProcessorAgent
#. Find and queue executable tasks
#. Change task instance state in DB
#. Queue tasks in executor
#. Heartbeat executor
#. Execute queued tasks in executor asynchronously
#. Sync on the states of running tasks
Following is a graphic representation of these steps.
.. image:: ../docs/img/scheduler_loop.jpg
:return: None
"""
self.executor.start()
self.log.info("Resetting orphaned tasks for active dag runs")
self.reset_state_for_orphaned_tasks()
# Start after resetting orphaned tasks to avoid stressing out DB.
self.processor_agent.start()
execute_start_time = timezone.utcnow()
# Last time that self.heartbeat() was called.
last_self_heartbeat_time = timezone.utcnow()
# For the execute duration, parse and schedule DAGs
while (timezone.utcnow() - execute_start_time).total_seconds() < \
self.run_duration or self.run_duration < 0:
self.log.debug("Starting Loop...")
loop_start_time = time.time()
if self.using_sqlite:
self.processor_agent.heartbeat()
# For the sqlite case w/ 1 thread, wait until the processor
# is finished to avoid concurrent access to the DB.
self.log.debug(
"Waiting for processors to finish since we're using sqlite")
self.processor_agent.wait_until_finished()
self.log.info("Harvesting DAG parsing results")
simple_dags = self.processor_agent.harvest_simple_dags()
self.log.debug("Harvested {} SimpleDAGs".format(len(simple_dags)))
# Send tasks for execution if available
simple_dag_bag = SimpleDagBag(simple_dags)
if len(simple_dags) > 0:
try:
# Handle cases where a DAG run state is set (perhaps manually) to
# a non-running state. Handle task instances that belong to
# DAG runs in those states
# If a task instance is up for retry but the corresponding DAG run
# isn't running, mark the task instance as FAILED so we don't try
# to re-run it.
self._change_state_for_tis_without_dagrun(simple_dag_bag,
[State.UP_FOR_RETRY],
State.FAILED)
# If a task instance is scheduled or queued, but the corresponding
# DAG run isn't running, set the state to NONE so we don't try to
# re-run it.
self._change_state_for_tis_without_dagrun(simple_dag_bag,
[State.QUEUED,
State.SCHEDULED],
State.NONE)
self._execute_task_instances(simple_dag_bag,
(State.SCHEDULED,))
except Exception as e:
self.log.error("Error queuing tasks")
self.log.exception(e)
continue
# Call heartbeats
self.log.debug("Heartbeating the executor")
self.executor.heartbeat()
self._change_state_for_tasks_failed_to_execute()
# Process events from the executor
self._process_executor_events(simple_dag_bag)
# Heartbeat the scheduler periodically
time_since_last_heartbeat = (timezone.utcnow() -
last_self_heartbeat_time).total_seconds()
if time_since_last_heartbeat > self.heartrate:
self.log.debug("Heartbeating the scheduler")
self.heartbeat()
last_self_heartbeat_time = timezone.utcnow()
loop_end_time = time.time()
loop_duration = loop_end_time - loop_start_time
self.log.debug(
"Ran scheduling loop in %.2f seconds",
loop_duration)
self.log.debug("Sleeping for %.2f seconds", self._processor_poll_interval)
time.sleep(self._processor_poll_interval)
            # Exit early in test mode; run one additional scheduler loop to
            # reduce the possibility that a parsed DAG was put into the queue
            # by the DAG manager but not yet received by the DAG agent.
if self.processor_agent.done:
self._last_loop = True
if self._last_loop:
self.log.info("Exiting scheduler loop as all files"
" have been processed {} times".format(self.num_runs))
break
if loop_duration < 1:
sleep_length = 1 - loop_duration
self.log.debug(
"Sleeping for {0:.2f} seconds to prevent excessive logging"
.format(sleep_length))
sleep(sleep_length)
# Stop any processors
self.processor_agent.terminate()
# Verify that all files were processed, and if so, deactivate DAGs that
# haven't been touched by the scheduler as they likely have been
# deleted.
if self.processor_agent.all_files_processed:
self.log.info(
"Deactivating DAGs that haven't been touched since %s",
execute_start_time.isoformat()
)
models.DAG.deactivate_stale_dags(execute_start_time)
self.executor.end()
settings.Session.remove()
@provide_session
def process_file(self, file_path, zombies, pickle_dags=False, session=None):
"""
Process a Python file containing Airflow DAGs.
This includes:
1. Execute the file and look for DAG objects in the namespace.
2. Pickle the DAG and save it to the DB (if necessary).
3. For each DAG, see what tasks should run and create appropriate task
instances in the DB.
4. Record any errors importing the file into ORM
5. Kill (in ORM) any task instances belonging to the DAGs that haven't
issued a heartbeat in a while.
Returns a list of SimpleDag objects that represent the DAGs found in
the file
:param file_path: the path to the Python file that should be executed
:type file_path: unicode
:param zombies: zombie task instances to kill.
:type zombies: list[SimpleTaskInstance]
        :param pickle_dags: whether to serialize the DAGs found in the file and
save them to the db
:type pickle_dags: bool
:return: a list of SimpleDags made from the Dags found in the file
:rtype: list[SimpleDag]
"""
self.log.info("Processing file %s for tasks to queue", file_path)
# As DAGs are parsed from this file, they will be converted into SimpleDags
simple_dags = []
try:
dagbag = models.DagBag(file_path, include_examples=False)
except Exception:
self.log.exception("Failed at reloading the DAG file %s", file_path)
Stats.incr('dag_file_refresh_error', 1, 1)
return []
if len(dagbag.dags) > 0:
self.log.info("DAG(s) %s retrieved from %s", dagbag.dags.keys(), file_path)
else:
self.log.warning("No viable dags retrieved from %s", file_path)
self.update_import_errors(session, dagbag)
return []
# Save individual DAGs in the ORM and update DagModel.last_scheduled_time
for dag in dagbag.dags.values():
dag.sync_to_db()
paused_dag_ids = [dag.dag_id for dag in dagbag.dags.values()
if dag.is_paused]
# Pickle the DAGs (if necessary) and put them into a SimpleDag
for dag_id in dagbag.dags:
dag = dagbag.get_dag(dag_id)
pickle_id = None
if pickle_dags:
pickle_id = dag.pickle(session).id
# Only return DAGs that are not paused
if dag_id not in paused_dag_ids:
simple_dags.append(SimpleDag(dag, pickle_id=pickle_id))
if len(self.dag_ids) > 0:
dags = [dag for dag in dagbag.dags.values()
if dag.dag_id in self.dag_ids and
dag.dag_id not in paused_dag_ids]
else:
dags = [dag for dag in dagbag.dags.values()
if not dag.parent_dag and
dag.dag_id not in paused_dag_ids]
        # Not using multiprocessing.Queue() since this no longer runs in a
        # separate process, and due to some unusual behavior
        # (empty() incorrectly returns True?)
ti_keys_to_schedule = []
self._process_dags(dagbag, dags, ti_keys_to_schedule)
for ti_key in ti_keys_to_schedule:
dag = dagbag.dags[ti_key[0]]
task = dag.get_task(ti_key[1])
ti = models.TaskInstance(task, ti_key[2])
ti.refresh_from_db(session=session, lock_for_update=True)
            # We can defer the task dependency checks to the workers themselves
            # since they can be expensive to run in the scheduler.
dep_context = DepContext(deps=QUEUE_DEPS, ignore_task_deps=True)
# Only schedule tasks that have their dependencies met, e.g. to avoid
# a task that recently got its state changed to RUNNING from somewhere
# other than the scheduler from getting its state overwritten.
# TODO(aoen): It's not great that we have to check all the task instance
# dependencies twice; once to get the task scheduled, and again to actually
# run the task. We should try to come up with a way to only check them once.
if ti.are_dependencies_met(
dep_context=dep_context,
session=session,
verbose=True):
# Task starts out in the scheduled state. All tasks in the
# scheduled state will be sent to the executor
ti.state = State.SCHEDULED
# Also save this task instance to the DB.
self.log.info("Creating / updating %s in ORM", ti)
session.merge(ti)
# commit batch
session.commit()
# Record import errors into the ORM
try:
self.update_import_errors(session, dagbag)
except Exception:
self.log.exception("Error logging import errors!")
try:
dagbag.kill_zombies(zombies)
except Exception:
self.log.exception("Error killing zombies!")
return simple_dags
@provide_session
def heartbeat_callback(self, session=None):
Stats.incr('scheduler_heartbeat', 1, 1)
class BackfillJob(BaseJob):
"""
A backfill job consists of a dag or subdag for a specific time range. It
triggers a set of task instance runs, in the right order and lasts for
as long as it takes for the set of task instance to be completed.
"""
ID_PREFIX = 'backfill_'
ID_FORMAT_PREFIX = ID_PREFIX + '{0}'
__mapper_args__ = {
'polymorphic_identity': 'BackfillJob'
}
class _DagRunTaskStatus(object):
"""
Internal status of the backfill job. This class is intended to be instantiated
only within a BackfillJob instance and will track the execution of tasks,
e.g. running, skipped, succeeded, failed, etc. Information about the dag runs
        related to the backfill job is also tracked in this structure,
        e.g. finished runs, etc. Any other status information related to the
execution of dag runs / tasks can be included in this structure since it makes
it easier to pass it around.
"""
# TODO(edgarRd): AIRFLOW-1444: Add consistency check on counts
def __init__(self,
to_run=None,
running=None,
skipped=None,
succeeded=None,
failed=None,
not_ready=None,
deadlocked=None,
active_runs=None,
executed_dag_run_dates=None,
finished_runs=0,
total_runs=0,
):
"""
:param to_run: Tasks to run in the backfill
:type to_run: dict[Tuple[String, String, DateTime], TaskInstance]
:param running: Maps running task instance key to task instance object
:type running: dict[Tuple[String, String, DateTime], TaskInstance]
:param skipped: Tasks that have been skipped
:type skipped: set[Tuple[String, String, DateTime]]
:param succeeded: Tasks that have succeeded so far
:type succeeded: set[Tuple[String, String, DateTime]]
:param failed: Tasks that have failed
:type failed: set[Tuple[String, String, DateTime]]
:param not_ready: Tasks not ready for execution
:type not_ready: set[Tuple[String, String, DateTime]]
:param deadlocked: Deadlocked tasks
:type deadlocked: set[Tuple[String, String, DateTime]]
:param active_runs: Active dag runs at a certain point in time
:type active_runs: list[DagRun]
:param executed_dag_run_dates: Datetime objects for the executed dag runs
:type executed_dag_run_dates: set[Datetime]
:param finished_runs: Number of finished runs so far
:type finished_runs: int
:param total_runs: Number of total dag runs able to run
:type total_runs: int
"""
self.to_run = to_run or dict()
self.running = running or dict()
self.skipped = skipped or set()
self.succeeded = succeeded or set()
self.failed = failed or set()
self.not_ready = not_ready or set()
self.deadlocked = deadlocked or set()
self.active_runs = active_runs or list()
self.executed_dag_run_dates = executed_dag_run_dates or set()
self.finished_runs = finished_runs
self.total_runs = total_runs
def __init__(
self,
dag,
start_date=None,
end_date=None,
mark_success=False,
donot_pickle=False,
ignore_first_depends_on_past=False,
ignore_task_deps=False,
pool=None,
delay_on_limit_secs=1.0,
verbose=False,
conf=None,
rerun_failed_tasks=False,
*args, **kwargs):
        self.dag = dag
        self.dag_id = dag.dag_id
        self.bf_start_date = start_date
        self.bf_end_date = end_date
        self.mark_success = mark_success
        self.donot_pickle = donot_pickle
        self.ignore_first_depends_on_past = ignore_first_depends_on_past
        self.ignore_task_deps = ignore_task_deps
        self.pool = pool
        self.delay_on_limit_secs = delay_on_limit_secs
        self.verbose = verbose
        self.conf = conf
        self.rerun_failed_tasks = rerun_failed_tasks
        super(BackfillJob, self).__init__(*args, **kwargs)
def _update_counters(self, ti_status):
"""
        Updates the counters per state of the task instances that were running.
        Task instances that need to run again are re-added to the to_run set.
:param ti_status: the internal status of the backfill job tasks
:type ti_status: BackfillJob._DagRunTaskStatus
"""
for key, ti in list(ti_status.running.items()):
ti.refresh_from_db()
if ti.state == State.SUCCESS:
ti_status.succeeded.add(key)
self.log.debug("Task instance %s succeeded. Don't rerun.", ti)
ti_status.running.pop(key)
continue
elif ti.state == State.SKIPPED:
ti_status.skipped.add(key)
self.log.debug("Task instance %s skipped. Don't rerun.", ti)
ti_status.running.pop(key)
continue
elif ti.state == State.FAILED:
self.log.error("Task instance %s failed", ti)
ti_status.failed.add(key)
ti_status.running.pop(key)
continue
# special case: if the task needs to run again put it back
elif ti.state == State.UP_FOR_RETRY:
self.log.warning("Task instance %s is up for retry", ti)
ti_status.running.pop(key)
ti_status.to_run[key] = ti
# special case: The state of the task can be set to NONE by the task itself
# when it reaches concurrency limits. It could also happen when the state
# is changed externally, e.g. by clearing tasks from the ui. We need to cover
# for that as otherwise those tasks would fall outside of the scope of
# the backfill suddenly.
elif ti.state == State.NONE:
self.log.warning(
"FIXME: task instance %s state was set to none externally or "
"reaching concurrency limits. Re-adding task to queue.",
ti
)
ti.set_state(State.SCHEDULED)
ti_status.running.pop(key)
ti_status.to_run[key] = ti
def _manage_executor_state(self, running):
"""
Checks if the executor agrees with the state of task instances
that are running
:param running: dict of key, task to verify
"""
executor = self.executor
for key, state in list(executor.get_event_buffer().items()):
if key not in running:
self.log.warning(
"%s state %s not in running=%s",
key, state, running.values()
)
continue
ti = running[key]
ti.refresh_from_db()
self.log.debug("Executor state: %s task %s", state, ti)
if state == State.FAILED or state == State.SUCCESS:
if ti.state == State.RUNNING or ti.state == State.QUEUED:
msg = ("Executor reports task instance {} finished ({}) "
"although the task says its {}. Was the task "
"killed externally?".format(ti, state, ti.state))
self.log.error(msg)
ti.handle_failure(msg)
@provide_session
def _get_dag_run(self, run_date, session=None):
"""
        Return a dag run for the given run date: an existing dag run is
        reused if one is found, otherwise a new dag run is created. If the
        max_active_runs limit is reached, this function returns None.
:param run_date: the execution date for the dag run
:type run_date: datetime
:param session: the database session object
:type session: Session
:return: a DagRun in state RUNNING or None
"""
run_id = BackfillJob.ID_FORMAT_PREFIX.format(run_date.isoformat())
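        # e.g. (illustrative) run_id == 'backfill_2019-01-01T00:00:00+00:00'
        # for a UTC-aware run_date of 2019-01-01.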
# consider max_active_runs but ignore when running subdags
        respect_dag_max_active_limit = bool(self.dag.schedule_interval and
                                            not self.dag.is_subdag)
current_active_dag_count = self.dag.get_num_active_runs(external_trigger=False)
        # check if we are scheduling on top of an already existing dag_run
        # (we could find a "scheduled" run instead of a "backfill" one)
run = DagRun.find(dag_id=self.dag.dag_id,
execution_date=run_date,
session=session)
if run is not None and len(run) > 0:
run = run[0]
if run.state == State.RUNNING:
respect_dag_max_active_limit = False
else:
run = None
# enforce max_active_runs limit for dag, special cases already
# handled by respect_dag_max_active_limit
if (respect_dag_max_active_limit and
current_active_dag_count >= self.dag.max_active_runs):
return None
run = run or self.dag.create_dagrun(
run_id=run_id,
execution_date=run_date,
start_date=timezone.utcnow(),
state=State.RUNNING,
external_trigger=False,
session=session,
conf=self.conf,
)
# set required transient field
run.dag = self.dag
# explicitly mark as backfill and running
run.state = State.RUNNING
run.run_id = run_id
run.verify_integrity(session=session)
return run
@provide_session
def _task_instances_for_dag_run(self, dag_run, session=None):
"""
Returns a map of task instance key to task instance object for the tasks to
run in the given dag run.
:param dag_run: the dag run to get the tasks from
:type dag_run: models.DagRun
:param session: the database session object
:type session: Session
"""
tasks_to_run = {}
if dag_run is None:
return tasks_to_run
# check if we have orphaned tasks
self.reset_state_for_orphaned_tasks(filter_by_dag_run=dag_run, session=session)
        # for some reason, if we don't refresh, the reference to the run is lost
dag_run.refresh_from_db()
make_transient(dag_run)
# TODO(edgarRd): AIRFLOW-1464 change to batch query to improve perf
for ti in dag_run.get_task_instances():
# all tasks part of the backfill are scheduled to run
if ti.state == State.NONE:
ti.set_state(State.SCHEDULED, session=session)
if ti.state != State.REMOVED:
tasks_to_run[ti.key] = ti
return tasks_to_run
def _log_progress(self, ti_status):
msg = ' | '.join([
"[backfill progress]",
"finished run {0} of {1}",
"tasks waiting: {2}",
"succeeded: {3}",
"running: {4}",
"failed: {5}",
"skipped: {6}",
"deadlocked: {7}",
"not ready: {8}"
]).format(
ti_status.finished_runs,
ti_status.total_runs,
len(ti_status.to_run),
len(ti_status.succeeded),
len(ti_status.running),
len(ti_status.failed),
len(ti_status.skipped),
len(ti_status.deadlocked),
len(ti_status.not_ready))
self.log.info(msg)
self.log.debug(
"Finished dag run loop iteration. Remaining tasks %s",
ti_status.to_run.values()
)
@provide_session
def _process_backfill_task_instances(self,
ti_status,
executor,
pickle_id,
start_date=None, session=None):
"""
Process a set of task instances from a set of dag runs. Special handling is done
to account for different task instance states that could be present when running
them in a backfill process.
:param ti_status: the internal status of the job
:type ti_status: BackfillJob._DagRunTaskStatus
:param executor: the executor to run the task instances
:type executor: BaseExecutor
:param pickle_id: the pickle_id if dag is pickled, None otherwise
:type pickle_id: int
:param start_date: the start date of the backfill job
:type start_date: datetime
:param session: the current session object
:type session: Session
:return: the list of execution_dates for the finished dag runs
:rtype: list
"""
executed_run_dates = []
while ((len(ti_status.to_run) > 0 or len(ti_status.running) > 0) and
len(ti_status.deadlocked) == 0):
self.log.debug("*** Clearing out not_ready list ***")
ti_status.not_ready.clear()
# we need to execute the tasks bottom to top
# or leaf to root, as otherwise tasks might be
# determined deadlocked while they are actually
# waiting for their upstream to finish
for task in self.dag.topological_sort():
for key, ti in list(ti_status.to_run.items()):
if task.task_id != ti.task_id:
continue
ti.refresh_from_db()
task = self.dag.get_task(ti.task_id)
ti.task = task
ignore_depends_on_past = (
self.ignore_first_depends_on_past and
ti.execution_date == (start_date or ti.start_date))
self.log.debug(
"Task instance to run %s state %s", ti, ti.state)
# The task was already marked successful or skipped by a
# different Job. Don't rerun it.
if ti.state == State.SUCCESS:
ti_status.succeeded.add(key)
self.log.debug("Task instance %s succeeded. Don't rerun.", ti)
ti_status.to_run.pop(key)
if key in ti_status.running:
ti_status.running.pop(key)
continue
elif ti.state == State.SKIPPED:
ti_status.skipped.add(key)
self.log.debug("Task instance %s skipped. Don't rerun.", ti)
ti_status.to_run.pop(key)
if key in ti_status.running:
ti_status.running.pop(key)
continue
# guard against externally modified tasks instances or
# in case max concurrency has been reached at task runtime
elif ti.state == State.NONE:
                        self.log.warning(
                            "FIXME: task instance %s state was set to None "
                            "externally. This should not happen", ti
                        )
ti.set_state(State.SCHEDULED, session=session)
if self.rerun_failed_tasks:
                        # Rerun failed tasks or upstream-failed tasks
if ti.state in (State.FAILED, State.UPSTREAM_FAILED):
self.log.error("Task instance {ti} "
"with state {state}".format(ti=ti,
state=ti.state))
if key in ti_status.running:
ti_status.running.pop(key)
# Reset the failed task in backfill to scheduled state
ti.set_state(State.SCHEDULED, session=session)
else:
# Default behaviour which works for subdag.
if ti.state in (State.FAILED, State.UPSTREAM_FAILED):
self.log.error("Task instance {ti} "
"with {state} state".format(ti=ti,
state=ti.state))
ti_status.failed.add(key)
ti_status.to_run.pop(key)
if key in ti_status.running:
ti_status.running.pop(key)
continue
backfill_context = DepContext(
deps=RUN_DEPS,
ignore_depends_on_past=ignore_depends_on_past,
ignore_task_deps=self.ignore_task_deps,
flag_upstream_failed=True)
# Is the task runnable? -- then run it
# the dependency checker can change states of tis
if ti.are_dependencies_met(
dep_context=backfill_context,
session=session,
verbose=self.verbose):
ti.refresh_from_db(lock_for_update=True, session=session)
if ti.state == State.SCHEDULED or ti.state == State.UP_FOR_RETRY:
if executor.has_task(ti):
self.log.debug(
"Task Instance %s already in executor "
"waiting for queue to clear",
ti
)
else:
self.log.debug('Sending %s to executor', ti)
# Skip scheduled state, we are executing immediately
ti.state = State.QUEUED
session.merge(ti)
cfg_path = None
if executor.__class__ in (executors.LocalExecutor,
executors.SequentialExecutor):
cfg_path = tmp_configuration_copy()
executor.queue_task_instance(
ti,
mark_success=self.mark_success,
pickle_id=pickle_id,
ignore_task_deps=self.ignore_task_deps,
ignore_depends_on_past=ignore_depends_on_past,
pool=self.pool,
cfg_path=cfg_path)
ti_status.running[key] = ti
ti_status.to_run.pop(key)
session.commit()
continue
if ti.state == State.UPSTREAM_FAILED:
self.log.error("Task instance %s upstream failed", ti)
ti_status.failed.add(key)
ti_status.to_run.pop(key)
if key in ti_status.running:
ti_status.running.pop(key)
continue
# special case
if ti.state == State.UP_FOR_RETRY:
self.log.debug(
"Task instance %s retry period not "
"expired yet", ti)
if key in ti_status.running:
ti_status.running.pop(key)
ti_status.to_run[key] = ti
continue
# all remaining tasks
self.log.debug('Adding %s to not_ready', ti)
ti_status.not_ready.add(key)
# execute the tasks in the queue
self.heartbeat()
executor.heartbeat()
# If the set of tasks that aren't ready ever equals the set of
# tasks to run and there are no running tasks then the backfill
# is deadlocked
if (ti_status.not_ready and
ti_status.not_ready == set(ti_status.to_run) and
len(ti_status.running) == 0):
self.log.warning(
"Deadlock discovered for ti_status.to_run=%s",
ti_status.to_run.values()
)
ti_status.deadlocked.update(ti_status.to_run.values())
ti_status.to_run.clear()
# check executor state
self._manage_executor_state(ti_status.running)
# update the task counters
self._update_counters(ti_status=ti_status)
# update dag run state
_dag_runs = ti_status.active_runs[:]
for run in _dag_runs:
run.update_state(session=session)
if run.state in State.finished():
ti_status.finished_runs += 1
ti_status.active_runs.remove(run)
executed_run_dates.append(run.execution_date)
self._log_progress(ti_status)
# return updated status
return executed_run_dates
@provide_session
def _collect_errors(self, ti_status, session=None):
err = ''
if ti_status.failed:
err += (
"---------------------------------------------------\n"
"Some task instances failed:\n{}\n".format(ti_status.failed))
if ti_status.deadlocked:
err += (
'---------------------------------------------------\n'
'BackfillJob is deadlocked.')
deadlocked_depends_on_past = any(
t.are_dependencies_met(
dep_context=DepContext(ignore_depends_on_past=False),
session=session,
verbose=self.verbose) !=
t.are_dependencies_met(
dep_context=DepContext(ignore_depends_on_past=True),
session=session,
verbose=self.verbose)
for t in ti_status.deadlocked)
if deadlocked_depends_on_past:
err += (
'Some of the deadlocked tasks were unable to run because '
'of "depends_on_past" relationships. Try running the '
'backfill with the option '
'"ignore_first_depends_on_past=True" or passing "-I" at '
'the command line.')
err += ' These tasks have succeeded:\n{}\n'.format(ti_status.succeeded)
err += ' These tasks are running:\n{}\n'.format(ti_status.running)
err += ' These tasks have failed:\n{}\n'.format(ti_status.failed)
err += ' These tasks are skipped:\n{}\n'.format(ti_status.skipped)
err += ' These tasks are deadlocked:\n{}\n'.format(ti_status.deadlocked)
return err
@provide_session
def _execute_for_run_dates(self, run_dates, ti_status, executor, pickle_id,
start_date, session=None):
"""
Computes the dag runs and their respective task instances for
the given run dates and executes the task instances.
Returns a list of execution dates of the dag runs that were executed.
:param run_dates: Execution dates for dag runs
:type run_dates: list
        :param ti_status: internal BackfillJob status structure to track progress
:type ti_status: BackfillJob._DagRunTaskStatus
:param executor: the executor to use, it must be previously started
:type executor: BaseExecutor
:param pickle_id: numeric id of the pickled dag, None if not pickled
:type pickle_id: int
:param start_date: backfill start date
:type start_date: datetime
:param session: the current session object
:type session: Session
"""
for next_run_date in run_dates:
dag_run = self._get_dag_run(next_run_date, session=session)
tis_map = self._task_instances_for_dag_run(dag_run,
session=session)
if dag_run is None:
continue
ti_status.active_runs.append(dag_run)
ti_status.to_run.update(tis_map or {})
processed_dag_run_dates = self._process_backfill_task_instances(
ti_status=ti_status,
executor=executor,
pickle_id=pickle_id,
start_date=start_date,
session=session)
ti_status.executed_dag_run_dates.update(processed_dag_run_dates)
@provide_session
def _execute(self, session=None):
"""
Initializes all components required to run a dag for a specified date range and
calls helper method to execute the tasks.
"""
ti_status = BackfillJob._DagRunTaskStatus()
start_date = self.bf_start_date
# Get intervals between the start/end dates, which will turn into dag runs
run_dates = self.dag.get_run_dates(start_date=start_date,
end_date=self.bf_end_date)
if len(run_dates) == 0:
self.log.info("No run dates were found for the given dates and dag interval.")
return
# picklin'
pickle_id = None
if not self.donot_pickle and self.executor.__class__ not in (
executors.LocalExecutor, executors.SequentialExecutor):
pickle = DagPickle(self.dag)
session.add(pickle)
session.commit()
pickle_id = pickle.id
executor = self.executor
executor.start()
ti_status.total_runs = len(run_dates) # total dag runs in backfill
try:
remaining_dates = ti_status.total_runs
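            # Keep looping until every run date has executed; when the dag's
            # max_active_runs limit throttles progress, sleep and retry the
            # remaining dates below.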
while remaining_dates > 0:
dates_to_process = [run_date for run_date in run_dates
if run_date not in ti_status.executed_dag_run_dates]
self._execute_for_run_dates(run_dates=dates_to_process,
ti_status=ti_status,
executor=executor,
pickle_id=pickle_id,
start_date=start_date,
session=session)
remaining_dates = (
ti_status.total_runs - len(ti_status.executed_dag_run_dates)
)
err = self._collect_errors(ti_status=ti_status, session=session)
if err:
raise AirflowException(err)
if remaining_dates > 0:
self.log.info(
"max_active_runs limit for dag %s has been reached "
" - waiting for other dag runs to finish",
self.dag_id
)
time.sleep(self.delay_on_limit_secs)
finally:
executor.end()
session.commit()
self.log.info("Backfill done. Exiting.")
class LocalTaskJob(BaseJob):
__mapper_args__ = {
'polymorphic_identity': 'LocalTaskJob'
}
def __init__(
self,
task_instance,
ignore_all_deps=False,
ignore_depends_on_past=False,
ignore_task_deps=False,
ignore_ti_state=False,
mark_success=False,
pickle_id=None,
pool=None,
*args, **kwargs):
self.task_instance = task_instance
self.dag_id = task_instance.dag_id
self.ignore_all_deps = ignore_all_deps
self.ignore_depends_on_past = ignore_depends_on_past
self.ignore_task_deps = ignore_task_deps
self.ignore_ti_state = ignore_ti_state
self.pool = pool
self.pickle_id = pickle_id
self.mark_success = mark_success
        # terminating state is used so that a job doesn't try to
        # terminate multiple times
self.terminating = False
super(LocalTaskJob, self).__init__(*args, **kwargs)
def _execute(self):
self.task_runner = get_task_runner(self)
def signal_handler(signum, frame):
"""Setting kill signal handler"""
self.log.error("Received SIGTERM. Terminating subprocesses")
self.on_kill()
raise AirflowException("LocalTaskJob received SIGTERM signal")
signal.signal(signal.SIGTERM, signal_handler)
if not self.task_instance._check_and_change_state_before_execution(
mark_success=self.mark_success,
ignore_all_deps=self.ignore_all_deps,
ignore_depends_on_past=self.ignore_depends_on_past,
ignore_task_deps=self.ignore_task_deps,
ignore_ti_state=self.ignore_ti_state,
job_id=self.id,
pool=self.pool):
self.log.info("Task is not able to be run")
return
try:
self.task_runner.start()
last_heartbeat_time = time.time()
heartbeat_time_limit = conf.getint('scheduler',
'scheduler_zombie_task_threshold')
while True:
# Monitor the task to see if it's done
return_code = self.task_runner.return_code()
if return_code is not None:
self.log.info("Task exited with return code %s", return_code)
return
# Periodically heartbeat so that the scheduler doesn't think this
# is a zombie
try:
self.heartbeat()
last_heartbeat_time = time.time()
except OperationalError:
Stats.incr('local_task_job_heartbeat_failure', 1, 1)
self.log.exception(
"Exception while trying to heartbeat! Sleeping for %s seconds",
self.heartrate
)
time.sleep(self.heartrate)
# If it's been too long since we've heartbeat, then it's possible that
# the scheduler rescheduled this task, so kill launched processes.
time_since_last_heartbeat = time.time() - last_heartbeat_time
if time_since_last_heartbeat > heartbeat_time_limit:
Stats.incr('local_task_job_prolonged_heartbeat_failure', 1, 1)
self.log.error("Heartbeat time limited exceeded!")
raise AirflowException("Time since last heartbeat({:.2f}s) "
"exceeded limit ({}s)."
.format(time_since_last_heartbeat,
heartbeat_time_limit))
finally:
self.on_kill()
def on_kill(self):
self.task_runner.terminate()
self.task_runner.on_finish()
@provide_session
def heartbeat_callback(self, session=None):
"""Self destruct task if state has been moved away from running externally"""
if self.terminating:
# ensure termination if processes are created later
self.task_runner.terminate()
return
self.task_instance.refresh_from_db()
ti = self.task_instance
fqdn = get_hostname()
same_hostname = fqdn == ti.hostname
same_process = ti.pid == os.getpid()
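        # A running task whose recorded hostname or pid no longer matches this
        # process was most likely re-scheduled elsewhere; fail fast below.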
if ti.state == State.RUNNING:
if not same_hostname:
self.log.warning("The recorded hostname {ti.hostname} "
"does not match this instance's hostname "
"{fqdn}".format(**locals()))
raise AirflowException("Hostname of job runner does not match")
elif not same_process:
current_pid = os.getpid()
self.log.warning("Recorded pid {ti.pid} does not match "
"the current pid "
"{current_pid}".format(**locals()))
raise AirflowException("PID of job runner does not match")
elif (
self.task_runner.return_code() is None and
hasattr(self.task_runner, 'process')
):
self.log.warning(
"State of this instance has been externally set to %s. "
"Taking the poison pill.",
ti.state
)
self.task_runner.terminate()
self.terminating = True
|
"""
        :param dag: DAG object.
        :type dag: airflow.models.DAG
        :param start_date: start date for the backfill date range.
        :type start_date: datetime.datetime
        :param end_date: end date for the backfill date range.
        :type end_date: datetime.datetime
        :param mark_success: flag whether to mark the task instances as
            succeeded without running them.
        :type mark_success: bool
        :param donot_pickle: whether to skip pickling the DAG before handing
            it to the executor.
        :type donot_pickle: bool
        :param ignore_first_depends_on_past: whether to ignore depends_on_past
            for the first dag run only.
        :type ignore_first_depends_on_past: bool
        :param ignore_task_deps: whether to ignore task-specific dependencies.
        :type ignore_task_deps: bool
        :param pool: pool to run the backfilled task instances in.
        :type pool: str
        :param delay_on_limit_secs: time in seconds to wait before the next
            attempt when the max_active_runs limit has been reached.
        :type delay_on_limit_secs: float
        :param verbose: flag whether to display verbose messages on the
            backfill console.
        :type verbose: bool
        :param conf: a dictionary of key/value pairs made available to the
            backfilled dag runs.
        :type conf: dict
        :param rerun_failed_tasks: flag whether to auto-rerun failed tasks
            in the backfill.
        :type rerun_failed_tasks: bool
        :param args:
        :param kwargs:
"""
self.dag = dag
self.dag_id = dag.dag_id
self.bf_start_date = start_date
self.bf_end_date = end_date
self.mark_success = mark_success
self.donot_pickle = donot_pickle
self.ignore_first_depends_on_past = ignore_first_depends_on_past
self.ignore_task_deps = ignore_task_deps
self.pool = pool
self.delay_on_limit_secs = delay_on_limit_secs
self.verbose = verbose
self.conf = conf
self.rerun_failed_tasks = rerun_failed_tasks
super(BackfillJob, self).__init__(*args, **kwargs)
|
transfermarket.py
|
from lxml import html
import requests
|
url = 'https://www.transfermarkt.com/ac-mailand/transfers/verein/5/saison_id/2017'
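# A browser-like User-Agent is sent because the site tends to reject the
# default python-requests user agent.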
headers = {
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.9; rv:45.0) Gecko/20100101 Firefox/45.0'
}
page = requests.get(url, headers=headers)
tree = html.fromstring(page.content)
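# Player links on the transfers page carry the "spielprofil_tooltip" class;
# taking the anchor text of those links yields the player names.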
players = tree.xpath('//a[@class="spielprofil_tooltip"]/text()')
print('Players: ', players)
| |
codec.rs
|
use bitvec::prelude::*;
use std::io;
const STOP_BYTE: u8 = 0x80;
const SIGNIFICANT_BYTE: u8 = !STOP_BYTE;
const NEGATIVE_SIGN_MASK: u8 = 0x40;
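// Stop-bit encoding: each byte carries 7 data bits and the high bit marks the
// final byte of an entity (this mirrors the FIX/FAST transfer encoding).
// Worked example: 150 = 0b1_0010110 serializes to [0x01, 0x96] -- 0x01 holds
// the top seven bits and 0x96 == 0x16 | STOP_BYTE carries the low seven.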
/// A trait to (de)serialize on-the-wire representations of entities.
pub trait Codec {
fn deserialize(&mut self, input: &mut impl io::Read) -> io::Result<usize>;
fn serialize(&self, output: &mut impl io::Write) -> io::Result<usize>;
}
impl Codec for u32 {
fn serialize(&self, output: &mut impl io::Write) -> io::Result<usize> {
let mut bytes = [0u8; 5];
let i;
if *self >= 0x1000_0000 {
bytes[0] = (self >> 28) as u8;
bytes[1] = (self >> 21) as u8 & SIGNIFICANT_BYTE;
bytes[2] = (self >> 14) as u8 & SIGNIFICANT_BYTE;
bytes[3] = (self >> 7) as u8 & SIGNIFICANT_BYTE;
bytes[4] = *self as u8 | STOP_BYTE;
i = 5;
} else if *self >= 0x20_0000 {
bytes[0] = (self >> 21) as u8 & SIGNIFICANT_BYTE;
bytes[1] = (self >> 14) as u8 & SIGNIFICANT_BYTE;
bytes[2] = (self >> 7) as u8 & SIGNIFICANT_BYTE;
bytes[3] = *self as u8 | STOP_BYTE;
i = 4;
} else if *self >= 0x4000 {
bytes[0] = (self >> 14) as u8 & SIGNIFICANT_BYTE;
bytes[1] = (self >> 7) as u8 & SIGNIFICANT_BYTE;
bytes[2] = *self as u8 | STOP_BYTE;
i = 3;
} else if *self >= 0x80 {
bytes[0] = (self >> 7) as u8 & SIGNIFICANT_BYTE;
bytes[1] = *self as u8 | STOP_BYTE;
i = 2;
} else {
bytes[0] = *self as u8 | STOP_BYTE;
i = 1;
}
output.write_all(&bytes[..i])?;
Ok(i)
}
fn deserialize(&mut self, input: &mut impl io::Read) -> io::Result<usize> {
*self = 0;
let bytes = decode_stop_bit_entity(input)?;
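        // Fold in 7 data bits per byte, most significant byte first.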
for byte in &bytes {
*self = (*self << 7) | u32::from(*byte);
}
Ok(bytes.len())
}
}
impl Codec for i32 {
fn serialize(&self, output: &mut impl io::Write) -> io::Result<usize> {
let mut bytes = [0u8; 5];
let i;
        // unsigned_abs avoids overflow when self == i32::MIN.
        let abs = self.unsigned_abs();
        if abs >= 0x800_0000 {
            // Mask the top byte so a negative value's sign bits cannot set
            // the stop bit prematurely.
            bytes[0] = (self >> 28) as u8 & SIGNIFICANT_BYTE;
bytes[1] = (self >> 21) as u8 & SIGNIFICANT_BYTE;
bytes[2] = (self >> 14) as u8 & SIGNIFICANT_BYTE;
bytes[3] = (self >> 7) as u8 & SIGNIFICANT_BYTE;
bytes[4] = *self as u8 | STOP_BYTE;
i = 5;
} else if abs >= 0x10_0000 {
bytes[0] = (self >> 21) as u8 & SIGNIFICANT_BYTE;
bytes[1] = (self >> 14) as u8 & SIGNIFICANT_BYTE;
bytes[2] = (self >> 7) as u8 & SIGNIFICANT_BYTE;
bytes[3] = *self as u8 | STOP_BYTE;
i = 4;
} else if abs >= 0x2000 {
bytes[0] = (self >> 14) as u8 & SIGNIFICANT_BYTE;
bytes[1] = (self >> 7) as u8 & SIGNIFICANT_BYTE;
bytes[2] = *self as u8 | STOP_BYTE;
i = 3;
} else if abs >= 0x40 {
bytes[0] = (self >> 7) as u8 & SIGNIFICANT_BYTE;
bytes[1] = *self as u8 | STOP_BYTE;
i = 2;
} else {
bytes[0] = *self as u8 | STOP_BYTE;
i = 1;
}
output.write_all(&bytes[..i])?;
Ok(i)
}
fn deserialize(&mut self, input: &mut impl io::Read) -> io::Result<usize> {
let bytes = decode_stop_bit_entity(input)?;
let is_negative = (bytes[0] & NEGATIVE_SIGN_MASK) != 0;
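        // Seed with all ones for negatives so the 7-bit chunks shift into a
        // sign-extended value (the sign lives in bit 6 of the first byte).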
*self = -(is_negative as i32);
for byte in &bytes {
*self = (*self << 7) | i32::from(*byte);
}
Ok(bytes.len())
}
}
impl Codec for u64 {
fn deserialize(&mut self, _input: &mut impl io::Read) -> io::Result<usize> {
todo!();
}
fn serialize(&self, _output: &mut impl io::Write) -> io::Result<usize> {
todo!();
}
}
impl Codec for i64 {
fn deserialize(&mut self, _input: &mut impl io::Read) -> io::Result<usize> {
todo!();
}
fn serialize(&self, _output: &mut impl io::Write) -> io::Result<usize> {
todo!();
}
}
impl Codec for Vec<u8> {
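    // Byte vectors are length-prefixed: a stop-bit encoded u32 length
    // followed by the raw bytes. Note the returned count covers only the
    // payload, not the length prefix.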
fn serialize(&self, output: &mut impl io::Write) -> io::Result<usize> {
let len = self.len() as u32;
len.serialize(output)?;
output.write_all(self)?;
Ok(len as usize)
}
fn deserialize(&mut self, input: &mut impl io::Read) -> io::Result<usize> {
let mut len = 0u32;
len.deserialize(input)?;
*self = vec![0u8; len as usize];
input.read_exact(&mut self[..])?;
Ok(len as usize)
}
}
impl Codec for String {
fn serialize(&self, output: &mut impl io::Write) -> io::Result<usize> {
let len = self.len() as u32;
let bytes = self.as_bytes();
len.serialize(output)?;
output.write_all(bytes)?;
Ok(len as usize)
}
fn deserialize(&mut self, input: &mut impl io::Read) -> io::Result<usize> {
let mut len = 0u32;
len.deserialize(input)?;
let mut bytes = vec![0u8; len as usize];
input.read_exact(&mut bytes[..])?;
*self = String::from_utf8_lossy(&bytes[..]).to_string();
Ok(len as usize)
}
}
fn _serialize_bitvec(bits: &BitSlice<Msb0, u8>, output: &mut impl io::Write) -> io::Result<usize> {
let significant_data_bits_per_byte = bits.chunks_exact(7);
let mut i = 0;
    let remainder = significant_data_bits_per_byte.remainder().load::<u8>();
for significant_data_bits in significant_data_bits_per_byte {
let byte = significant_data_bits.load::<u8>();
if byte != 0 {
output.write_all(&[byte])?;
i += 1;
}
}
    if remainder != 0 {
        output.write_all(&[STOP_BYTE | remainder])?;
        i += 1;
    }
    Ok(i)
}
#[derive(Debug, Clone)]
pub struct PresenceMap {
bits: BitVec,
}
impl PresenceMap {
pub fn bits(&self) -> impl Iterator<Item = &bool> {
self.bits.iter()
}
}
impl Codec for PresenceMap {
fn serialize(&self, _output: &mut impl io::Write) -> io::Result<usize>
|
fn deserialize(&mut self, input: &mut impl io::Read) -> io::Result<usize> {
self.bits = BitVec::new();
let mut stop_bit = false;
while !stop_bit {
let mut buffer = [0u8; 1];
input.read_exact(&mut buffer[..])?;
let byte = buffer[0];
stop_bit = byte >= STOP_BYTE;
            if !stop_bit {
                self.bits.push(byte >> 7 == 1);
            }
            // Push the seven significant data bits, most significant first.
            self.bits.push((byte >> 6) & 1 == 1);
            self.bits.push((byte >> 5) & 1 == 1);
            self.bits.push((byte >> 4) & 1 == 1);
            self.bits.push((byte >> 3) & 1 == 1);
            self.bits.push((byte >> 2) & 1 == 1);
            self.bits.push((byte >> 1) & 1 == 1);
            self.bits.push(byte & 1 == 1);
}
Ok(self.bits.len())
}
}
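/// Reads bytes until one with the stop bit (high bit) set, strips the stop
/// bit from that final byte, and returns the accumulated 7-bit payload bytes.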
pub fn decode_stop_bit_entity(input: &mut impl io::Read) -> io::Result<Vec<u8>> {
let mut bytes = Vec::new();
loop {
let mut byte = [0u8; 1];
input.read_exact(&mut byte[..])?;
if byte[0] >= STOP_BYTE {
byte[0] ^= STOP_BYTE;
bytes.push(byte[0]);
break;
} else {
bytes.push(byte[0]);
}
}
Ok(bytes)
}
#[allow(dead_code)]
pub fn decode_stop_bit_bitvec(input: &mut impl io::Read) -> io::Result<BitVec> {
let mut bits = BitVec::new();
let mut stop_bit = false;
while !stop_bit {
let mut buffer = [0u8; 1];
input.read_exact(&mut buffer[..])?;
let byte = buffer[0];
stop_bit = byte >= STOP_BYTE;
        if !stop_bit {
            bits.push(byte >> 7 == 1);
        }
        // Push the seven significant data bits, most significant first.
        bits.push((byte >> 6) & 1 == 1);
        bits.push((byte >> 5) & 1 == 1);
        bits.push((byte >> 4) & 1 == 1);
        bits.push((byte >> 3) & 1 == 1);
        bits.push((byte >> 2) & 1 == 1);
        bits.push((byte >> 1) & 1 == 1);
        bits.push(byte & 1 == 1);
}
Ok(bits)
}
#[cfg(test)]
mod test {
use super::*;
use quickcheck_macros::quickcheck;
#[quickcheck]
fn encode_then_decode_u32(expected_value: u32) -> bool {
let mut bytes: Vec<u8> = Vec::new();
expected_value.serialize(&mut bytes).unwrap();
let mut value = 0u32;
value.deserialize(&mut &bytes[..]).unwrap();
value == expected_value
}
#[test]
fn encode_i32_example() {
let mut bytes: Vec<u8> = Vec::new();
        (-7_942_755_i32).serialize(&mut bytes).unwrap();
assert_eq!(bytes, vec![0x7c, 0x1b, 0x1b, 0x9d]);
}
#[test]
fn decode_i32_fast_doc_example() {
let bytes: Vec<u8> = vec![0x7c, 0x1b, 0x1b, 0x9d];
let mut value = 0i32;
value.deserialize(&mut &bytes[..]).unwrap();
        assert_eq!(value, -7_942_755);
}
#[test]
fn encode_64i32_regression() {
let mut bytes: Vec<u8> = Vec::new();
(64i32).serialize(&mut bytes).unwrap();
assert_eq!(bytes, vec![0x00, 0xc0]);
}
#[test]
fn encode_then_decode_99i32_regression() {
let expected_value = 99i32;
let mut bytes: Vec<u8> = Vec::new();
expected_value.serialize(&mut bytes).unwrap();
let mut value = 0i32;
value.deserialize(&mut &bytes[..]).unwrap();
assert_eq!(value, expected_value);
}
#[quickcheck]
fn encode_then_decode_string(expected_value: String) -> bool {
let mut bytes: Vec<u8> = Vec::new();
expected_value.serialize(&mut bytes).unwrap();
let mut value = String::default();
value.deserialize(&mut &bytes[..]).unwrap();
*value == expected_value
}
#[quickcheck]
fn encode_then_decode_bytes(expected_value: Vec<u8>) -> bool {
let mut bytes: Vec<u8> = Vec::default();
expected_value.serialize(&mut bytes).unwrap();
let mut value = Vec::default();
value.deserialize(&mut &bytes[..]).unwrap();
*value == expected_value
}
}
|
{
todo!();
}
|
federated_trainer.py
|
# Copyright 2020, Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Runs federated training on various tasks using a generalized form of FedAvg.
Specifically, we create (according to flags) an iterative process that allows
for client and server learning rate schedules, as well as various client and
server optimization methods. For more details on the learning rate scheduling
and optimization methods, see `shared/optimizer_utils.py`. For details on the
iterative process, see `shared/fed_avg_schedule.py`.
"""
import collections
import os.path
from typing import Callable
from absl import app
from absl import flags
import tensorflow as tf
import tensorflow_federated as tff
from optimization.cifar100 import federated_cifar100
from optimization.emnist import federated_emnist
from optimization.emnist_ae import federated_emnist_ae
from optimization.shakespeare import federated_shakespeare
from optimization.shared import fed_avg_schedule
from optimization.shared import optimizer_utils
from optimization.shared import training_specs
from optimization.stackoverflow import federated_stackoverflow
from optimization.stackoverflow_lr import federated_stackoverflow_lr
from utils import training_loop
from utils import utils_impl
_SUPPORTED_TASKS = [
'cifar100', 'emnist_cr', 'emnist_ae', 'shakespeare', 'stackoverflow_nwp',
'stackoverflow_lr'
]
with utils_impl.record_hparam_flags() as optimizer_flags:
# Defining optimizer flags
optimizer_utils.define_optimizer_flags('client')
optimizer_utils.define_optimizer_flags('server')
optimizer_utils.define_lr_schedule_flags('client')
optimizer_utils.define_lr_schedule_flags('server')
with utils_impl.record_hparam_flags() as shared_flags:
# Federated training hyperparameters
  flags.DEFINE_integer('client_epochs_per_round', 1,
                       'Number of training epochs each client performs per round.')
flags.DEFINE_integer('client_batch_size', 20, 'Batch size on the clients.')
flags.DEFINE_integer('clients_per_round', 10,
'How many clients to sample per round.')
flags.DEFINE_integer('client_datasets_random_seed', 1,
'Random seed for client sampling.')
# Training loop configuration
flags.DEFINE_string(
      'experiment_name', None, 'The name of this experiment. Will be appended to '
'--root_output_dir to separate experiment results.')
flags.mark_flag_as_required('experiment_name')
flags.DEFINE_string('root_output_dir', '/tmp/fed_opt/',
'Root directory for writing experiment output.')
flags.DEFINE_integer('total_rounds', 200, 'Number of total training rounds.')
flags.DEFINE_integer(
'rounds_per_eval', 1,
'How often to evaluate the global model on the validation dataset.')
flags.DEFINE_integer('rounds_per_checkpoint', 50,
'How often to checkpoint the global model.')
with utils_impl.record_hparam_flags() as task_flags:
# Task specification
flags.DEFINE_enum('task', None, _SUPPORTED_TASKS,
'Which task to perform federated training on.')
with utils_impl.record_hparam_flags() as cifar100_flags:
# CIFAR-100 flags
flags.DEFINE_integer('cifar100_crop_size', 24, 'The height and width of '
'images after preprocessing.')
flags.DEFINE_bool(
'cifar100_distort_train_images', True, 'If set to True, '
'train images will be randomly cropped. Otherwise, all '
'images will simply be resized.')
with utils_impl.record_hparam_flags() as emnist_cr_flags:
# EMNIST CR flags
flags.DEFINE_enum(
'emnist_cr_model', 'cnn', ['cnn', '2nn'], 'Which model to '
'use. This can be a convolutional model (cnn) or a two '
'hidden-layer densely connected network (2nn).')
with utils_impl.record_hparam_flags() as shakespeare_flags:
# Shakespeare flags
flags.DEFINE_integer(
'shakespeare_sequence_length', 80,
'Length of character sequences to use for the RNN model.')
with utils_impl.record_hparam_flags() as so_nwp_flags:
# Stack Overflow NWP flags
flags.DEFINE_integer('so_nwp_vocab_size', 10000, 'Size of vocab to use.')
flags.DEFINE_integer('so_nwp_num_oov_buckets', 1,
'Number of out of vocabulary buckets.')
flags.DEFINE_integer('so_nwp_sequence_length', 20,
'Max sequence length to use.')
flags.DEFINE_integer('so_nwp_max_elements_per_user', 1000, 'Max number of '
'training sentences to use per user.')
flags.DEFINE_integer(
'so_nwp_num_validation_examples', 10000, 'Number of examples '
'to use from test set for per-round validation.')
with utils_impl.record_hparam_flags() as so_lr_flags:
# Stack Overflow LR flags
flags.DEFINE_integer('so_lr_vocab_tokens_size', 10000,
'Vocab tokens size used.')
flags.DEFINE_integer('so_lr_vocab_tags_size', 500, 'Vocab tags size used.')
flags.DEFINE_integer(
'so_lr_num_validation_examples', 10000, 'Number of examples '
'to use from test set for per-round validation.')
flags.DEFINE_integer('so_lr_max_elements_per_user', 1000,
'Max number of training '
'sentences to use per user.')
FLAGS = flags.FLAGS
TASK_FLAGS = collections.OrderedDict(
cifar100=cifar100_flags,
emnist_cr=emnist_cr_flags,
shakespeare=shakespeare_flags,
stackoverflow_nwp=so_nwp_flags,
stackoverflow_lr=so_lr_flags)
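# Note: emnist_ae defines no task-specific flags, so it has no entry above.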
def _write_hparam_flags():
"""Creates an ordered dictionary of hyperparameter flags and writes to CSV."""
hparam_dict = utils_impl.lookup_flag_values(shared_flags)
# Update with optimizer flags corresponding to the chosen optimizers.
opt_flag_dict = utils_impl.lookup_flag_values(optimizer_flags)
opt_flag_dict = optimizer_utils.remove_unused_flags('client', opt_flag_dict)
opt_flag_dict = optimizer_utils.remove_unused_flags('server', opt_flag_dict)
hparam_dict.update(opt_flag_dict)
# Update with task-specific flags.
task_name = FLAGS.task
if task_name in TASK_FLAGS:
task_hparam_dict = utils_impl.lookup_flag_values(TASK_FLAGS[task_name])
hparam_dict.update(task_hparam_dict)
results_dir = os.path.join(FLAGS.root_output_dir, 'results',
FLAGS.experiment_name)
utils_impl.create_directory_if_not_exists(results_dir)
hparam_file = os.path.join(results_dir, 'hparams.csv')
utils_impl.atomic_write_series_to_csv(hparam_dict, hparam_file)
def main(argv):
if len(argv) > 1:
raise app.UsageError('Expected no command-line arguments, '
'got: {}'.format(argv))
client_optimizer_fn = optimizer_utils.create_optimizer_fn_from_flags('client')
server_optimizer_fn = optimizer_utils.create_optimizer_fn_from_flags('server')
client_lr_schedule = optimizer_utils.create_lr_schedule_from_flags('client')
server_lr_schedule = optimizer_utils.create_lr_schedule_from_flags('server')
def iterative_process_builder(
model_fn: Callable[[],
tff.learning.Model]) -> tff.templates.IterativeProcess:
"""Creates an iterative process using a given TFF `model_fn`.
Args:
model_fn: A no-arg function returning a `tff.learning.Model`.
Returns:
A `tff.templates.IterativeProcess`.
"""
if FLAGS.task == 'shakespeare' or FLAGS.task == 'stackoverflow_nwp':
def
|
(local_outputs):
return tf.cast(tf.squeeze(local_outputs['num_tokens']), tf.float32)
else:
client_weight_fn = None
return fed_avg_schedule.build_fed_avg_process(
model_fn=model_fn,
client_optimizer_fn=client_optimizer_fn,
client_lr=client_lr_schedule,
server_optimizer_fn=server_optimizer_fn,
server_lr=server_lr_schedule,
client_weight_fn=client_weight_fn)
task_spec = training_specs.TaskSpec(
iterative_process_builder=iterative_process_builder,
client_epochs_per_round=FLAGS.client_epochs_per_round,
client_batch_size=FLAGS.client_batch_size,
clients_per_round=FLAGS.clients_per_round,
client_datasets_random_seed=FLAGS.client_datasets_random_seed)
if FLAGS.task == 'cifar100':
runner_spec = federated_cifar100.configure_training(
task_spec,
crop_size=FLAGS.cifar100_crop_size,
distort_train_images=FLAGS.cifar100_distort_train_images)
elif FLAGS.task == 'emnist_cr':
runner_spec = federated_emnist.configure_training(
task_spec, model=FLAGS.emnist_cr_model)
elif FLAGS.task == 'emnist_ae':
runner_spec = federated_emnist_ae.configure_training(task_spec)
elif FLAGS.task == 'shakespeare':
runner_spec = federated_shakespeare.configure_training(
task_spec, sequence_length=FLAGS.shakespeare_sequence_length)
elif FLAGS.task == 'stackoverflow_nwp':
runner_spec = federated_stackoverflow.configure_training(
task_spec,
vocab_size=FLAGS.so_nwp_vocab_size,
num_oov_buckets=FLAGS.so_nwp_num_oov_buckets,
sequence_length=FLAGS.so_nwp_sequence_length,
max_elements_per_user=FLAGS.so_nwp_max_elements_per_user,
num_validation_examples=FLAGS.so_nwp_num_validation_examples)
elif FLAGS.task == 'stackoverflow_lr':
runner_spec = federated_stackoverflow_lr.configure_training(
task_spec,
vocab_tokens_size=FLAGS.so_lr_vocab_tokens_size,
vocab_tags_size=FLAGS.so_lr_vocab_tags_size,
max_elements_per_user=FLAGS.so_lr_max_elements_per_user,
num_validation_examples=FLAGS.so_lr_num_validation_examples)
else:
raise ValueError(
'--task flag {} is not supported, must be one of {}.'.format(
FLAGS.task, _SUPPORTED_TASKS))
_write_hparam_flags()
training_loop.run(
iterative_process=runner_spec.iterative_process,
client_datasets_fn=runner_spec.client_datasets_fn,
validation_fn=runner_spec.validation_fn,
test_fn=runner_spec.test_fn,
total_rounds=FLAGS.total_rounds,
experiment_name=FLAGS.experiment_name,
root_output_dir=FLAGS.root_output_dir,
rounds_per_eval=FLAGS.rounds_per_eval,
rounds_per_checkpoint=FLAGS.rounds_per_checkpoint)
if __name__ == '__main__':
app.run(main)
|
client_weight_fn
|
dropdown.class.ts
|
import { Query } from "../model";
export class DropdownQuestion extends Query<string> {
controlType = "dropdown";
options: { key: string; value: string }[] = [];
|
}
}
|
constructor(options: { [key: string]: any } = {}) {
super(options);
this.options = options.options || [];
|
common.go
|
// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package typeparams contains common utilities for writing tools that interact
// with generic Go code, as introduced with Go 1.18.
//
// Many of the types and functions in this package are proxies for the new APIs
// introduced in the standard library with Go 1.18. For example, the
// typeparams.Union type is an alias for go/types.Union, and the ForTypeSpec
// function returns the value of the go/ast.TypeSpec.TypeParams field. At Go
// versions older than 1.18 these helpers are implemented as stubs, allowing
// users of this package to write code that handles generic constructs inline,
// even if the Go version being used to compile does not support generics.
//
// Additionally, this package contains common utilities for working with the
// new generic constructs, to supplement the standard library APIs. Notably,
// the StructuralTerms API computes a minimal representation of the structural
// restrictions on a type parameter. In the future, this API may be available
// from go/types.
//
// See the example/README.md for a more detailed guide on how to update tools
// to support generics.
package typeparams
import (
"go/ast"
"go/token"
"go/types"
)
// UnpackIndexExpr extracts data from AST nodes that represent index
// expressions.
//
// For an ast.IndexExpr, the resulting indices slice will contain exactly one
// index expression. For an ast.IndexListExpr (go1.18+), it may have a variable
// number of index expressions.
//
// For nodes that don't represent index expressions, the first return value of
// UnpackIndexExpr will be nil.
func UnpackIndexExpr(n ast.Node) (x ast.Expr, lbrack token.Pos, indices []ast.Expr, rbrack token.Pos) {
switch e := n.(type) {
case *ast.IndexExpr:
return e.X, e.Lbrack, []ast.Expr{e.Index}, e.Rbrack
case *IndexListExpr:
return e.X, e.Lbrack, e.Indices, e.Rbrack
}
return nil, token.NoPos, nil, token.NoPos
}
// PackIndexExpr returns an *ast.IndexExpr or *ast.IndexListExpr, depending on
// the cardinality of indices. Calling PackIndexExpr with len(indices) == 0
// will panic.
func PackIndexExpr(x ast.Expr, lbrack token.Pos, indices []ast.Expr, rbrack token.Pos) ast.Expr {
switch len(indices) {
case 0:
panic("empty indices")
case 1:
return &ast.IndexExpr{
X: x,
Lbrack: lbrack,
Index: indices[0],
Rbrack: rbrack,
}
default:
return &IndexListExpr{
X: x,
Lbrack: lbrack,
Indices: indices,
Rbrack: rbrack,
}
}
}
// IsTypeParam reports whether t is a type parameter.
func IsTypeParam(t types.Type) bool {
_, ok := t.(*TypeParam)
return ok
}
// OriginMethod returns the origin method associated with the method fn.
// For methods on a non-generic receiver base type, this is just
// fn. However, for methods with a generic receiver, OriginMethod returns the
// corresponding method in the method set of the origin type.
//
// As a special case, if fn is not a method (has no receiver), OriginMethod
// returns fn.
func OriginMethod(fn *types.Func) *types.Func {
recv := fn.Type().(*types.Signature).Recv()
if recv == nil {
return fn
}
base := recv.Type()
p, isPtr := base.(*types.Pointer)
if isPtr {
base = p.Elem()
}
named, isNamed := base.(*types.Named)
if !isNamed {
// Receiver is a *types.Interface.
return fn
}
if ForNamed(named).Len() == 0 {
// Receiver base has no type parameters, so we can avoid the lookup below.
return fn
}
orig := NamedTypeOrigin(named)
gfn, _, _ := types.LookupFieldOrMethod(orig, true, fn.Pkg(), fn.Name())
return gfn.(*types.Func)
}
// GenericAssignableTo is a generalization of types.AssignableTo that
// implements the following rule for uninstantiated generic types:
//
// If V and T are generic named types, then V is considered assignable to T if,
// for every possible instantiation of V[A_1, ..., A_N], the instantiation
// T[A_1, ..., A_N] is valid and V[A_1, ..., A_N] implements T[A_1, ..., A_N].
//
// If T has structural constraints, they must be satisfied by V.
//
// For example, consider the following type declarations:
//
// type Interface[T any] interface {
// Accept(T)
// }
//
// type Container[T any] struct {
// Element T
// }
//
// func (c Container[T]) Accept(t T) { c.Element = t }
//
// In this case, GenericAssignableTo reports that instantiations of Container
// are assignable to the corresponding instantiation of Interface.
func
|
(ctxt *Context, V, T types.Type) bool {
// If V and T are not both named, or do not have matching non-empty type
// parameter lists, fall back on types.AssignableTo.
VN, Vnamed := V.(*types.Named)
TN, Tnamed := T.(*types.Named)
if !Vnamed || !Tnamed {
return types.AssignableTo(V, T)
}
vtparams := ForNamed(VN)
ttparams := ForNamed(TN)
if vtparams.Len() == 0 || vtparams.Len() != ttparams.Len() || NamedTypeArgs(VN).Len() != 0 || NamedTypeArgs(TN).Len() != 0 {
return types.AssignableTo(V, T)
}
// V and T have the same (non-zero) number of type params. Instantiate both
// with the type parameters of V. This must always succeed for V, and will
// succeed for T if and only if the type set of each type parameter of V is a
// subset of the type set of the corresponding type parameter of T, meaning
// that every instantiation of V corresponds to a valid instantiation of T.
// Minor optimization: ensure we share a context across the two
// instantiations below.
if ctxt == nil {
ctxt = NewContext()
}
var targs []types.Type
for i := 0; i < vtparams.Len(); i++ {
targs = append(targs, vtparams.At(i))
}
vinst, err := Instantiate(ctxt, V, targs, true)
if err != nil {
panic("type parameters should satisfy their own constraints")
}
tinst, err := Instantiate(ctxt, T, targs, true)
if err != nil {
return false
}
return types.AssignableTo(vinst, tinst)
}
|
GenericAssignableTo
|
discovery.pb.gw.go
|
// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT.
// source: discovery.proto
/*
Package discovery is a reverse proxy.
It translates gRPC into RESTful JSON APIs.
*/
package discovery
import (
"context"
"io"
"net/http"
"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
"github.com/grpc-ecosystem/grpc-gateway/v2/utilities"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/grpclog"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/status"
"google.golang.org/protobuf/proto"
)
// Suppress "imported and not used" errors
var _ codes.Code
var _ io.Reader
var _ status.Status
var _ = runtime.String
var _ = utilities.NewDoubleArray
var _ = metadata.Join
func request_Discovery_Start_0(ctx context.Context, marshaler runtime.Marshaler, client DiscoveryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq StartDiscoveryRequest
var metadata runtime.ServerMetadata
newReader, berr := utilities.IOReaderFactory(req.Body)
if berr != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
}
if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
msg, err := client.Start(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_Discovery_Start_0(ctx context.Context, marshaler runtime.Marshaler, server DiscoveryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq StartDiscoveryRequest
var metadata runtime.ServerMetadata
newReader, berr := utilities.IOReaderFactory(req.Body)
if berr != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
}
if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
msg, err := server.Start(ctx, &protoReq)
return msg, metadata, err
}
func request_Discovery_Query_0(ctx context.Context, marshaler runtime.Marshaler, client DiscoveryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq QueryRequest
var metadata runtime.ServerMetadata
newReader, berr := utilities.IOReaderFactory(req.Body)
if berr != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
}
if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
msg, err := client.Query(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
return msg, metadata, err
}
func local_request_Discovery_Query_0(ctx context.Context, marshaler runtime.Marshaler, server DiscoveryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
var protoReq QueryRequest
var metadata runtime.ServerMetadata
newReader, berr := utilities.IOReaderFactory(req.Body)
if berr != nil {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
}
if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF {
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
}
msg, err := server.Query(ctx, &protoReq)
return msg, metadata, err
}
// RegisterDiscoveryHandlerServer registers the http handlers for service Discovery to "mux".
// UnaryRPC :call DiscoveryServer directly.
// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906.
// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterDiscoveryHandlerFromEndpoint instead.
func RegisterDiscoveryHandlerServer(ctx context.Context, mux *runtime.ServeMux, server DiscoveryServer) error {
mux.Handle("POST", pattern_Discovery_Start_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/clouditor.Discovery/Start", runtime.WithHTTPPathPattern("/v1/discovery/start"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := local_request_Discovery_Start_0(rctx, inboundMarshaler, server, req, pathParams)
md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
ctx = runtime.NewServerMetadataContext(ctx, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
forward_Discovery_Start_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
|
})
mux.Handle("POST", pattern_Discovery_Query_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
var stream runtime.ServerTransportStream
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/clouditor.Discovery/Query", runtime.WithHTTPPathPattern("/v1/discovery/query"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := local_request_Discovery_Query_0(rctx, inboundMarshaler, server, req, pathParams)
md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
ctx = runtime.NewServerMetadataContext(ctx, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
forward_Discovery_Query_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
return nil
}
// RegisterDiscoveryHandlerFromEndpoint is same as RegisterDiscoveryHandler but
// automatically dials to "endpoint" and closes the connection when "ctx" gets done.
func RegisterDiscoveryHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) {
conn, err := grpc.Dial(endpoint, opts...)
if err != nil {
return err
}
defer func() {
if err != nil {
if cerr := conn.Close(); cerr != nil {
grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr)
}
return
}
go func() {
<-ctx.Done()
if cerr := conn.Close(); cerr != nil {
grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr)
}
}()
}()
return RegisterDiscoveryHandler(ctx, mux, conn)
}
// RegisterDiscoveryHandler registers the http handlers for service Discovery to "mux".
// The handlers forward requests to the grpc endpoint over "conn".
func RegisterDiscoveryHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error {
return RegisterDiscoveryHandlerClient(ctx, mux, NewDiscoveryClient(conn))
}
// RegisterDiscoveryHandlerClient registers the http handlers for service Discovery
// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "DiscoveryClient".
// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "DiscoveryClient"
// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in
// "DiscoveryClient" to call the correct interceptors.
func RegisterDiscoveryHandlerClient(ctx context.Context, mux *runtime.ServeMux, client DiscoveryClient) error {
mux.Handle("POST", pattern_Discovery_Start_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateContext(ctx, mux, req, "/clouditor.Discovery/Start", runtime.WithHTTPPathPattern("/v1/discovery/start"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_Discovery_Start_0(rctx, inboundMarshaler, client, req, pathParams)
ctx = runtime.NewServerMetadataContext(ctx, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
forward_Discovery_Start_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
mux.Handle("POST", pattern_Discovery_Query_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
ctx, cancel := context.WithCancel(req.Context())
defer cancel()
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
rctx, err := runtime.AnnotateContext(ctx, mux, req, "/clouditor.Discovery/Query", runtime.WithHTTPPathPattern("/v1/discovery/query"))
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
resp, md, err := request_Discovery_Query_0(rctx, inboundMarshaler, client, req, pathParams)
ctx = runtime.NewServerMetadataContext(ctx, md)
if err != nil {
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
return
}
forward_Discovery_Query_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
})
return nil
}
var (
pattern_Discovery_Start_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "discovery", "start"}, ""))
pattern_Discovery_Query_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "discovery", "query"}, ""))
)
var (
forward_Discovery_Start_0 = runtime.ForwardResponseMessage
forward_Discovery_Query_0 = runtime.ForwardResponseMessage
)
| |
dubbo_converter.go
|
package convert
import (
"net"
|
types2 "github.com/symcn/meshach/pkg/adapter/types"
"github.com/symcn/meshach/pkg/utils"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/klog"
)
// DubboConverter converts dubbo registry data into ConfiguredService and
// ServiceConfig resources.
type DubboConverter struct {
DefaultNamespace string
}
// FlagConfigParameter holds the customized flag settings used for dubbo
// route weighting.
type FlagConfigParameter struct {
Flags []*Flag
Manual bool
}
// Flag ...
type Flag struct {
Key string
Weight int32
}
// ToConfiguredService converts a registry ServiceEvent into a
// ConfiguredService resource.
func (dc *DubboConverter) ToConfiguredService(s *types2.ServiceEvent) *v1.ConfiguredService {
	// TODO Assuming every service can only provide a unique fixed port to adapt the dubbo case.
cs := &v1.ConfiguredService{
ObjectMeta: metav1.ObjectMeta{
Name: utils.FormatToDNS1123(s.Service.Name),
Namespace: dc.DefaultNamespace,
},
Spec: v1.ConfiguredServiceSpec{
OriginalName: s.Service.Name,
},
}
var instances []*v1.Instance
for _, i := range s.Instances {
ins := &v1.Instance{}
ins.Host = utils.RemovePort(i.Host)
ins.Port = ToPort(i.Port)
ins.Labels = i.Labels
instances = append(instances, ins)
}
cs.Spec.Instances = instances
// yamlbyte, err := yaml.Marshal(cs)
// if err != nil {
// klog.Errorf("Marshal yaml err:%+v", err)
// return cs
// }
// fmt.Println(string(yamlbyte))
return cs
}
// ToServiceConfig converts a ConfiguratorConfig into a ServiceConfig,
// deriving the policy, per-instance configs and routes from its items.
func (dc *DubboConverter) ToServiceConfig(cc *types2.ConfiguratorConfig) *v1.ServiceConfig {
if cc == nil || len(cc.Key) == 0 {
klog.Infof("config's key is empty, skip it.")
return nil
}
sc := v1.ServiceConfig{
ObjectMeta: metav1.ObjectMeta{
Name: utils.FormatToDNS1123(cc.Key),
Namespace: dc.DefaultNamespace,
},
Spec: v1.ServiceConfigSpec{
OriginalName: cc.Key,
},
}
// Policy
policy := &v1.Policy{
LoadBalancer: make(map[string]string),
}
// find out the default configuration if it presents.
// it will be used to assemble both the service and instances without customized configurations.
defaultConfig := findDefaultConfig(cc.Configs)
// Setting the service's configuration such as policy
if defaultConfig != nil && defaultConfig.Enabled {
if t, ok := defaultConfig.Parameters["timeout"]; ok {
policy.Timeout = t
}
if r, ok := defaultConfig.Parameters["retries"]; ok {
policy.MaxRetries = utils.ToInt32(r)
}
}
sc.Spec.Policy = policy
// Instances' config
var instanceConfigs []*v1.InstanceConfig
for _, ci := range cc.Configs {
		// Guard against empty address lists before indexing Addresses[0].
		if ci.Enabled && len(ci.Addresses) > 0 && ci.Addresses[0] != "0.0.0.0" && ci.Side == "provider" {
h, p, _ := net.SplitHostPort(ci.Addresses[0])
instanceConfigs = append(instanceConfigs, &v1.InstanceConfig{
Host: h,
Port: &v1.Port{Name: "", Number: utils.ToUint32(p), Protocol: ""},
Weight: utils.ToUint32(ci.Parameters["weight"]),
})
}
}
sc.Spec.Instances = instanceConfigs
// Routes
var routes []*v1.Destination
// setting flag configurator
flagConfig := findFlagConfig(cc.Configs)
if flagConfig != nil {
fc, ok := flagConfig.Parameters["flag_config"]
if ok {
klog.Infof("Flag config: %s", fc)
fcp := &FlagConfigParameter{}
err := yaml.Unmarshal([]byte(fc), fcp)
if err != nil {
klog.Errorf("Parsing the flag_config parameter has an error: %v", err)
} else if flagConfig.Enabled && fcp.Manual {
// clear the default routes firstly
routes = routes[:0]
for _, f := range fcp.Flags {
routes = append(routes, &v1.Destination{
Subset: f.Key,
Weight: f.Weight,
})
}
}
}
} else {
klog.Infof("Could not find any route config.")
}
sc.Spec.Route = routes
sc.Spec.RerouteOption = &v1.RerouteOption{
ReroutePolicy: v1.Default,
}
sc.Spec.CanaryRerouteOption = nil
return &sc
}
// findDefaultConfig returns the provider-side config item addressed to
// 0.0.0.0, i.e. the default configuration that applies to every instance.
func findDefaultConfig(configs []types2.ConfigItem) *types2.ConfigItem {
var defaultConfig *types2.ConfigItem
for _, c := range configs {
if c.Side == "provider" {
for _, a := range c.Addresses {
if a == "0.0.0.0" {
defaultConfig = &c
return defaultConfig
}
}
}
}
return defaultConfig
}
// findFlagConfig returns the consumer-side config item addressed to 0.0.0.0,
// which carries the flag_config routing parameters.
func findFlagConfig(configs []types2.ConfigItem) *types2.ConfigItem {
var config *types2.ConfigItem
for _, c := range configs {
if c.Side == "consumer" {
for _, a := range c.Addresses {
if a == "0.0.0.0" {
config = &c
return config
}
}
}
}
return config
}
|
"github.com/ghodss/yaml"
v1 "github.com/symcn/meshach/api/v1alpha1"
|
executor_stack_bindings_test.py
|
# Copyright 2021, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
from pybind11_abseil import status as absl_status
from tensorflow_federated.python.core.impl.executor_stacks import executor_stack_bindings
|
_CARDINALITIES = {placements.CLIENTS: 5}
class ExecutorStackBindingsTest(parameterized.TestCase):
@parameterized.named_parameters(('from_target_list', list),
('from_target_tuple', tuple),
('from_target_ndarray', np.array))
def test_executor_construction_raises_no_channels_available(
self, container_constructor):
with self.assertRaisesRegex(absl_status.StatusNotOk, 'UNAVAILABLE'):
executor_stack_bindings.create_remote_executor_stack(
channels=container_constructor([
executor_bindings.create_insecure_grpc_channel(t)
for t in _TARGET_LIST
]),
cardinalities=_CARDINALITIES)
if __name__ == '__main__':
absltest.main()
|
from tensorflow_federated.python.core.impl.executors import executor_bindings
from tensorflow_federated.python.core.impl.types import placements
_TARGET_LIST = ['localhost:8000', 'localhost:8001']
|
app.module.ts
|
import { NgModule, APP_INITIALIZER, Injectable } from '@angular/core';
import { BrowserModule } from '@angular/platform-browser';
import { FormsModule } from '@angular/forms';
import { HttpModule } from '@angular/http';
import { TranslationModule, LocaleService, TranslationService } from 'angular-l10n';
import { AppComponent } from './app.component';
import { routing } from './app.routing';
import { MdCardModule, MdDialogModule, MaterialModule, MdTableModule } from '@angular/material';
import { AuthGuard } from './guards/auth.guard';
import { ContentComponent } from './content360/content.component';
import { CriticalPathsComponent } from './criticalPaths/critical-paths.component';
import { ProcessMonitorComponent } from './processMonitor/process-monitor.component';
import { CampaignComponent } from './processMonitor/campaign/campaign.component';
import { HomeComponent } from './home/home.component';
import { ResourcesTrackerComponent } from './resourcesTracker/resources-tracker.component';
import { LoginComponent } from './login/login.component';
import { LoginService } from './login/login.service';
import { PatternComponent } from './pattern/pattern.component';
import { RouterLinkStubDirective } from './testing/router-stubs';
import { RouterOutletStubComponent } from './testing/router-stubs';
import { CampaignService } from './processMonitor/campaign/campaign.service';
import { HomeContentComponent } from './home/content/homeContent.component';
import 'hammerjs';
import { BrowserAnimationsModule } from '@angular/platform-browser/animations';
import { LotsComponent } from './processMonitor/lots/lots.component';
import { LotsService } from './processMonitor/lots/lots.service';
import { BusyModule, BusyConfig } from 'angular2-busy';
import { ProjectCampaignComponent } from './processMonitor/project/campaign/project-campaign.component';
import { ProjectCampaignService } from './processMonitor/project/campaign/project-campaign.service';
import { MolecularFormatComponent } from './processMonitor/molecularFormat/molecularFormat.component';
import { ManufacturingUnitComponent } from './processMonitor/manufacturingUnit/manufacturing-unit.component';
import { ManufacturingUnitService } from './processMonitor/manufacturingUnit/manufacturing-unit.service';
import { SourceComponent } from './processMonitor/source/source.component';
import { SourceService } from './processMonitor/source/source.service';
import { UtilizationComponent } from './resourcesTracker/utilization/utilization.component';
import { DownTimeComponent } from './resourcesTracker/downtime/downtime.component';
import { CalibrationComponent } from './resourcesTracker/calibration/calibration.component';
import { BlastViewComponent } from './processMonitor/blastView/blast-view.component';
import { BlastViewService } from './processMonitor/blastView/blast-view.service';
import { CdkTableModule } from '@angular/cdk';
import { InstrumentTypesComponent } from './resourcesTracker/utilization/instrument-types/instrument-types.component';
import { InstrumentTypesService } from './resourcesTracker/utilization/instrument-types/instrument-types.service';
import { ChartsModule } from 'ng2-charts';
@Injectable() export class LocalizationConfig {
constructor(public locale: LocaleService, public translation: TranslationService) { }
load(): Promise<void> {
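        // Register the supported languages (en/it), default to English, and
        // point the translation provider at the ./assets/locale-* files
        // before the app bootstraps.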
this.locale.addConfiguration()
.addLanguages(['en', 'it'])
.setCookieExpiration(30)
.defineLanguage('en');
this.translation.addConfiguration()
.addProvider('./assets/locale-');
return this.translation.init();
}
}
// AoT compilation requires a reference to an exported function.
export function
|
(localizationConfig: LocalizationConfig): Function {
return () => localizationConfig.load();
}
@NgModule({
imports: [BrowserModule, FormsModule, HttpModule, routing, ChartsModule,
MaterialModule, MdTableModule, CdkTableModule,
BrowserAnimationsModule, MdCardModule, MdDialogModule, TranslationModule.forRoot(),
BusyModule.forRoot(<BusyConfig>{
message: 'Loading...',
backdrop: false,
template: '<div id="busy"><img src="../assets/images/loading-blue.gif" class="image" style="width: 30px;"/>{{message}}</div>',
delay: 200,
minDuration: 600,
wrapperClass: 'my-class'
})
],
declarations: [AppComponent, HomeContentComponent, LotsComponent, ContentComponent, CriticalPathsComponent,
ProcessMonitorComponent, BlastViewComponent, CampaignComponent,
HomeComponent, SourceComponent, ResourcesTrackerComponent, LoginComponent, PatternComponent, ManufacturingUnitComponent,
RouterLinkStubDirective, RouterOutletStubComponent, ProjectCampaignComponent, MolecularFormatComponent, UtilizationComponent,
DownTimeComponent, InstrumentTypesComponent, CalibrationComponent ],
providers: [AuthGuard, LoginService, LotsService, LocalizationConfig, CampaignService, ManufacturingUnitService,
SourceService, BlastViewService, InstrumentTypesService, ProjectCampaignService,
{
provide: APP_INITIALIZER,
useFactory: initLocalization,
deps: [LocalizationConfig],
multi: true
}],
entryComponents: [SourceComponent, BlastViewComponent],
bootstrap: [AppComponent]
})
export class AppModule { }
|
initLocalization
|
product.input.ts
|
import { InputType, Field } from 'type-graphql';
@InputType()
export class ProductInput {
|
@Field()
readonly description: string;
@Field()
readonly imageURL: string;
@Field()
readonly size: string;
@Field()
readonly color: string;
@Field()
readonly price: number;
}
|
@Field()
readonly name: string;
|
component---src-pages-who-we-are-js-2f98e7f89bd07b024c85.js
|
n=y(t)?t():t;r[r.length]=encodeURIComponent(e)+"="+encodeURIComponent(null==n?"":n)};if(null==e)return"";if(Array.isArray(e)||e.jquery&&!N.isPlainObject(e))N.each(e,(function(){a(this.name,this.value)}));else for(n in e)Ht(n,e[n],t,a);return r.join("&")},N.fn.extend({serialize:function(){return N.param(this.serializeArray())},serializeArray:function(){return this.map((function(){var e=N.prop(this,"elements");return e?N.makeArray(e):this})).filter((function(){var e=this.type;return this.name&&!N(this).is(":disabled")&&St.test(this.nodeName)&&!qt.test(e)&&(this.checked||!me.test(e))})).map((function(e,t){var n=N(this).val();return null==n?null:Array.isArray(n)?N.map(n,(function(e){return{name:t.name,value:e.replace(jt,"\r\n")}})):{name:t.name,value:n.replace(jt,"\r\n")}})).get()}});var Bt=/%20/g,Wt=/#.*$/,Ot=/([?&])_=[^&]*/,Pt=/^(.*?):[ \t]*([^\r\n]*)$/gm,Mt=/^(?:GET|HEAD)$/,It=/^\/\//,Qt={},Rt={},Xt="*/".concat("*"),Vt=o.createElement("a");function Gt(e){return function(t,n){"string"!=typeof t&&(n=t,t="*");var r,a=0,i=t.toLowerCase().match(M)||[];if(y(n))for(;r=i[a++];)"+"===r[0]?(r=r.slice(1)||"*",(e[r]=e[r]||[]).unshift(n)):(e[r]=e[r]||[]).push(n)}}function Ft(e,t,n,r){var a={},i=e===Rt;function o(s){var l;return a[s]=!0,N.each(e[s]||[],(function(e,s){var c=s(t,n,r);return"string"!=typeof c||i||a[c]?i?!(l=c):void 0:(t.dataTypes.unshift(c),o(c),!1)})),l}return o(t.dataTypes[0])||!a["*"]&&o("*")}function Ut(e,t){var n,r,a=N.ajaxSettings.flatOptions||{};for(n in t)void 0!==t[n]&&((a[n]?e:r||(r={}))[n]=t[n]);return r&&N.extend(!0,e,r),e}Vt.href=Tt.href,N.extend({active:0,lastModified:{},etag:{},ajaxSettings:{url:Tt.href,type:"GET",isLocal:/^(?:about|app|app-storage|.+-extension|file|res|widget):$/.test(Tt.protocol),global:!0,processData:!0,async:!0,contentType:"application/x-www-form-urlencoded; charset=UTF-8",accepts:{"*":Xt,text:"text/plain",html:"text/html",xml:"application/xml, text/xml",json:"application/json, text/javascript"},contents:{xml:/\bxml\b/,html:/\bhtml/,json:/\bjson\b/},responseFields:{xml:"responseXML",text:"responseText",json:"responseJSON"},converters:{"* text":String,"text html":!0,"text json":JSON.parse,"text xml":N.parseXML},flatOptions:{url:!0,context:!0}},ajaxSetup:function(e,t){return t?Ut(Ut(e,N.ajaxSettings),t):Ut(N.ajaxSettings,e)},ajaxPrefilter:Gt(Qt),ajaxTransport:Gt(Rt),ajax:function(e,t){"object"==typeof e&&(t=e,e=void 0);var r,a,i,s,l,c,u,f,d,p,h=N.ajaxSetup({},t=t||{}),m=h.context||h,g=h.context&&(m.nodeType||m.jquery)?N(m):N.event,v=N.Deferred(),y=N.Callbacks("once memory"),A=h.statusCode||{},b={},x={},w="canceled",E={readyState:0,getResponseHeader:function(e){var t;if(u){if(!s)for(s={};t=Pt.exec(i);)s[t[1].toLowerCase()+" "]=(s[t[1].toLowerCase()+" "]||[]).concat(t[2]);t=s[e.toLowerCase()+" "]}return null==t?null:t.join(", ")},getAllResponseHeaders:function(){return u?i:null},setRequestHeader:function(e,t){return null==u&&(e=x[e.toLowerCase()]=x[e.toLowerCase()]||e,b[e]=t),this},overrideMimeType:function(e){return null==u&&(h.mimeType=e),this},statusCode:function(e){var t;if(e)if(u)E.always(e[E.status]);else for(t in e)A[t]=[A[t],e[t]];return this},abort:function(e){var t=e||w;return 
r&&r.abort(t),C(0,t),this}};if(v.promise(E),h.url=((e||h.url||Tt.href)+"").replace(It,Tt.protocol+"//"),h.type=t.method||t.type||h.method||h.type,h.dataTypes=(h.dataType||"*").toLowerCase().match(M)||[""],null==h.crossDomain){c=o.createElement("a");try{c.href=h.url,c.href=c.href,h.crossDomain=Vt.protocol+"//"+Vt.host!=c.protocol+"//"+c.host}catch(T){h.crossDomain=!0}}if(h.data&&h.processData&&"string"!=typeof h.data&&(h.data=N.param(h.data,h.traditional)),Ft(Qt,h,t,E),u)return E;for(d in(f=N.event&&h.global)&&0==N.active++&&N.event.trigger("ajaxStart"),h.type=h.type.toUpperCase(),h.hasContent=!Mt.test(h.type),a=h.url.replace(Wt,""),h.hasContent?h.data&&h.processData&&0===(h.contentType||"").indexOf("application/x-www-form-urlencoded")&&(h.data=h.data.replace(Bt,"+")):(p=h.url.slice(a.length),h.data&&(h.processData||"string"==typeof h.data)&&(a+=(Lt.test(a)?"&":"?")+h.data,delete h.data),!1===h.cache&&(a=a.replace(Ot,"$1"),p=(Lt.test(a)?"&":"?")+"_="+Dt+++p),h.url=a+p),h.ifModified&&(N.lastModified[a]&&E.setRequestHeader("If-Modified-Since",N.lastModified[a]),N.etag[a]&&E.setRequestHeader("If-None-Match",N.etag[a])),(h.data&&h.hasContent&&!1!==h.contentType||t.contentType)&&E.setRequestHeader("Content-Type",h.contentType),E.setRequestHeader("Accept",h.dataTypes[0]&&h.accepts[h.dataTypes[0]]?h.accepts[h.dataTypes[0]]+("*"!==h.dataTypes[0]?", "+Xt+"; q=0.01":""):h.accepts["*"]),h.headers)E.setRequestHeader(d,h.headers[d]);if(h.beforeSend&&(!1===h.beforeSend.call(m,E,h)||u))return E.abort();if(w="abort",y.add(h.complete),E.done(h.success),E.fail(h.error),r=Ft(Rt,h,t,E)){if(E.readyState=1,f&&g.trigger("ajaxSend",[E,h]),u)return E;h.async&&h.timeout>0&&(l=n.setTimeout((function(){E.abort("timeout")}),h.timeout));try{u=!1,r.send(b,C)}catch(T){if(u)throw T;C(-1,T)}}else C(-1,"No Transport");function C(e,t,o,s){var c,d,p,b,x,w=t;u||(u=!0,l&&n.clearTimeout(l),r=void 0,i=s||"",E.readyState=e>0?4:0,c=e>=200&&e<300||304===e,o&&(b=function(e,t,n){for(var r,a,i,o,s=e.contents,l=e.dataTypes;"*"===l[0];)l.shift(),void 0===r&&(r=e.mimeType||t.getResponseHeader("Content-Type"));if(r)for(a in s)if(s[a]&&s[a].test(r)){l.unshift(a);break}if(l[0]in n)i=l[0];else{for(a in n){if(!l[0]||e.converters[a+" "+l[0]]){i=a;break}o||(o=a)}i=i||o}if(i)return i!==l[0]&&l.unshift(i),n[i]}(h,E,o)),b=function(e,t,n,r){var a,i,o,s,l,c={},u=e.dataTypes.slice();if(u[1])for(o in e.converters)c[o.toLowerCase()]=e.converters[o];for(i=u.shift();i;)if(e.responseFields[i]&&(n[e.responseFields[i]]=t),!l&&r&&e.dataFilter&&(t=e.dataFilter(t,e.dataType)),l=i,i=u.shift())if("*"===i)i=l;else if("*"!==l&&l!==i){if(!(o=c[l+" "+i]||c["* "+i]))for(a in c)if((s=a.split(" "))[1]===i&&(o=c[l+" "+s[0]]||c["* "+s[0]])){!0===o?o=c[a]:!0!==c[a]&&(i=s[0],u.unshift(s[1]));break}if(!0!==o)if(o&&e.throws)t=o(t);else try{t=o(t)}catch(T){return{state:"parsererror",error:o?T:"No conversion from "+l+" to "+i}}}return{state:"success",data:t}}(h,b,E,c),c?(h.ifModified&&((x=E.getResponseHeader("Last-Modified"))&&(N.lastModified[a]=x),(x=E.getResponseHeader("etag"))&&(N.etag[a]=x)),204===e||"HEAD"===h.type?w="nocontent":304===e?w="notmodified":(w=b.state,d=b.data,c=!(p=b.error))):(p=w,!e&&w||(w="error",e<0&&(e=0))),E.status=e,E.statusText=(t||w)+"",c?v.resolveWith(m,[d,w,E]):v.rejectWith(m,[E,w,p]),E.statusCode(A),A=void 0,f&&g.trigger(c?"ajaxSuccess":"ajaxError",[E,h,c?d:p]),y.fireWith(m,[E,w]),f&&(g.trigger("ajaxComplete",[E,h]),--N.active||N.event.trigger("ajaxStop")))}return E},getJSON:function(e,t,n){return N.get(e,t,n,"json")},getScript:function(e,t){return 
N.get(e,void 0,t,"script")}}),N.each(["get","post"],(function(e,t){N[t]=function(e,n,r,a){return y(n)&&(a=a||r,r=n,n=void 0),N.ajax(N.extend({url:e,type:t,dataType:a,data:n,success:r},N.isPlainObject(e)&&e))}})),N._evalUrl=function(e,t){return N.ajax({url:e,type:"GET",dataType:"script",cache:!0,async:!1,global:!1,converters:{"text script":function(){}},dataFilter:function(e){N.globalEval(e,t)}})},N.fn.extend({wrapAll:function(e){var t;return this[0]&&(y(e)&&(e=e.call(this[0])),t=N(e,this[0].ownerDocument).eq(0).clone(!0),this[0].parentNode&&t.insertBefore(this[0]),t.map((function(){for(var e=this;e.firstElementChild;)e=e.firstElementChild;return e})).append(this)),this},wrapInner:function(e){return y(e)?this.each((function(t){N(this).wrapInner(e.call(this,t))})):this.each((function(){var t=N(this),n=t.contents();n.length?n.wrapAll(e):t.append(e)}))},wrap:function(e){var t=y(e);return this.each((function(n){N(this).wrapAll(t?e.call(this,n):e)}))},unwrap:function(e){return this.parent(e).not("body").each((function(){N(this).replaceWith(this.childNodes)})),this}}),N.expr.pseudos.hidden=function(e){return!N.expr.pseudos.visible(e)},N.expr.pseudos.visible=function(e){return!!(e.offsetWidth||e.offsetHeight||e.getClientRects().length)},N.ajaxSettings.xhr=function(){try{return new n.XMLHttpRequest}catch(e){}};var zt={0:200,1223:204},Yt=N.ajaxSettings.xhr();v.cors=!!Yt&&"withCredentials"in Yt,v.ajax=Yt=!!Yt,N.ajaxTransport((function(e){var t,r;if(v.cors||Yt&&!e.crossDomain)return{send:function(a,i){var o,s=e.xhr();if(s.open(e.type,e.url,e.async,e.username,e.password),e.xhrFields)for(o in e.xhrFields)s[o]=e.xhrFields[o];for(o in e.mimeType&&s.overrideMimeType&&s.overrideMimeType(e.mimeType),e.crossDomain||a["X-Requested-With"]||(a["X-Requested-With"]="XMLHttpRequest"),a)s.setRequestHeader(o,a[o]);t=function(e){return function(){t&&(t=r=s.onload=s.onerror=s.onabort=s.ontimeout=s.onreadystatechange=null,"abort"===e?s.abort():"error"===e?"number"!=typeof s.status?i(0,"error"):i(s.status,s.statusText):i(zt[s.status]||s.status,s.statusText,"text"!==(s.responseType||"text")||"string"!=typeof s.responseText?{binary:s.response}:{text:s.responseText},s.getAllResponseHeaders()))}},s.onload=t(),r=s.onerror=s.ontimeout=t("error"),void 0!==s.onabort?s.onabort=r:s.onreadystatechange=function(){4===s.readyState&&n.setTimeout((function(){t&&r()}))},t=t("abort");try{s.send(e.hasContent&&e.data||null)}catch(l){if(t)throw l}},abort:function(){t&&t()}}})),N.ajaxPrefilter((function(e){e.crossDomain&&(e.contents.script=!1)})),N.ajaxSetup({accepts:{script:"text/javascript, application/javascript, application/ecmascript, application/x-ecmascript"},contents:{script:/\b(?:java|ecma)script\b/},converters:{"text script":function(e){return N.globalEval(e),e}}}),N.ajaxPrefilter("script",(function(e){void 0===e.cache&&(e.cache=!1),e.crossDomain&&(e.type="GET")})),N.ajaxTransport("script",(function(e){var t,n;if(e.crossDomain||e.scriptAttrs)return{send:function(r,a){t=N("<script>").attr(e.scriptAttrs||{}).prop({charset:e.scriptCharset,src:e.url}).on("load error",n=function(e){t.remove(),n=null,e&&a("error"===e.type?404:200,e.type)}),o.head.appendChild(t[0])},abort:function(){n&&n()}}}));var Jt,Zt=[],Kt=/(=)\?(?=&|$)|\?\?/;N.ajaxSetup({jsonp:"callback",jsonpCallback:function(){var e=Zt.pop()||N.expando+"_"+Dt++;return this[e]=!0,e}}),N.ajaxPrefilter("json jsonp",(function(e,t,r){var a,i,o,s=!1!==e.jsonp&&(Kt.test(e.url)?"url":"string"==typeof 
e.data&&0===(e.contentType||"").indexOf("application/x-www-form-urlencoded")&&Kt.test(e.data)&&"data");if(s||"jsonp"===e.dataTypes[0])return a=e.jsonpCallback=y(e.jsonpCallback)?e.jsonpCallback():e.jsonpCallback,s?e[s]=e[s].replace(Kt,"$1"+a):!1!==e.jsonp&&(e.url+=(Lt.test(e.url)?"&":"?")+e.jsonp+"="+a),e.converters["script json"]=function(){return o||N.error(a+" was not called"),o[0]},e.dataTypes[0]="json",i=n[a],n[a]=function(){o=arguments},r.always((function(){void 0===i?N(n).removeProp(a):n[a]=i,e[a]&&(e.jsonpCallback=t.jsonpCallback,Zt.push(a)),o&&y(i)&&i(o[0]),o=i=void 0})),"script"})),v.createHTMLDocument=((Jt=o.implementation.createHTMLDocument("").body).innerHTML="<form></form><form></form>",2===Jt.childNodes.length),N.parseHTML=function(e,t,n){return"string"!=typeof e?[]:("boolean"==typeof t&&(n=t,t=!1),t||(v.createHTMLDocument?((r=(t=o.implementation.createHTMLDocument("")).createElement("base")).href=o.location.href,t.head.appendChild(r)):t=o),i=!n&&[],(a=q.exec(e))?[t.createElement(a[1])]:(a=Ee([e],t,i),i&&i.length&&N(i).remove(),N.merge([],a.childNodes)));var r,a,i},N.fn.load=function(e,t,n){var r,a,i,o=this,s=e.indexOf(" ");return s>-1&&(r=bt(e.slice(s)),e=e.slice(0,s)),y(t)?(n=t,t=void 0):t&&"object"==typeof t&&(a="POST"),o.length>0&&N.ajax({url:e,type:a||"GET",dataType:"html",data:t}).done((function(e){i=arguments,o.html(r?N("<div>").append(N.parseHTML(e)).find(r):e)})).always(n&&function(e,t){o.each((function(){n.apply(this,i||[e.responseText,t,e])}))}),this},N.each(["ajaxStart","ajaxStop","ajaxComplete","ajaxError","ajaxSuccess","ajaxSend"],(function(e,t){N.fn[t]=function(e){return this.on(t,e)}})),N.expr.pseudos.animated=function(e){return N.grep(N.timers,(function(t){return e===t.elem})).length},N.offset={setOffset:function(e,t,n){var r,a,i,o,s,l,c=N.css(e,"position"),u=N(e),f={};"static"===c&&(e.style.position="relative"),s=u.offset(),i=N.css(e,"top"),l=N.css(e,"left"),("absolute"===c||"fixed"===c)&&(i+l).indexOf("auto")>-1?(o=(r=u.position()).top,a=r.left):(o=parseFloat(i)||0,a=parseFloat(l)||0),y(t)&&(t=t.call(e,n,N.extend({},s))),null!=t.top&&(f.top=t.top-s.top+o),null!=t.left&&(f.left=t.left-s.left+a),"using"in t?t.using.call(e,f):u.css(f)}},N.fn.extend({offset:function(e){if(arguments.length)return void 0===e?this:this.each((function(t){N.offset.setOffset(this,e,t)}));var t,n,r=this[0];return r?r.getClientRects().length?(t=r.getBoundingClientRect(),n=r.ownerDocument.defaultView,{top:t.top+n.pageYOffset,left:t.left+n.pageXOffset}):{top:0,left:0}:void 0},position:function(){if(this[0]){var e,t,n,r=this[0],a={top:0,left:0};if("fixed"===N.css(r,"position"))t=r.getBoundingClientRect();else{for(t=this.offset(),n=r.ownerDocument,e=r.offsetParent||n.documentElement;e&&(e===n.body||e===n.documentElement)&&"static"===N.css(e,"position");)e=e.parentNode;e&&e!==r&&1===e.nodeType&&((a=N(e).offset()).top+=N.css(e,"borderTopWidth",!0),a.left+=N.css(e,"borderLeftWidth",!0))}return{top:t.top-a.top-N.css(r,"marginTop",!0),left:t.left-a.left-N.css(r,"marginLeft",!0)}}},offsetParent:function(){return this.map((function(){for(var e=this.offsetParent;e&&"static"===N.css(e,"position");)e=e.offsetParent;return e||oe}))}}),N.each({scrollLeft:"pageXOffset",scrollTop:"pageYOffset"},(function(e,t){var n="pageYOffset"===t;N.fn[e]=function(r){return F(this,(function(e,r,a){var i;if(A(e)?i=e:9===e.nodeType&&(i=e.defaultView),void 0===a)return 
i?i[t]:e[r];i?i.scrollTo(n?i.pageXOffset:a,n?a:i.pageYOffset):e[r]=a}),e,r,arguments.length)}})),N.each(["top","left"],(function(e,t){N.cssHooks[t]=Ye(v.pixelPosition,(function(e,n){if(n)return n=ze(e,t),Ge.test(n)?N(e).position()[t]+"px":n}))})),N.each({Height:"height",Width:"width"},(function(e,t){N.each({padding:"inner"+e,content:t,"":"outer"+e},(function(n,r){N.fn[r]=function(a,i){var o=arguments.length&&(n||"boolean"!=typeof a),s=n||(!0===a||!0===i?"margin":"border");return F(this,(function(t,n,a){var i;return A(t)?0===r.indexOf("outer")?t["inner"+e]:t.document.documentElement["client"+e]:9===t.nodeType?(i=t.documentElement,Math.max(t.body["scroll"+e],i["scroll"+e],t.body["offset"+e],i["offset"+e],i["client"+e])):void 0===a?N.css(t,n,s):N.style(t,n,a,s)}),t,o?a:void 0,o)}}))})),N.each("blur focus focusin focusout resize scroll click dblclick mousedown mouseup mousemove mouseover mouseout mouseenter mouseleave change select submit keydown keypress keyup contextmenu".split(" "),(function(e,t){N.fn[t]=function(e,n){return arguments.length>0?this.on(t,null,e,n):this.trigger(t)}})),N.fn.extend({hover:function(e,t){return this.mouseenter(e).mouseleave(t||e)}}),N.fn.extend({bind:function(e,t,n){return this.on(e,null,t,n)},unbind:function(e,t){return this.off(e,null,t)},delegate:function(e,t,n,r){return this.on(t,e,n,r)},undelegate:function(e,t,n){return 1===arguments.length?this.off(e,"**"):this.off(t,e||"**",n)}}),N.proxy=function(e,t){var n,r,a;if("string"==typeof t&&(n=e[t],t=e,e=n),y(e))return r=l.call(arguments,2),(a=function(){return e.apply(t||this,r.concat(l.call(arguments)))}).guid=e.guid=e.guid||N.guid++,a},N.holdReady=function(e){e?N.readyWait++:N.ready(!0)},N.isArray=Array.isArray,N.parseJSON=JSON.parse,N.nodeName=j,N.isFunction=y,N.isWindow=A,N.camelCase=J,N.type=w,N.now=Date.now,N.isNumeric=function(e){var t=N.type(e);return("number"===t||"string"===t)&&!isNaN(e-parseFloat(e))},void 0===(r=function(){return N}.apply(t,[]))||(e.exports=r);var $t=n.jQuery,_t=n.$;return N.noConflict=function(e){return n.$===N&&(n.$=_t),e&&n.jQuery===N&&(n.jQuery=$t),N},a||(n.jQuery=n.$=N),N}))},N9uB:function(e,t,n){e.exports=n.p+"static/oval-42d6af61463a05fada77c860dcc4beb6.png"},Pxg6:function(e,t,n){e.exports=n.p+"static/layer-6-cee286913526f515fa5cee4a4c7914dd.png"},"Q+dw":function(e,t,n){e.exports=n.p+"static/resrecimg-7189254d49f88a56de5f68b8b5296005.jpg"},Sb2p:function(e,t,n){e.exports=n.p+"static/dots2-2af3f174238dc41c38b31224343f79f3.png"},VcnT:function(e,t,n){e.exports=n.p+"static/p3-93e2e289e11aefa29774adc4db8d937e.png"},dkLQ:function(e,t,n){e.exports=n.p+"static/rectangle-2ec30cae3813f8d35710dcc7c5c07439.png"},"fmF+":function(e,t,n){"use strict";n.r(t);var r=n("q1tI"),a=n.n(r),i=n("Eg6Q"),o=n("fItr"),s=(n("N0jU"),n("ywwV"),n("CV+e"),n("JLc0")),l=n.n(s),c=n("ERsL"),u=n.n(c),f=n("Q+dw"),d=n.n(f),p=function(){return a.a.createElement(a.a.Fragment,null,a.a.createElement("div",{className:"container-fluid who-we-are-main "},a.a.createElement("div",{className:"who-we-are-first-sec flex"},a.a.createElement("div",{className:"who-we-are-text w-1/2 flex justify-center flex-col items-center"},a.a.createElement("div",{className:"who-we-are-vector-styles flex"},a.a.createElement("img",{className:"v1",src:u.a,alt:"vector"}),a.a.createElement("div",null,a.a.createElement("img",{className:"v2",src:u.a,alt:"vector"})),a.a.createElement("img",{className:"v3",src:u.a,alt:"vector"})),a.a.createElement("div",{className:"who-we-are-ambitions"},a.a.createElement("h1",{className:"f-s-62 clr-3a f-fm l-h-1-1 f-w-b 
who-we-are-heading-main ","data-aos":"fade-right","data-aos-duration":"2000"},"Who We Are"),a.a.createElement("p",{className:"f-s-21 f-w-b f-fm clr-3a mt-4"},"It’s important for us to adhere our values, as they",a.a.createElement("br",null)," embody our agency and define the way that we work."))),a.a.createElement("div",{className:"who-we-are-page-image w-1/2 flex justify-end "},a.a.createElement("div",{className:"resp"},a.a.createElement("div",{className:"imgDiv ml-2"},a.a.createElement("img",{className:"",src:l.a,alt:"side"})))))),a.a.createElement("div",{className:"who-we-are-mob-first-sec"},a.a.createElement("div",{className:"display-image"},a.a.createElement("img",{className:"main-image",src:d.a,alt:"main"})),a.a.createElement("div",{className:"heading-main-who-we-are"},a.a.createElement("h1",{className:"f-s-24 f-w-b clr-half-white f-fm"},"Who We Are"),a.a.createElement("p",{className:"f-s-18 f-fm clr-half-white"},"It’s important for us to adhere our values, as they embody our agency and define the way that we work."))))},h=(n("n+qT"),n("I9ei")),m=n.n(h),g=n("n5Xu"),v=n.n(g),y=n("N9uB"),A=n.n(y),b=n("dkLQ"),x=n.n(b),w=n("vCg7"),N=n.n(w),E=n("Pxg6"),C=n.n(E),T=(n("Bc/y"),function(){return a.a.createElement(a.a.Fragment,null,a.a.createElement("div",{className:" who-we-are-second-part-main mt-40"},a.a.createElement("div",{className:"who-we-are w-full flex"},a.a.createElement("img",{className:"threedott",src:m.a,alt:"dott"}),a.a.createElement("div",{className:"partB-start w-1/2 flex flex-col justify-center ml-24 mt-10","data-aos":"fade-right","data-aos-duration":"2000"},a.a.createElement("div",{className:"diamond-image flex justify-center ml-8"},a.a.createElement("img",{className:"diamond",src:v.a,alt:"diamond"})),a.a.createElement("div",{className:"who-we-are-heading flex justify-center w-2/3 ml-64 "},a.a.createElement("h1",{className:"f-s-18 f-fm clr-5a f-w-b ml-8 our-services-head"},"About Us")),a.a.createElement("div",{className:"who-we-are-headingB flex justify-center"},a.a.createElement("h1",{className:"f-s-42 f-fm f-clr-black f-w-b"},"Insights and resources to help drive your business forward faster.")),a.a.createElement("div",{className:"second-part-text flex justify-center "},a.a.createElement("p",{className:"f-s-18 f-fm clr-rgba-90-9 "},"We build results-oriented brand strategies and continually refine your campaigns for the greatest outcome. From full-scale design interface to development, right through to our precise execution and reporting...that's right, we've got you covered")),a.a.createElement("div",{className:"second-part-text flex justify-center"},a.a.createElement("p",{className:"f-s-18 f-fm clr-rgba-90-9"},"Always aiming high and striving to achieve more; we apply strategy and creativity to everything we do. Approaching challenges with positivity, we are inclusive - sharing our skills and experience with colleagues and clients alike. The Designer is razor sharp, always on it. 
Commercial, focused and agile."))),a.a.createElement("div",{className:"partB-image w-1/2 mt-48","data-aos":"fade-left","data-aos-duration":"2000"},a.a.createElement("div",{className:"layer-img"},a.a.createElement("img",{src:A.a,alt:"oval"})))),a.a.createElement("div",{className:" why-we-exist mt-64"},a.a.createElement("div",{className:"image-mobile"},a.a.createElement("div",null,a.a.createElement("img",{className:"mobile-image",src:C.a,alt:"mobile-screen"}))),a.a.createElement("div",{className:"why-we-exist w-full flex"},a.a.createElement("div",{className:"animated-images w-1/2"},a.a.createElement("div",{className:"first-image flex justify-end","data-aos":"fade-right","data-aos-duration":"2000"},a.a.createElement("div",null,a.a.createElement("img",{className:"rectangle",src:x.a,alt:"rectangle"}))),a.a.createElement("div",{className:"second-image flex justify-end w-3/4","data-aos":"fade-up","data-aos-duration":"2500"},a.a.createElement("div",{className:"fade-up"},a.a.createElement("img",{className:"square",src:N.a,alt:"square"})))),a.a.createElement("div",{className:"why-do-we-exist-heading w-1/2 ","data-aos":"fade-left","data-aos-duration":"2000"},a.a.createElement("div",{className:"heading-first flex justify-center"},a.a.createElement("div",null,a.a.createElement("h1",{className:"f-s-42 f-fm f-w-b f-clr-black"},"Why Do We Exist?"),a.a.createElement("p",{className:"f-s-18 f-fm clr-rgba-90-9"},"We believe every business owner should have an online presence that clearly expresses their unique offering to the world. We have a unique passion for under represented, minority owned businesses who don’t always have the resources to invest in modern marketing."))),a.a.createElement("div",{className:"heading-second flex justify-center"},a.a.createElement("div",null,a.a.createElement("h1",{className:"f-s-42 f-fm f-w-b f-clr-black"},"How Do We Do It?"),a.a.createElement("p",{className:"f-s-18 f-fm clr-rgba-90-9"},"We pinpoint the objective of each business we work with, build a comprehensive strategy (with specific timelines and budgets in mind) and get to work on making growth goals come true! We mobilize our team to execute on winning you new customers through a measurable and intentional plan of action. 
Technology and marketing should be fun and easy!"))),a.a.createElement("div",{className:"heading-third flex justify-center"},a.a.createElement("div",null,a.a.createElement("h1",{className:"f-s-42 f-fm f-w-b f-clr-black"},"What Do We Do Best?"),a.a.createElement("p",{className:"f-s-18 f-fm clr-rgba-90-9"},"We craft world-class marketing strategies, custom designed web applications, mobile applications, for multicultural business owners"))))))))}),D=(n("Ry7B"),n("XPtb"),n("n+Mo")),L=n.n(D),k=n("Sb2p"),j=n.n(k),q=n("iTlS"),S=n.n(q),H=n("+10x"),B=n.n(H),W=n("VcnT"),O=n.n(W),P=n("iHjy"),M=n.n(P),I=n("EVdn"),Q=n.n(I),R=function(){return Object(r.useEffect)((function(){var e=0;Q()(window).scroll((function(){var t=Q()("#counter").offset().top-window.innerHeight;0===e&&Q()(window).scrollTop()>t&&(Q()(".counter-value").each((function(){var e=Q()(this),t=e.attr("data-count");Q()({countNum:e.text()}).animate({countNum:t},{duration:2e3,easing:"swing",step:function(){e.text(Math.floor(this.countNum))},complete:function(){e.text(this.countNum)}})})),e=1)}))})),a.a.createElement(a.a.Fragment,null,a.a.createElement("div",{className:"container-fluid progress-rounds-main mt-64"},a.a.createElement("div",{className:"progress-circles-start w-full mt-64 flex"},a.a.createElement("div",{className:"first-two-circles w-1/2 flex justify-center mt-64"},a.a.createElement("div",{className:"background-heart-image"},a.a.createElement("img",{className:"heart-pic",src:L.a,alt:"heart"})),a.a.createElement("div",{className:"c1 absolute "},a.a.createElement("div",{className:"circle-text"},a.a.createElement("div",{style:{marginTop:"5rem"}},a.a.createElement("h",{className:" f-fm f-s-21 f-w-600 f-str-n f-sty-n l-h-1-71 l-spc-n f-clr-black text-center"},"Happy Clients")),a.a.createElement("div",{className:"flex",style:{width:"100%",justifyContent:"center",marginTop:"1rem"}},a.a.createElement("p",{id:"counter","data-count":"150",className:" counter-value f-fm f-s-48 f-w-600 f-str-n f-sty-n l-h-1-71 l-spc-n clr-ef"},"0"),a.a.createElement("span",{className:"plusicon"},"+")))),a.a.createElement("div",{className:"c2"},a.a.createElement("div",{className:"circle-text"},a.a.createElement("div",{style:{marginTop:"5rem"}},a.a.createElement("h",{className:" f-fm f-s-21 f-w-600 f-str-n f-sty-n l-h-1-71 l-spc-n f-clr-black text-center"},"Project Done ")),a.a.createElement("div",{className:"flex",style:{width:"100%",justifyContent:"center",marginTop:"1rem"}},a.a.createElement("p",{id:"counter","data-count":"200",className:" counter-value f-fm f-s-48 f-w-600 f-str-n f-sty-n l-h-1-71 l-spc-n clr-ef"},"0"),a.a.createElement("span",{className:"plusicon"},"+"))))),a.a.createElement("div",{className:"Last-two-circles w-1/2 flex justify-center mt-64"},a.a.createElement("div",{className:"c3 absolute "},a.a.createElement("div",{className:"circle-text"},a.a.createElement("div",{style:{marginTop:"5rem"}},a.a.createElement("h",{className:" f-fm f-s-21 f-w-600 f-str-n f-sty-n l-h-1-71 l-spc-n f-clr-black text-center"},"Referral Rate ")),a.a.createElement("div",{className:"flex",style:{width:"100%",justifyContent:"center",marginTop:"1rem"}},a.a.createElement("p",{id:"counter","data-count":"91",className:" counter-value f-fm f-s-48 f-w-600 f-str-n f-sty-n l-h-1-71 l-spc-n clr-ef"},"0"),a.a.createElement("span",{className:"percent-icon"},"%")))),a.a.createElement("div",{className:"c4"},a.a.createElement("div",{className:"circle-text"},a.a.createElement("div",{style:{marginTop:"5rem"}},a.a.createElement("h",{className:" f-fm f-s-21 f-w-600 f-str-n f-sty-n 
l-h-1-71 l-spc-n f-clr-black text-center"},"Client Retention ")),a.a.createElement("div",{className:"flex",style:{width:"100%",justifyContent:"center",marginTop:"1rem"}},a.a.createElement("p",{id:"counter","data-count":"86",className:" counter-value f-fm f-s-48 f-w-600 f-str-n f-sty-n l-h-1-71 l-spc-n clr-ef"},"0"),a.a.createElement("span",{className:"percent-icon"},"%")))),a.a.createElement("div",{className:"background-heart-image"},a.a.createElement("img",{className:"dotts-pic",src:j.a,alt:"dot"}))))),a.a.createElement("div",{className:"our-team w-full pb-64 mt-32"},a.a.createElement("div",{className:"our-team-heading flex justify-center mt-16"},a.a.createElement("div",null,a.a.createElement("h2",{className:"f-s-42 f-clr-black f-w-b f-fm "},"Our Team"))),a.a.createElement("div",{className:"our-team-images flex justify-center"},a.a.createElement("div",{className:"our-team-sec-start flex w-4/5 justify-between"},a.a.createElement("div",{className:"first-team-member"},a.a.createElement("img",{className:"",src:S.a,alt:"first"}),a.a.createElement("h1",{className:"f-s-24 f-w-b f-clr-black text-center"},"Afzal Tanoli"),a.a.createElement("p",{className:"f-s-16 f-fm clr-rgba-239-8 text-center"},"CEO")),a.a.createElement("div",{className:"second-team-member"},a.a.createElement("img",{className:"",src:B.a,alt:"first"}),a.a.createElement("h1",{className:"f-s-24 f-w-b f-clr-black text-center"},"Akash Qureshi"),a.a.createElement("p",{className:"f-s-16 f-fm clr-rgba-239-8 text-center"},"CEO & Full Stack Developer")),a.a.createElement("div",{className:"third-team-member"},a.a.createElement("img",{className:"",src:O.a,alt:"first"}),a.a.createElement("h1",{className:"f-s-24 f-w-b f-clr-black text-center"},"Muqarrab Khan"),a.a.createElement("p",{className:"f-s-16 f-fm clr-rgba-239-8 text-center"},"CEO & UI/UX Designer")),a.a.createElement("div",{className:"fourth-team-member"},a.a.createElement("img",{className:"",src:M.a,alt:"first"}),a.a.createElement("h1",{className:"f-s-24 f-w-b f-clr-black text-center"},"Sher Hassan"),a.a.createElement("p",{className:"f-s-16 f-fm clr-rgba-239-8 text-center"},"Brand Identity")))),a.a.createElement("div",{class:"container-fluid carusal"},a.a.createElement("div",{id:"myCarousel",class:"carousel slide","data-ride":"carousel"},a.a.createElement("ol",{class:"carousel-indicators"},a.a.createElement("li",{"data-target":"#myCarousel","data-slide-to":"0",class:"active"}),a.a.createElement("li",{"data-target":"#myCarousel","data-slide-to":"1"}),a.a.createElement("li",{"data-target":"#myCarousel","data-slide-to":"2"})),a.a.createElement("div",{class:"carousel-inner"},a.a.createElement("div",{class:"item active"},a.a.createElement("img",{src:S.a,alt:"Los Angeles",style:{width:"100%"}}),a.a.createElement("h1",{className:"f-s-24 f-w-b f-clr-black text-center"},"Afzal Tanoli"),a.a.createElement("p",{className:"f-s-16 f-fm clr-rgba-239-8 text-center"},"CEO")),a.a.createElement("div",{class:"item"},a.a.createElement("img",{src:B.a,alt:"Chicago",style:{width:"100%"}}),a.a.createElement("h1",{className:"f-s-24 f-w-b f-clr-black text-center"},"Akash Qureshi"),a.a.createElement("p",{className:"f-s-16 f-fm clr-rgba-239-8 text-center"},"CEO & Full Stack Developer")),a.a.createElement("div",{class:"item"},a.a.createElement("img",{src:O.a,alt:"New york",style:{width:"100%"}}),a.a.createElement("h1",{className:"f-s-24 f-w-b f-clr-black text-center"},"Muqarrab Khan"),a.a.createElement("p",{className:"f-s-16 f-fm clr-rgba-239-8 text-center"},"CEO & UI/UX 
Designer")),a.a.createElement("div",{class:"item"},a.a.createElement("img",{src:M.a,alt:"New york",style:{width:"100%"}}),a.a.createElement("h1",{className:"f-s-24 f-w-b f-clr-black text-center"},"Sher Hassan"),a.a.createElement("p",{className:"f-s-16 f-fm clr-rgba-239-8 text-center"},"Brand Identity"))),a.a.createElement("a",{class:"left carousel-control",href:"#myCarousel","data-slide":"prev"},a.a.createElement("span",{class:"glyphicon glyphicon-chevron-left"}),a.a.createElement("span",{class:"sr-only"},"Previous")),a.a.createElement("a",{class:"right carousel-control",href:"#myCarousel","data-slide":"next"},a.a.createElement("span",{class:"glyphicon glyphicon-chevron-right"}),a.a.createElement("span",{class:"sr-only"},"Next"))))))};t.default=function(){return a.a.createElement("div",null,a.a.createElement(i.a,null),a.a.createElement(p,null),a.a.createElement(T,null),a.a.createElement(R,null),a.a.createElement(o.a,null))}},iHjy:function(e,t,n){e.exports=n.p+"static/p4-5fd65e95ac65d6befcf2b0110c65e798.png"},iTlS:function(e,t,n){e.exports=n.p+"static/p1-99a43eb0dc3d2eb619b78934afd7ad28.png"},"n+Mo":function(e,t){e.exports="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAm8AAAHeCAYAAADNSOzFAAAAGXRFWHRTb2Z0d2FyZQBBZG9iZSBJbWFnZVJlYWR5ccllPAAAItNJREFUeNrs3Ydzntd1J+BLsHdRrOpUs2pULMley3Zsx3ZiJ5nZ2Tb7J+5mnczGu5NxNo5tSbbVOyVKVKVESuxVFEmw7j16LyyQBsDvA77ylueZOQOIYgHOi/LDrYsuHfgs0Xkrci3JtbTUkmk1mwvT6nyui7nO5ZrMdVlLAWA4lmhB603kWpdrbanVJaytzLWqvL5ogP/e5RLgzk6rL3OdmlanPRYAEN5IaU2ujbmuz7W+hLZVAw5n17KoBMIVc/yeSyXEnch1fFqd8ggB4BrfaE2bNjp4b8m1aVpgW9bw9+lCCXGHp9UZjxoArgwANMNECWnbcm0tr0+08ONxU6kpp0uIO5Rrf66TPhQA6DIjb/UWmwduynVzrhtzLdaSr0biDpTal4zMASC8MWYrSliLilG2RVoyp5hmjQ/ivbmOagcAwhujENOfMcJ2p8C2IGdLkNuTqpG5S1oCgPDGIG3IdUeu23It146BijPnPi11MDl7DgDhjXlaXMLaN0p4Y/jOlhD3cTK1CoDwRo9WlMB2VzLKNk6xRu6jXLtTdZgwAAhvXCFuNbg31+3JbtE6ifVw8cH/QaqOIAEA4U1oSw/k2p5sQKi7L3K9n6oRufPaAYDw1i1xRdVflNBGs8QND7tzvZeqq7sAQHhrsVjT9mCqjvuY0I7G+zzXO6naqQoAteF6rIWLdWz35Lo/VTci0A43ljpSQlwcAuy4EQDGzsjbwtyS69Fcq7Wi9WJd3M5UHTcixAEgvDVMbEZ4LNcNWtE5p3K9LcQBILw1Q6xli+nRB5J1bUKcEAeA8FZrm3N9K9c6rWCak7neTNV9qgAgvNVAbEh4OFWbEmA2sbHh9WR3KgDC21htyvXtZLSN3u3L9VpyThwAQ+KokFlCbarWtT2Y3I5Af2ITy7ZUXbu1I7k/FYBBhxQjb38mjv14MlWjbrAQ50qAi6u3bGoAQHgbgptTNU26TCsYoNjU8HKuA1oBgPA2GHHsR2xKuFcrGKJPcr2a66xWADBf1rxVd5J+L1VHgcAw3ZaqK7feSNWaOFOpAPSt6yNvG3N9P9dKHwqM2NFcL+Q6rhUACG+9uT1Vh+66KYFxiZG3t0td0g4AhLfZxfq2+z1+aiLOhItRuCNaAYDwdqW4LeE7uW7x6KmZGIXblaqrti5qBwCz6dKGheW5/jI5v42a/iCVqt3Occjvc7mOaQkAM+nKeq84ePcnghsNsD7XX6dqWt/tHgB0MrzFN8OfJveT0qzPy1iX+eNca7QDgC6Ft+vLN0BHgdBEcfbgz3LdqhUAdCG8bcn1V6la6wZNtTTXd1N1rM1i7QCgreFta64flm980AZ35vqbXNdpBYDw1sbg9oNklIL2mdrMcLtWAAhvbbFFcKPl4mP7P+R6IrkdBEB4a7iYTvpLwY2OuCtVu6hXawWA8NZEK1K1q9QaN7okdlPHbtRtWgEgvDXtffh5rmUeJx0UH/c/zHWPVgAIb03xd6kaeYOuipsYvpmqtXDWwQEIb7UWa36cQA+V2IUa18A5lBpAeKulh5K7SuFqG1N1HtxGrQAQ3uokFmo/4PHBjGLkLUbgnAcHILzVwpJUjSwAc39uxxq4WAu3SDsAhLdx+m8eG/QsdqH+MNmNDSC8jcnfeWTQtzgHzuYeAOFt5GKN2zqPDOYlPndiucEWrQAQ3kYhLuR+yOOCBYmp0x/lukMrAIS3Yb+Nf+tRwcA+n76d65FkIwOA8DYk/91jgoG7L9f3U7V7GwDhbWD+yiOCobkpVefBrdIKAOFtEG7NtdUjgqHakNzIACC8DUAsrP6uxwMjsSLXj3PdohUAwtt8/RePBkZqca7v5bpfKwCEt379vccCY/NwqnajTmgFgPDWi7jKZ63HAmMV58DFeXCu1AIQ3uYUO96+6ZFALcRNDH/thykA4W02cViog3ihXiK4xZ2om7UCQHi7WpznttTjgNpZXj4/t2sFgPA25bbksmyo+9eJ7yT3CwMIb6kabXvCY4BGeCBV5y8u1gqA7oa3J5PpUmiSuPkkplFXaAVA98JbfBO40SOAxtmUqp2o67QCoDvhLUbbHAsCzbW6BLhtWgHQjfD2YK6V2g+NFj+E/TDXXVoB0O7wtj5VNykAzRdnNMamo0fL6wC0MLw94Ys8tM69ub6fa4lWALQrvN2enNYObXVTqm5kWK0VAO0Ib7E+5hEth1a7LlUbGTZpBUDzw9v9ydlQ0AXxef7j5EotgEaHt1XJJgXo2teWuFLr4WSNK0Ajw1t8AXelDnRPjLjbyADQsPC2IZk+gS6zkQGgYeHt
UW2GzpvayGC3OUDNw1vcXbpVm4FUbWSIS+3v1gqA+oa3h7QYuOprzuO5vpXGdz0fgPA2i5tTtd4N4Gp3puo4EXccA9QkvMXRAEbdgLnEQb4/Sw70BahFeLs1VRfQA8xl6kDfO7UCYHzhLUbdHtRaoI+vQ7EG7vFkHRzAWMJbjLqt01qgT7ELNXajWgcHMMLwZtQNWIg4By7WwW3RCoDRhLc4Sd2oG7AQU+fB3acVAMMPb77YAoMQo/iPpOpe1KXaATCc8LYp2fIPDFacFxnTqNdrBcDgw5tRN2AY1qTqYvtvaAXA4MLb2vITMsCwvlY9lut7yTQq4AviQNyrlcAI3JLr58kSDUB4W5DluW7XSmBEVuf6Sa4HUrWxAUB469NduRZrJTBCU/cnx9Vaq7QDEN76+wJ6lzYCYxKH+sY06q1aAQhvvbnRT73AmC3L9d1cT5bXAYS3OdythUBN3Jbrb3Nt0wpAeJtZnL10gxYCNRKX2v8o1+O5lmgHILxdyVo3oK5iViBG4bZqBSC8ff3n7tA+oMbiSJG44P6J5GBfQHhLN6XqfDeAuotZghiFu1ErgDaY75oQh/ICTRK74n+Q69Ncr+Y6oyV08Pt9rAmNgZdl0ypGpSfSlaPT8XocBXYp14Xy8mJ5/VypyVxnc50uv07Nw9sKP8ECDRXnwcVGqzdyfZDrspbQEhHK1qXqrvHV02pl+eFlmIfpny8h7otcp8rLE7mOl/9HDcLbbcmVNEBzxahC7EaNdbsv5TqqJTREfO+Nkx7WTwtq60otG/Pn1PpSV/uyfI5FHS4vjdQt9APh0oHP+v0zcZr5dVoHtMSHud5M1RQQ1MVECUMbrqqmH4ETo91Hch0odShV07IMMbxdV8IbQJvE1M6OXO8lU6mMR4yebcq1sdT6NJj7x+suRuH254ow8rkfooYT3r6Z6x5tA1rqZK7XyzcSGJal00La5vLS1W7VD04HU7WxaE+qNkUwgPD2H5O7TIH2i28gryXr4RiMCGZbSlCLlzH9ae343C6VH6I+TtWInBHxeYa3GM79qZYBHfJJqqZTv9AK+rC8hLSpsk58YeJonw9KmVbtM7w9muteLQM6OALwUa63U3UcAlwtNhHEqNq2UsLa8D4XYzr1nVzHhLfemDIFhDghjmrKM2ajtpawFmvWJrRlpPaXz8WDwtvsTJkCfB3i4niRd1N1ICndsL4Eta2llmhJLUR429G1ENdreDNlCnClWEAdUzg7U8encFpqVfp6GjTC2gotqbU4My6uvjsuvH3NlCnA7GIK553ykmZalr4eVYvAtlZLGimWNsT1d63e2NDLsO8GwQ1gTlMjNLEr9f3yDcSdjvUWd31umvbsHN/RDnHtXVzjuaP8QNVKvYy8PZDrIR8PAD2LU+N3lyB3XDvq8f2uBLSpsBa7Q20yaLc4dPs3qYWjcL2Et5+Wn04A6F+sh4uRuDgzzonxo7U2XbnJwC0G3fRKqq6+60x4i4MG/1MylAywUNNPjN+XXMY9rLA2/XBcS36YEsf7/J/UkpsarhXetuf6jmcOMFDnS5DbU4LcRS2Zl3VXhbWVWsI1/DK14JzGa4W3J1O18A+A4Yj1cZ+XiiDn+p+ZxQaDOAx307Rari3MQ+OnUecKbzFV+p+TNQIAo3S0hLiow6m7F3LHFGhsMNhcgprdoAxSbCT6VRvD2/W5/sbzBRibiyXAHSoVr19o2fu4qAS160tdV14u9fgZgV808XNqrvB2f66HPVeA2rhcRgyOTav47yacKRchbXWqrplaW17GmrUYUVvs0TJG/15+OGqMuQ7p3eJ5AtQuAG0oNV3csXqyvPxi2ssv02h3tUYIW1VC2prycm2pCGrOVaOOfpKqNXCvND28xReIzZ4nQCOsKTWTyVKny8szqRqpi4rpoovlZfz35Vm+Hywt3y8Wl5exUWBZeRm1spSpTprqG7luzfXPqQHrTGebNt1ckigAQFdEcPunVPOlCLMNYZsyBQC6Jkaa/2v686UJjQhvWz0/AKCjfpaqqdTGhLdIne4yBQC67LFcTzQlvMUZO7ZtAwBdd1cdA9xM4c2oGwDA1wHusbqHt42eEwDAn8T6t4fqHN42eEYAAFd4INd9dQxv8d/rPR8AgD/zSK7tdQtvsVlhkWcDADCjb+faVrfwBgDA7Nnpe2mMM5VXhzdTpgAAc4t7fH+Qqrt9xx7e1nkeAADXtDrX99Pst1WNLLwZeQMA6M3mXI+OM7wtLikSAIDexBlw28cV3tboPwBA376VRrjpc3p4W6v3AAB9i9nLJ9OI7oafHt5MmQIAzE/sGxjJHahG3gAABuPOXLeMMryt0nMAgAWJ9W8rhDcAgGZYlqortIQ3AICGuDHXHcMOb7E7YrleAwAMRBzeO5Tp06nwZtQNAGBwYvr08WGGtxV6DAAwULHz9EbhDQCgOWL0baCH906FN+vdAAAGLy5BuH8Y4c3IGwDAcER4G9gd8sIbAMBwRd56eNDhbam+AgAMza25Ng8yvC3TUwCAoXpEeAMAaI5NuW4eVHgzbQoAMHwP5VokvAEANMP6XLcJbwAAzfFAWsDo2/SL6QEAGL51qdp9uqDwNqGPAAAj8+BCwpudpgAAoxWjbzfNN7wBADB69803vNmsAAAwenHjwqb5hDcAAMbjHuENAKA5bsm1st/w5pgQAIDxiPPe7hbeAACa487Ux2yoaVMAgPFakfo4NkR4AwAYv7v6CW8X9QsAYKy25VotvAEANMf2XsMbAADjd7vwBgDQHGtzbewlvJk2BQCoh9t6CW+X9AkAoBZuTdXBvXOGNyNvAAD1EFdlbRLeAACa45Zrhbdg6hQAoB5u7iW8ndcnAIBaiMN6NwhvAADNcZPwBgDQHDcKbwAAzXF9ruXCGwBAM8RZb9vmCm/n9AgAoFbmDG+T+gMAUCtbhTcAgOaII0PWCG8AAM2xdbbwdlZvAABqZ9Ns4c3IGwBA/WyeLbyd1hsAgNpZm646720qvJ3RGwCAWto0U3i7nKx7AwCoo+tnCm/B1CkAQP1sFN4AAJrDyBsAQIPEhoVVM4W3U3oDAFBL1wlvAADCGwAAwhsAQLetmym8XUwO6wUAqKO1M4W3cFJvAABqZ0kqO06vDm8n9AYAoJbWCm8AAM2xeqbwZtoUAKCeTJsCADTIjCNvk6UAAKiXGUfewlG9AQCondXCGwBAc8w68nZcbwAAamdxruVG3gAAmmPlTOEt7jg9rzcAALWzbGKW/3FEbwAAamf5bOHtsN4AANTOrCNvh/QGAKB2ls41bXpZfwAAamXWadPYsOCqLACAepl12jSYOgUAaFB4O6A/AAC1MjFXeDuoPwAAtbJ4rvA2meuYHgEANCO8hf16BABQH8IbAEBzXHPkLXacXtQnAIBmhLcIbnadAgDUxEQPv+czbQIAqIXFwhsAQHNc7CW8ncl1VK8AAMZvosffZ/QNAGD8LvYa3vboFQBAc8LbiVIAAIzP5Yk+fvMn+gUAMFbnhTcAgAbpJ7ydSnadAgCM04WJPv/Abj0DAGhWeLukbwAAY3G+3/A2mZz5BgD
QmPAWPtQ3AICxODef8LY/12m9AwAYucn5hLfLyegbAEBjwlsq4c3GBQCA0To73/B2Jten+gcAMFLzHnkLu/QPAGBkYtbz7ELCW9y2cEgfAQBGImY+08QC/xKjbwAADQpve1N15ykAAMN1ehDhLY4NeVsvAQCG7stBhLfwcTL6BgAwbKcGFd6MvgEANCi8hRh9c2UWAMDwDGzaNMTo21t6CgAwFJG1BjryFj7KdUJvAQAG7osS4AYa3uIvfE1vAQAG7uTUKxMD/ov35dqvvwAAzQhvwegbAMBgnRhmeDue60M9BgAYaL4aWngLb+Sa1GcAgAWLfQVDnTZNJbiZPgUAWLiYMr007PAW4uDeg/oNALAgx6f/x8SQ/7GXpidFAAD6dmSU4S3mZ917CgAwf0dHGd5SCW9H9R0AoG+xWeHYqMNb/KPPJ9OnAAD9ivVuF0cd3kLskrD7FACgP4ev/oWJEf7j7+X63DMAAOjZoXGGtxDTp2c8BwCAnox15C3E4b1/SNU6OAAAZhcDXl+OO7xNJUjr3wAA5nZgpl+cGNMbsytVNzAAADCzg3UKbyFuXzjiuQAAzKhWI28hzix5Js0wlwsA0HGRj07VLbyFs7meznXeMwIA+JN9s/2PiRq8cXGAb4zAXfScAAC+sr/O4S3EgrxnkyNEAAAuNyG8hb2pOsRXgAMAuixuVTjfhPAWdqdqFyoAQFd9Ntf/XFLDN/jD8vKJXIs8PwCgY/bO9T8navpGR4AzhQoAdE1s5DzVxPAWdqfqHlS7UAGArth7rd8w0YB34KnkHDgAQHhrRHgLcYzIr5ObGACAdjuT62gbwluI+d9/y3XYcwUAWmpvL79pokHvUFyl9ZtcH3m2AEAL7WlbeAuXcr2Q6+XyOgBAG8Qg1cE2hrcp76dqHdwpzxoAaIFPUo9HpE00+J2MBX3/mua4+wsAoEHhrScTDX9H4wiRP+Y67ZkDAA31Ra4jXQlv4VwJcG5jAACa6JN+fvNES97pOELkdc8eAGig3V0Mb+HdXJ97/gBAg8Qa/i+6Gt5CXGZ/xscBANAQu/v9A20Lb5PJ+jcAoBkuCW+VQ7ne9PEAANRc3KgwKbxVduba52MCAKixD+fzhyZa3JDnkvVvAEA9xS1RB4S3K1n/BgDU1Qfz/YMTLW9MrH/b4eMDAKiRGFj6WHib3dvJ+jcAoD725jorvM3t2eT+UwCgHj5YyB/uSniL+0//kKrzVAAAxuVkrv3CW2+OJPefAgDjtWuhf8FEBxu2x8cNADAGMRP48UL/kokONu6F1OcFsAAAAxBr3S4Kb/07n6r1bxd9DAEAIxLHg7w/iL9ooqMNPJ7rZR9HAMCIfJoGdPLFRIeb+FEpAIBh2zWov2ii442M0bcTPp4AgCE6nKpTL4S3AYh1b79P1To4AIBheGuQf9mEfn618/QFbQAAhuBYGvA1ncJbJc5+e0cbAIAB2znov1B4+9obuQ5qAwAwIHEV1sAvBxDevhbnr/wx1xmtAAAG4J2SL4S3ITqbXGAPACzcl2kAV2EJb72J7byvagMAsACx1u3yMP5i4W1mcX3Fbm0AAOYhblIY2kUAwtvsXkzVNVoAAP2Ic92GtgRLeJvd1AG+57QCAOjRqTTk6zeFt2s/gOe1AQDo0ZtpSGvdhLfefZbrbW0AAK4hllt9Oux/RHjrzY5cn2sDADCHoY+6CW+9iwfxXKruQQUAuNqRVM3WDZ3w1rvYuBAbGC5oBQBwlddH9Q8Jb/05ketZbQAApomlVSO7H114618Mib6lDQBAqpZWvTbKf1B4m58daUTz2gBArcWtTCeFt2Z4btQPCwColVgPP/LZOOFt/s6nagPDea0AgE6Kc2AnhbdmiZG357QBADonjg97bxz/sPC2cLH2bYc2AECnxNEgl8bxDwtvgxHz3Xu1AQA64cA4v+8Lb4MT06fHtAEAWi1G214e5xsgvA1O3LzwTK6zWgEArfVuGvNpE8LbYJ1O1Q7US1oBAK38Pj/2g/qFt8E7nOsFbQCA1nkl10XhrZ1253pHGwCgNeL+0lpsThTehueN5AotAGiDGG17pS5vjPA2PHFRbexAPaEVANBocZPCKeGtG+LqrNiBOqkVANBIMQhTq6VQwtvwRVL/Q7IDFQCaJmbRnq/b93DhbTQOphrNlQMAPdmV62jd3ijhbXQ+SNXBfgBA/cXF82/W8Q0T3kYrLrF1ByoA1N+LqQZnuglv4xdz58+mGg7BAgB/8n6qljzVkvA2epHin07VFRsAQL3E9+fX6vwGCm/jEZfXP5Wqo0QAgPqIGbKLdX4DhbfxiXNj4giRy1oBALXwXq5DdX8jhbfx2p/rJW0AgLE7mRpyrJfwNn4fJpfYA8A4xSzYr5vyxgpv9RBHiHyqDQAwFrHO7ZzwRr/i+o3D2gAAI3UgNWwARXirj9jZ8vtU3YUKAAzfhVy/bdobLbzVy9QRIpNaAQBD98smvtHCW/3EXWrPpJqfMQMADRffa8818Q0X3uop1r79MTkDDgCG4bNSjSS81fsD62VtAICBupSqUbfGEt7q7YNcb2sDAAzML5r+Dghv9fdmro+0AQAW7FepGnkT3hi6F1OD5+YBoAZ25jrehndEeGuG2LgQGxgc4gsA/TuT6422vDPCW3PE0SFP5zqhFQDQl1+26Z0R3polzqN5KtdprQCAnvxTatnRW8Jb80Rw+11yCwMAXEtcO3mube+U8NZMJ1M1AndeKwBgRnty7W3jOya8NdfR8hPFJa0AgCvENOkf2vrOCW/NdiC5RgsArvYPbX7nhLfmiyHhF7UBAL7yf1PLBzWEt3aIGxjcgwpA172S61Tb30nhrT3eTy06gBAA+hQH2b/XhXdUeGuXnaUAoEviIPtfd+WdFd7aJ0bf3tcGADrkH7v0zgpv7RTr3z7WBgA64F9Sx47NEt7a64Vcn2gDAC32aqoOru8U4a29Ypv0c6mlp0sD0Hlx1umuLr7jwlv7A1wc4vuZVgDQIjHa9tuuvvPCW/vFOoC4ImSfVgDQAmdz/arLDRDeuhPg4h7UA1oBQIOdL8Gt0/d6C2/dEWfgPC3AAdBQEdieStXIW6cJbwIcADTBS6m6RaHzhDcBDgDq7s1U3eON8CbAaQUANRfHgbytDcIbAhwA9Re3Bb2qDcIbfx7gHCMCQN3EIfMvaIPwxswB7pnkIF8A6mN/qg6Zv6wVwhszmzrIV4ADYNxiR+kzqeNnuQlv9BPgXGYPwLgcT9VZbhe1Qnij9wAXl9l/rBUAjNiJVN1Xel4rhDf6E+sLns/1vlYAMCKnUjXiNqkVwhvz93KundoAwAiC229yndYK4Y2Fe6MUAAhuwhsNEaNvr2gDAIKb8EZzvJeqjQy2bQMguAlvNMTuXL9Ptm8DILgJbzTG57l+l2zjBmB+vhDchDdG71Cuf891VisA6EOc4/ZrwU14YzyOl0/AL7QCgB4cSdWIm3PchDfG6FQJcEe1AoA5xIzN7wQ34Y16mCwBbp9WADCDA8laaeGN2onjQ57K9ZFWADDNZ8kl88IbtfZCrre0AYDyA/3vk/NBhT
dqb0euP/pkBei0neUH+staIbzRDJ/m+pdkfQNAF72a3IktvNFIsRP1fydn+QB0Rcy4PJtrl1YIbzRXLFD9Za5jWgHQahdyPZ3rE60Q3miHf03VWjgA2ieOjIrDd/drhfBGu7xVQhwA7XEy178lh7ULb7RWTJ/+r2QnKkAbxOG7cUj7Ka0Q3mi3WAf3D6m6GxWAZooz3J7KdU4rhDe641e53tQGgMaJY0DiDDezKMIbHfR2qs6DA6D+YuYkDmHfqRXCG90Wi11jGtUp3AD1dTbXb1N1CDvCG3w19P4/cx3UCoDaiZ2k/y/XYa2oj0WXDnymC9TFHbm+rQ0AtbA714upmjKlRoy8USexg+mftQFgrGIpy2u5nhPchDfoxZlUTaO62B5g9OL4j7jq6l2tEN6g35/6/jHXh1oBMDJTNybs04p6s+aNutuc6yfaADBUsZM01reZ9WgAI2/U3aFcv9AGgKGIHf+vpuoMN8FNeIOBuZDrf6TqflQABiPWGP8m1y6taBbTpjTNXbme0AaABdmf69lck1rRPEbeaJoPkuNEABbirVRdLC+4CW8wMlPHiZzVCoCeTZbQtiO5lrDRTJvSdA/nul8bAOYU06TP+aFXeIO6WJ/rZ8lIMsDVYoTtjVzvaIXwBnUTwe3nudZpBcBXTqXqCJCjWiG8QZ2ZRgWoLpV/OTm7TXiDhohp1B/lWqkVQMecL6Ftt1YIb9C4j+1c38l1m1YAHXEg1/O5TmuF8AZNdmuqDvVdphVAS13M9Xqu97SiG5ZoAS33aflp9Fu5btYOoGViM0IcAXJSK7rDyBtdsj3XY8koHNB8cQRI3JTwdnLgbucYeaNLdqfqoMrHc92iHUBDHU/V2rZjWtFNRt7oqltLiFuuFUBDXErVaNs75XU6ysgbXRVr4WIU7tFcd2gHUHOHcr2YrG0jGXmDsCVVGxrWagVQMxdSdb3V+8naNoQ3uMLiXA/kui+5IxWoh32pGm1zbhvCG8whbmeItXBbtAIYk7O5Xs31iVYgvEHvtqdqPdwKrQBGJKZF46DdHcmdpAhvMC9Lcz2U6+5UXbcFMCyHc72UqmNAQHiDBbouVVOpm7UCGLDJVG1I+FArEN5g8OKS+0dyrdIKYIEul8AWwe2cdiC8wfDErtT7U7UrdbF2APMQ9y3HhgRTpAhvMEIx+hajcLdpBdCjU7ley7VXKxDeYHw2pmpXqvVwwGxi52hcIL8rudYK4Q1q46ZcD6fqnDiAMLWuLY7+OKsdDIq7TWEw4qegz1N1T+pf5FqpJdBpe3K9mdxFyhAYeYPBi40M96RqU8My7YBOic0IsYP0iFYgvEHzLCshLmqpdkCrHcv1eq79WoHwBu0IcTEK941kqQK0TewgjZG2T7UC4Q3aZ3mue4U4aE1oix2kH6dqYwIIb9DyEHdPCXGmU6FZYgPCW6kaaRPaEN6gY2I69a5UjcYt1w4Q2kB4g2ZYPC3EuTcV6iWusNoptCG8ATOZyHVrCXEbtAPGKo78eCfXPq1AeAN6sa2EuBu0AkYmRtb2lNB2VDuoKzveoJ72l4rrtmJzw/ZUTa8Cg3cx10e53k3VLlKoNSNv0AyxueHOXHfnWq0dMBBf5vogVfePTmoHwhswlM/ZXDem6piRbdoB8xLr2d5L1Z3ENiEgvAEjszZVu1RvT44agWu5kKoDdSO0uSwe4Q0Yq9ilenMJclu1A64Qd47GtOjuXOe1A+ENqJs1qVobF6NxK7WDjjpXwtpHJbyB8AbU/3M7VaNwd6RqVM5OVbog1rLFKNveVO0ghVZyVAi0UyzCnjpuJD7P4/Df7cm0Ku0T69d25/okOeaDrvx0buQNOmVVCXJRG7WDhjpdwlqUaVGEN6Az1kwLcq7jou7iHLa4/WB3rkPagfAGCHJViLsl1/XaQU3ECNveUgeTM9lAeANmFLtUb0rVRodYIzehJYxQrGHbUwKb+0VBeAP6tDTXDSXMxUuHATNol1I1Dfp5KQfogvAGDOrrRarWxt1QalP5NehX3Cm6r1Qc7+HwXBDegBFYlqpp1RvKyzVawiziaqqDJagZXQPhDaiJOIZkS65t5eVqLemsOCD3UAlrEdqOJJsNQHgDam91CXGbSl2nJa11rgS0Q9PC2iVtAeENaLal04Jc1MbyazTPiRLUIqQdTqZBQXgDOmNtqs6Vi9pQXgp09XG5BLPjqTq241h5aYMBCG8AVwS6mGJdP+1l/JqdrcMVgexEqWOlIrRd0BoQ3gD6NVFCXNS6Euamaon29OVsqkbTTk4La/H6Ga2BZvBFD2iCWPw+NRp0tZXTgtzqabWqVNdG7GKnZ1wrdWqWMpIGwhvAWJ0pdXCG/7eohLupILei/Pfy8nLFtKrzNWCx9ix2dE6Wl2dLnZ72/k+9fs6HBAhvAE11uYSa0z383sWpOnh4aXk5/fXF5fWJ8vqS8vrSGcLi0hnehukL/M9f9esXrqrz015OTgttAF/5/wIMAFo8xcnRj/5BAAAAAElFTkSuQmCC"},vCg7:function(e,t,n){e.exports=n.p+"static/square-30d9de1abf4466d9ff71a2671f626b0d.png"}}]);
//# sourceMappingURL=component---src-pages-who-we-are-js-2f98e7f89bd07b024c85.js.map
|
e
|
coin_change_solution.js
|
#!/usr/bin/env node
// Solve the "Coin Change" problem using a bottom-up dynamic programming
// approach. The time complexity is O(n * coins.length) since we have a nested
// loop. The space complexity is the same, as we store an
// (n + 1) x (coins.length + 1) matrix.
//
// * `coins` is an array of the coin values, e.g. [ 1, 2, 3 ]. We assume it
//   to be non-empty.
// * `n` is the amount, e.g. 4 cents.
//
// A top-down solution (memoization) is also possible, but it can cause
// stack overflows for large inputs.
//
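// For reference, the recurrence the loops below compute (an equivalent
// restatement of the code, not part of the original notes):
//
//   ways(i, j) = ways(i, j - 1) + ways(i - coins[j - 1], j)
//
// i.e. either we never use the j-th coin type, or we use it at least once
// and remain allowed to use it again.
//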
function
|
(coins, n) {
// The two-dimensional buffer will contain answers to this question:
// "how many permutations are there for an amount of `i` cents, using
// only the first `j` coin types?" E.g. `buffer[10][2]` will tell us how
// many permutations there are when giving back 10 cents using only the
// first two coin types [ 1, 2 ].
var buffer = new Array(n + 1);
for (var i = 0; i <= n; ++i)
buffer[i] = new Array(coins.length + 1);
// For all the cases where we need to give back 0 cents, there's exactly
// 1 permutation: the empty set. Note that buffer[0][0] won't ever be
// needed.
for (var j = 1; j <= coins.length; ++j)
buffer[0][j] = 1;
// We process each case: 1 cent, 2 cent, etc. up to `n` cents, included.
for (i = 1; i <= n; ++i) {
// No more coins? No permutation is possible to attain `i` cents.
buffer[i][0] = 0;
// Now we consider the cases when we have J coin types available.
for (j = 1; j <= coins.length; ++j) {
// First, we take into account all the known permutations possible
// _without_ using the J-th coin (actually computed at the previous
// loop step).
var value = buffer[i][j - 1];
// Then, we add all the permutations possible by consuming the J-th
// coin itself, if we can.
if (coins[j - 1] <= i)
value += buffer[i - coins[j - 1]][j];
// We now know the answer for this specific case.
buffer[i][j] = value;
}
}
// Return the bottom-right answer, the one we were looking for in the
// first place.
return buffer[n][coins.length];
}
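// A quick worked check (illustrative, not executed here): with
// coins = [ 1, 2, 3 ] and n = 4, the last row fills in as
// buffer[4][1] = 1 (1+1+1+1), buffer[4][2] = 3 (adds 1+1+2 and 2+2) and
// buffer[4][3] = 4 (adds 1+3), so findPermutations([ 1, 2, 3 ], 4)
// returns 4.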
// The boring stuff: parsing and printing.
//
function processData(input) {
var lines = input.split('\n');
var coins = lines[0].split(',').map(function (s) {return +s;});
var n = +lines[1];
var res = findPermutations(coins, n);
console.log(res);
}
process.stdin.resume();
process.stdin.setEncoding("ascii");
var _input = "";
process.stdin.on("data", function (input) {
_input += input;
});
process.stdin.on("end", function () {
processData(_input);
});
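// Example session (illustrative): feeding the two lines
//   1,2,3
//   4
// on stdin prints `4`, matching the worked check above.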
|
findPermutations
|
mod.rs
|
#[doc = r" Value read from the register"]
pub struct R {
bits: u32,
}
#[doc = r" Value to write to the register"]
pub struct W {
bits: u32,
}
impl super::P2_26 {
#[doc = r" Modifies the contents of the register"]
#[inline]
pub fn modify<F>(&self, f: F)
where
for<'w> F: FnOnce(&R, &'w mut W) -> &'w mut W,
{
let bits = self.register.get();
let r = R { bits: bits };
let mut w = W { bits: bits };
f(&r, &mut w);
self.register.set(w.bits);
}
#[doc = r" Reads the contents of the register"]
#[inline]
pub fn read(&self) -> R {
R {
bits: self.register.get(),
}
}
#[doc = r" Writes to the register"]
#[inline]
pub fn write<F>(&self, f: F)
where
F: FnOnce(&mut W) -> &mut W,
{
let mut w = W::reset_value();
f(&mut w);
self.register.set(w.bits);
}
#[doc = r" Writes the reset value to the register"]
#[inline]
pub fn reset(&self) {
self.write(|w| w)
}
}
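// Illustrative usage sketch (commented out): `pinconn` is a hypothetical
// handle owning this register, and the `func()` / `ssp0_miso()` field
// accessors follow the usual svd2rust proxy pattern; only
// `FUNCR::is_ssp0_miso` appears in this excerpt, the rest is assumed.
//
//     pinconn.p2_26.modify(|_, w| w.func().ssp0_miso());
//     if pinconn.p2_26.read().func().is_ssp0_miso() {
//         // P2.26 is now routed to SSP0 MISO.
//     }
//
// `modify` performs a read-modify-write, `read` returns a snapshot `R`,
// and `write` starts from the reset value before applying the closure.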
#[doc = "Possible values of the field `FUNC`"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum FUNCR {
#[doc = "General purpose digital input/output\n pin."]
P2_26,
#[doc = "SDRAM clock enable 2."]
EMC_CKE2,
#[doc = "Master In Slave Out for SSP0."]
SSP0_MISO,
#[doc = "Match output for Timer 3, channel 0."]
T3_MAT0,
#[doc = r" Reserved"]
_Reserved(u8),
}
impl FUNCR {
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bits(&self) -> u8 {
match *self {
FUNCR::P2_26 => 0,
FUNCR::EMC_CKE2 => 1,
FUNCR::SSP0_MISO => 2,
FUNCR::T3_MAT0 => 3,
FUNCR::_Reserved(bits) => bits,
}
}
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _from(value: u8) -> FUNCR {
match value {
0 => FUNCR::P2_26,
1 => FUNCR::EMC_CKE2,
2 => FUNCR::SSP0_MISO,
3 => FUNCR::T3_MAT0,
i => FUNCR::_Reserved(i),
}
}
#[doc = "Checks if the value of the field is `P2_26`"]
#[inline]
pub fn is_p2_26(&self) -> bool {
*self == FUNCR::P2_26
}
#[doc = "Checks if the value of the field is `EMC_CKE2`"]
#[inline]
pub fn is_emc_cke2(&self) -> bool {
*self == FUNCR::EMC_CKE2
}
#[doc = "Checks if the value of the field is `SSP0_MISO`"]
#[inline]
pub fn is_ssp0_miso(&self) -> bool {
*self == FUNCR::SSP0_MISO
}
#[doc = "Checks if the value of the field is `T3_MAT0`"]
#[inline]
pub fn is_t3_mat0(&self) -> bool {
*self == FUNCR::T3_MAT0
}
}
#[doc = "Possible values of the field `MODE`"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum MODER {
#[doc = "Inactive (no pull-down/pull-up resistor\n enabled)."]
INACTIVE,
#[doc = "Pull-down resistor enabled."]
PULLDOWN_EN,
#[doc = "Pull-up resistor enabled."]
PULLUP_EN,
#[doc = "Repeater mode."]
REPEATER_MODE,
}
impl MODER {
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bits(&self) -> u8 {
match *self {
MODER::INACTIVE => 0,
MODER::PULLDOWN_EN => 1,
MODER::PULLUP_EN => 2,
MODER::REPEATER_MODE => 3,
}
}
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _from(value: u8) -> MODER {
match value {
0 => MODER::INACTIVE,
1 => MODER::PULLDOWN_EN,
2 => MODER::PULLUP_EN,
3 => MODER::REPEATER_MODE,
_ => unreachable!(),
}
}
#[doc = "Checks if the value of the field is `INACTIVE`"]
#[inline]
pub fn is_inactive(&self) -> bool {
*self == MODER::INACTIVE
}
#[doc = "Checks if the value of the field is `PULLDOWN_EN`"]
#[inline]
pub fn is_pulldown_en(&self) -> bool {
*self == MODER::PULLDOWN_EN
}
#[doc = "Checks if the value of the field is `PULLUP_EN`"]
#[inline]
pub fn is_pullup_en(&self) -> bool {
*self == MODER::PULLUP_EN
}
#[doc = "Checks if the value of the field is `REPEATER_MODE`"]
#[inline]
pub fn is_repeater_mode(&self) -> bool {
*self == MODER::REPEATER_MODE
}
}
#[doc = "Possible values of the field `HYS`"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum HYSR {
#[doc = "Disable."]
DISABLE,
#[doc = "Enable."]
ENABLE,
}
impl HYSR {
#[doc = r" Returns `true` if the bit is clear (0)"]
#[inline]
pub fn bit_is_clear(&self) -> bool {
!self.bit()
}
#[doc = r" Returns `true` if the bit is set (1)"]
#[inline]
pub fn bit_is_set(&self) -> bool {
self.bit()
}
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bit(&self) -> bool {
match *self {
HYSR::DISABLE => false,
HYSR::ENABLE => true,
}
}
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _from(value: bool) -> HYSR {
match value {
false => HYSR::DISABLE,
true => HYSR::ENABLE,
}
}
#[doc = "Checks if the value of the field is `DISABLE`"]
#[inline]
pub fn is_disable(&self) -> bool {
*self == HYSR::DISABLE
}
#[doc = "Checks if the value of the field is `ENABLE`"]
#[inline]
pub fn is_enable(&self) -> bool {
*self == HYSR::ENABLE
}
}
#[doc = "Possible values of the field `INV`"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum INVR {
#[doc = "Input not inverted (HIGH on pin reads as 1, LOW on pin\n reads as 0)."]
INPUT_NOT_INVERTED,
#[doc = "Input inverted (HIGH on pin reads as 0, LOW on pin reads as\n 1)."]
INPUT_INVERTED_HIGH,
}
impl INVR {
#[doc = r" Returns `true` if the bit is clear (0)"]
#[inline]
pub fn bit_is_clear(&self) -> bool {
!self.bit()
}
#[doc = r" Returns `true` if the bit is set (1)"]
#[inline]
pub fn bit_is_set(&self) -> bool {
self.bit()
}
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bit(&self) -> bool {
match *self {
INVR::INPUT_NOT_INVERTED => false,
INVR::INPUT_INVERTED_HIGH => true,
}
}
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _from(value: bool) -> INVR {
match value {
false => INVR::INPUT_NOT_INVERTED,
true => INVR::INPUT_INVERTED_HIGH,
}
}
#[doc = "Checks if the value of the field is `INPUT_NOT_INVERTED`"]
#[inline]
pub fn is_input_not_inverted(&self) -> bool {
*self == INVR::INPUT_NOT_INVERTED
}
#[doc = "Checks if the value of the field is `INPUT_INVERTED_HIGH`"]
#[inline]
pub fn is_input_inverted_high(&self) -> bool {
*self == INVR::INPUT_INVERTED_HIGH
}
}
#[doc = "Possible values of the field `SLEW`"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum SLEWR {
#[doc = "Standard mode, output slew rate control is enabled. More\n outputs can be switched simultaneously."]
STANDARD,
#[doc = "Fast mode, slew rate control is disabled. Refer to the\n appropriate specific device data sheet for details."]
FAST,
}
impl SLEWR {
#[doc = r" Returns `true` if the bit is clear (0)"]
#[inline]
pub fn bit_is_clear(&self) -> bool {
!self.bit()
}
#[doc = r" Returns `true` if the bit is set (1)"]
#[inline]
pub fn bit_is_set(&self) -> bool {
self.bit()
}
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bit(&self) -> bool {
match *self {
SLEWR::STANDARD => false,
SLEWR::FAST => true,
}
}
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _from(value: bool) -> SLEWR {
match value {
false => SLEWR::STANDARD,
true => SLEWR::FAST,
}
}
#[doc = "Checks if the value of the field is `STANDARD`"]
#[inline]
pub fn is_standard(&self) -> bool {
*self == SLEWR::STANDARD
}
#[doc = "Checks if the value of the field is `FAST`"]
#[inline]
pub fn is_fast(&self) -> bool {
*self == SLEWR::FAST
}
}
#[doc = "Possible values of the field `OD`"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum ODR {
#[doc = "Disable."]
DISABLE,
#[doc = "Open-drain mode enabled. This is not a true open-drain\n mode. Input cannot be pulled up above VDD."]
ENABLED,
}
impl ODR {
#[doc = r" Returns `true` if the bit is clear (0)"]
#[inline]
pub fn bit_is_clear(&self) -> bool {
!self.bit()
}
#[doc = r" Returns `true` if the bit is set (1)"]
#[inline]
pub fn bit_is_set(&self) -> bool {
self.bit()
}
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bit(&self) -> bool {
match *self {
ODR::DISABLE => false,
ODR::ENABLED => true,
}
}
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _from(value: bool) -> ODR {
match value {
false => ODR::DISABLE,
true => ODR::ENABLED,
}
}
#[doc = "Checks if the value of the field is `DISABLE`"]
#[inline]
pub fn is_disable(&self) -> bool {
*self == ODR::DISABLE
}
#[doc = "Checks if the value of the field is `ENABLED`"]
#[inline]
pub fn is_enabled(&self) -> bool {
*self == ODR::ENABLED
}
}
#[doc = "Values that can be written to the field `FUNC`"]
pub enum FUNCW {
#[doc = "General purpose digital input/output\n pin."]
P2_26,
#[doc = "SDRAM clock enable 2."]
EMC_CKE2,
#[doc = "Master In Slave Out for SSP0."]
SSP0_MISO,
#[doc = "Match output for Timer 3, channel 0."]
T3_MAT0,
}
impl FUNCW {
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _bits(&self) -> u8 {
match *self {
FUNCW::P2_26 => 0,
FUNCW::EMC_CKE2 => 1,
FUNCW::SSP0_MISO => 2,
FUNCW::T3_MAT0 => 3,
}
}
}
#[doc = r" Proxy"]
pub struct _FUNCW<'a> {
w: &'a mut W,
}
impl<'a> _FUNCW<'a> {
#[doc = r" Writes `variant` to the field"]
#[inline]
pub fn variant(self, variant: FUNCW) -> &'a mut W {
unsafe { self.bits(variant._bits()) }
}
#[doc = "General purpose digital input/output pin."]
#[inline]
pub fn p2_26(self) -> &'a mut W {
self.variant(FUNCW::P2_26)
}
#[doc = "SDRAM clock enable 2."]
#[inline]
pub fn emc_cke2(self) -> &'a mut W {
self.variant(FUNCW::EMC_CKE2)
}
#[doc = "Master In Slave Out for SSP0."]
#[inline]
pub fn ssp0_miso(self) -> &'a mut W {
self.variant(FUNCW::SSP0_MISO)
}
#[doc = "Match output for Timer 3, channel 0."]
#[inline]
pub fn t3_mat0(self) -> &'a mut W {
self.variant(FUNCW::T3_MAT0)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub unsafe fn bits(self, value: u8) -> &'a mut W {
const MASK: u8 = 7;
const OFFSET: u8 = 0;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = "Values that can be written to the field `MODE`"]
pub enum MODEW {
#[doc = "Inactive (no pull-down/pull-up resistor\n enabled)."]
INACTIVE,
#[doc = "Pull-down resistor enabled."]
PULLDOWN_EN,
#[doc = "Pull-up resistor enabled."]
PULLUP_EN,
#[doc = "Repeater mode."]
REPEATER_MODE,
}
impl MODEW {
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _bits(&self) -> u8 {
match *self {
MODEW::INACTIVE => 0,
MODEW::PULLDOWN_EN => 1,
MODEW::PULLUP_EN => 2,
MODEW::REPEATER_MODE => 3,
}
}
}
#[doc = r" Proxy"]
pub struct _MODEW<'a> {
w: &'a mut W,
}
impl<'a> _MODEW<'a> {
#[doc = r" Writes `variant` to the field"]
#[inline]
pub fn variant(self, variant: MODEW) -> &'a mut W {
{
self.bits(variant._bits())
}
}
#[doc = "Inactive (no pull-down/pull-up resistor enabled)."]
#[inline]
pub fn inactive(self) -> &'a mut W {
self.variant(MODEW::INACTIVE)
}
#[doc = "Pull-down resistor enabled."]
#[inline]
pub fn pulldown_en(self) -> &'a mut W {
self.variant(MODEW::PULLDOWN_EN)
}
#[doc = "Pull-up resistor enabled."]
#[inline]
pub fn pullup_en(self) -> &'a mut W {
self.variant(MODEW::PULLUP_EN)
}
#[doc = "Repeater mode."]
#[inline]
pub fn repeater_mode(self) -> &'a mut W {
self.variant(MODEW::REPEATER_MODE)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bits(self, value: u8) -> &'a mut W {
const MASK: u8 = 3;
const OFFSET: u8 = 3;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = "Values that can be written to the field `HYS`"]
pub enum HYSW {
#[doc = "Disable."]
DISABLE,
#[doc = "Enable."]
ENABLE,
}
impl HYSW {
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _bits(&self) -> bool {
match *self {
HYSW::DISABLE => false,
HYSW::ENABLE => true,
}
}
}
#[doc = r" Proxy"]
pub struct _HYSW<'a> {
w: &'a mut W,
}
impl<'a> _HYSW<'a> {
#[doc = r" Writes `variant` to the field"]
#[inline]
pub fn variant(self, variant: HYSW) -> &'a mut W {
{
self.bit(variant._bits())
}
}
#[doc = "Disable."]
#[inline]
pub fn disable(self) -> &'a mut W {
self.variant(HYSW::DISABLE)
}
#[doc = "Enable."]
#[inline]
pub fn enable(self) -> &'a mut W {
self.variant(HYSW::ENABLE)
}
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 5;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = "Values that can be written to the field `INV`"]
pub enum INVW {
#[doc = "Input not inverted (HIGH on pin reads as 1, LOW on pin\n reads as 0)."]
INPUT_NOT_INVERTED,
#[doc = "Input inverted (HIGH on pin reads as 0, LOW on pin reads as\n 1)."]
INPUT_INVERTED_HIGH,
}
impl INVW {
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _bits(&self) -> bool {
match *self {
INVW::INPUT_NOT_INVERTED => false,
INVW::INPUT_INVERTED_HIGH => true,
}
}
}
#[doc = r" Proxy"]
pub struct _INVW<'a> {
w: &'a mut W,
}
impl<'a> _INVW<'a> {
#[doc = r" Writes `variant` to the field"]
#[inline]
pub fn variant(self, variant: INVW) -> &'a mut W {
{
self.bit(variant._bits())
}
}
#[doc = "Input not inverted (HIGH on pin reads as 1, LOW on pin reads as 0)."]
#[inline]
pub fn input_not_inverted(self) -> &'a mut W {
self.variant(INVW::INPUT_NOT_INVERTED)
}
#[doc = "Input inverted (HIGH on pin reads as 0, LOW on pin reads as 1)."]
#[inline]
pub fn input_inverted_high(self) -> &'a mut W {
self.variant(INVW::INPUT_INVERTED_HIGH)
}
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 6;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = "Values that can be written to the field `SLEW`"]
pub enum SLEWW {
#[doc = "Standard mode, output slew rate control is enabled. More\n outputs can be switched simultaneously."]
STANDARD,
#[doc = "Fast mode, slew rate control is disabled. Refer to the\n appropriate specific device data sheet for details."]
FAST,
}
impl SLEWW {
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _bits(&self) -> bool {
match *self {
SLEWW::STANDARD => false,
SLEWW::FAST => true,
}
}
}
#[doc = r" Proxy"]
pub struct _SLEWW<'a> {
w: &'a mut W,
}
impl<'a> _SLEWW<'a> {
#[doc = r" Writes `variant` to the field"]
#[inline]
pub fn variant(self, variant: SLEWW) -> &'a mut W {
{
self.bit(variant._bits())
}
}
#[doc = "Standard mode, output slew rate control is enabled. More outputs can be switched simultaneously."]
#[inline]
pub fn standard(self) -> &'a mut W {
self.variant(SLEWW::STANDARD)
}
#[doc = "Fast mode, slew rate control is disabled. Refer to the appropriate specific device data sheet for details."]
#[inline]
pub fn fast(self) -> &'a mut W {
self.variant(SLEWW::FAST)
}
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 9;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = "Values that can be written to the field `OD`"]
pub enum ODW {
#[doc = "Disable."]
DISABLE,
#[doc = "Open-drain mode enabled. This is not a true open-drain\n mode. Input cannot be pulled up above VDD."]
ENABLED,
}
impl ODW {
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _bits(&self) -> bool {
match *self {
ODW::DISABLE => false,
ODW::ENABLED => true,
}
}
}
#[doc = r" Proxy"]
pub struct _ODW<'a> {
w: &'a mut W,
}
impl<'a> _ODW<'a> {
#[doc = r" Writes `variant` to the field"]
#[inline]
pub fn variant(self, variant: ODW) -> &'a mut W {
{
self.bit(variant._bits())
}
}
#[doc = "Disable."]
#[inline]
pub fn disable(self) -> &'a mut W {
self.variant(ODW::DISABLE)
}
#[doc = "Open-drain mode enabled. This is not a true open-drain mode. Input cannot be pulled up above VDD."]
#[inline]
pub fn enabled(self) -> &'a mut W {
self.variant(ODW::ENABLED)
}
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 10;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
impl R {
#[doc = r" Value of the register as raw bits"]
#[inline]
pub fn bits(&self) -> u32 {
self.bits
}
#[doc = "Bits 0:2 - Selects pin function for pin P2[26]"]
#[inline]
pub fn func(&self) -> FUNCR {
FUNCR::_from({
const MASK: u8 = 7;
const OFFSET: u8 = 0;
((self.bits >> OFFSET) & MASK as u32) as u8
})
}
#[doc = "Bits 3:4 - Selects function mode (on-chip pull-up/pull-down resistor control)."]
#[inline]
pub fn mode(&self) -> MODER {
MODER::_from({
const MASK: u8 = 3;
const OFFSET: u8 = 3;
((self.bits >> OFFSET) & MASK as u32) as u8
})
}
#[doc = "Bit 5 - Hysteresis."]
#[inline]
pub fn hys(&self) -> HYSR {
HYSR::_from({
const MASK: bool = true;
const OFFSET: u8 = 5;
((self.bits >> OFFSET) & MASK as u32) != 0
})
}
#[doc = "Bit 6 - Invert input"]
#[inline]
pub fn inv(&self) -> INVR {
INVR::_from({
const MASK: bool = true;
const OFFSET: u8 = 6;
((self.bits >> OFFSET) & MASK as u32) != 0
})
}
#[doc = "Bit 9 - Driver slew rate"]
#[inline]
pub fn slew(&self) -> SLEWR {
SLEWR::_from({
const MASK: bool = true;
const OFFSET: u8 = 9;
((self.bits >> OFFSET) & MASK as u32) != 0
})
}
#[doc = "Bit 10 - Open-drain mode."]
#[inline]
pub fn od(&self) -> ODR {
ODR::_from({
const MASK: bool = true;
const OFFSET: u8 = 10;
((self.bits >> OFFSET) & MASK as u32) != 0
})
}
}
impl W {
#[doc = r" Reset value of the register"]
#[inline]
pub fn reset_value() -> W {
W { bits: 48 }
}
#[doc = r" Writes raw bits to the register"]
#[inline]
pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
self.bits = bits;
self
}
#[doc = "Bits 0:2 - Selects pin function for pin P2[26]"]
#[inline]
pub fn func(&mut self) -> _FUNCW {
_FUNCW { w: self }
}
#[doc = "Bits 3:4 - Selects function mode (on-chip pull-up/pull-down resistor control)."]
#[inline]
pub fn
|
(&mut self) -> _MODEW {
_MODEW { w: self }
}
#[doc = "Bit 5 - Hysteresis."]
#[inline]
pub fn hys(&mut self) -> _HYSW {
_HYSW { w: self }
}
#[doc = "Bit 6 - Invert input"]
#[inline]
pub fn inv(&mut self) -> _INVW {
_INVW { w: self }
}
#[doc = "Bit 9 - Driver slew rate"]
#[inline]
pub fn slew(&mut self) -> _SLEWW {
_SLEWW { w: self }
}
#[doc = "Bit 10 - Open-drain mode."]
#[inline]
pub fn od(&mut self) -> _ODW {
_ODW { w: self }
}
}
|
mode
|
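A minimal, runnable sketch of the masking arithmetic the write proxies above perform (field offsets, variant values, and the reset value 48 are taken from the generated code; the standalone `main` is only for illustration):

// What `w.func().ssp0_miso()` followed by `w.mode().pulldown_en()` do to the
// register word, starting from `W::reset_value()` (bits = 48: MODE = pull-up,
// HYS set).
fn main() {
    let mut bits: u32 = 48;
    // FUNC: 3-bit field at offset 0, SSP0_MISO = 2.
    bits = (bits & !(7u32 << 0)) | ((2u32 & 7) << 0);
    // MODE: 2-bit field at offset 3, PULLDOWN_EN = 1.
    bits = (bits & !(3u32 << 3)) | ((1u32 & 3) << 3);
    assert_eq!(bits & 7, 2);
    assert_eq!((bits >> 3) & 3, 1);
    println!("register word: {:#x}", bits);
}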
history.py
|
from __future__ import annotations
from .data_structures import Stack
from .operation import Operation
class HistoryManager:
def __init__(self):
self.undo_stack: Stack[Operation] = Stack()
self.redo_stack: Stack[Operation] = Stack()
def add_operation(self, operation_instance: Operation):
self.undo_stack.append(operation_instance)
def undo(self) -> Operation:
operation_to_undo = self.undo_stack.pop()
|
operation_to_redo = self.redo_stack.pop()
self.undo_stack.append(operation_to_redo)
return operation_to_redo
def __contains__(self, item):
if isinstance(item, Operation):
return item in self.undo_stack
|
self.redo_stack.append(operation_to_undo)
return operation_to_undo
def redo(self) -> Operation:
|
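The `HistoryManager` above is the classic two-stack undo/redo discipline: undo pops the most recent operation and parks it on the redo stack; redo moves it back. A compact, runnable Rust rendering of the same idea (`String` stands in for the `Operation` type, an assumption made only for self-containment):

struct History {
    undo_stack: Vec<String>,
    redo_stack: Vec<String>,
}

impl History {
    fn add_operation(&mut self, op: String) {
        self.undo_stack.push(op);
    }
    // Pop from undo, park on redo, and hand the operation back to the caller.
    fn undo(&mut self) -> Option<String> {
        let op = self.undo_stack.pop()?;
        self.redo_stack.push(op.clone());
        Some(op)
    }
    // Mirror image: pop from redo and restore to undo.
    fn redo(&mut self) -> Option<String> {
        let op = self.redo_stack.pop()?;
        self.undo_stack.push(op.clone());
        Some(op)
    }
}

fn main() {
    let mut h = History { undo_stack: vec![], redo_stack: vec![] };
    h.add_operation("insert 'a'".to_string());
    assert_eq!(h.undo().as_deref(), Some("insert 'a'"));
    assert_eq!(h.redo().as_deref(), Some("insert 'a'"));
}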
0001_initial.py
|
# Generated by Django 3.2.12 on 2022-02-16 17:52
import datetime
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import uuid
class
|
(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Task',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('external_id', models.UUIDField(db_index=True, default=uuid.uuid4, editable=False, unique=True)),
('title', models.CharField(max_length=100)),
('priority', models.IntegerField(default=0)),
('description', models.TextField(blank=True, max_length=500)),
('completed', models.BooleanField(default=False)),
('date_created', models.DateTimeField(auto_now_add=True)),
('deleted', models.BooleanField(default=False)),
('status', models.CharField(choices=[('pending', 'Pending'), ('in_progress', 'In Progress'), ('completed', 'Completed'), ('cancelled', 'Cancelled')], default='pending', max_length=100)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Report',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('consent', models.BooleanField(default=False, help_text='Uncheck to stop receiving reports')),
('time', models.TimeField(default=datetime.time(0, 0), help_text='All times are in UTC format.')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='History',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('old_status', models.CharField(choices=[('pending', 'Pending'), ('in_progress', 'In Progress'), ('completed', 'Completed'), ('cancelled', 'Cancelled')], default='n/a', max_length=100)),
('new_status', models.CharField(choices=[('pending', 'Pending'), ('in_progress', 'In Progress'), ('completed', 'Completed'), ('cancelled', 'Cancelled')], default='n/a', max_length=100)),
('change_date', models.DateTimeField(auto_now=True)),
('task', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='tasks.task')),
],
),
]
|
Migration
|
rtsV2Handler.js
|
/* Copyright (c) 2019-2020 Digital Dream Labs. See LICENSE file for details. */
var { RtsCliUtil } = require("./rtsCliUtil.js");
var { Anki } = require("./messageExternalComms.js");
if (!Rts) {
var Rts = Anki.Vector.ExternalComms;
}
class
|
{
constructor(vectorBle, sodium, sessions) {
this.vectorBle = vectorBle;
this.vectorBle.onReceive(this);
this.sodium = sodium;
this.sessions = sessions;
this.encrypted = false;
this.keysAuthorized = false;
this.waitForResponse = "";
this.promiseKeys = {};
// remembered state
this.wifiScanResults = {};
this.otaProgress = {};
this.logId = 0;
this.logFile = [];
this.isReading = false;
this.cryptoKeys = {};
this.firstTimePair = true;
this.hasProgressBar = false;
this.helpArgs = {};
this.connRequestHandle = null;
// events
this.onEncryptedConnectionEvent = [];
this.onReadyForPinEvent = [];
this.onOtaProgressEvent = [];
this.onLogProgressEvent = [];
this.onCliResponseEvent = [];
this.onPrintEvent = [];
this.onCommandDoneEvent = [];
this.onNewProgressBarEvent = [];
this.onUpdateProgressBarEvent = [];
this.onLogsDownloadedEvent = [];
this.setCliHelp();
}
onReadyForPin(fnc) {
this.onReadyForPinEvent.push(fnc);
}
onOtaProgress(fnc) {
this.onOtaProgressEvent.push(fnc);
}
onLogProgress(fnc) {
this.onLogProgressEvent.push(fnc);
}
onEncryptedConnection(fnc) {
this.onEncryptedConnectionEvent.push(fnc);
}
onCliResponse(fnc) {
this.onCliResponseEvent.push(fnc);
}
onPrint(fnc) {
this.onPrintEvent.push(fnc);
}
onCommandDone(fnc) {
this.onCommandDoneEvent.push(fnc);
}
onNewProgressBar(fnc) {
this.onNewProgressBarEvent.push(fnc);
}
onUpdateProgressBar(fnc) {
this.onUpdateProgressBarEvent.push(fnc);
}
onLogsDownloaded(fnc) {
this.onLogsDownloadedEvent.push(fnc);
}
enterPin(pin) {
let clientKeys = this.sodium.crypto_kx_client_session_keys(
this.keys.publicKey,
this.keys.privateKey,
this.remoteKeys.publicKey
);
let sharedRx = this.sodium.crypto_generichash(32, clientKeys.sharedRx, pin);
let sharedTx = this.sodium.crypto_generichash(32, clientKeys.sharedTx, pin);
this.cryptoKeys.decrypt = sharedRx;
this.cryptoKeys.encrypt = sharedTx;
this.send(
Rts.RtsConnection_2.NewRtsConnection_2WithRtsAck(
new Rts.RtsAck(Rts.RtsConnection_2Tag.RtsNonceMessage)
)
);
this.encrypted = true;
}
cleanup() {
this.vectorBle.onReceiveUnsubscribe(this);
}
send(rtsConn2) {
let rtsConn = Rts.RtsConnection.NewRtsConnectionWithRtsConnection_2(
rtsConn2
);
let extResponse = Rts.ExternalComms.NewExternalCommsWithRtsConnection(
rtsConn
);
let data = extResponse.pack();
if (this.encrypted) {
data = this.encrypt(data);
}
let packet = Array.from(data); // todo: Buffer.from
this.vectorBle.send(packet);
}
receive(data) {
if (this.encrypted) {
data = this.decrypt(data);
}
if (data == null) {
return;
}
if (data[0] == 1 && data.length == 5) {
// data is handshake so we should bail
this.cancelConnection();
return;
}
let comms = new Rts.ExternalComms();
comms.unpack(data);
if (comms.tag == Rts.ExternalCommsTag.RtsConnection) {
switch (comms.value.tag) {
case Rts.RtsConnectionTag.RtsConnection_2: {
let rtsMsg = comms.value.value;
switch (rtsMsg.tag) {
case Rts.RtsConnection_2Tag.RtsConnRequest:
this.onRtsConnRequest(rtsMsg.value);
break;
case Rts.RtsConnection_2Tag.RtsNonceMessage:
this.onRtsNonceMessage(rtsMsg.value);
break;
case Rts.RtsConnection_2Tag.RtsChallengeMessage:
this.onRtsChallengeMessage(rtsMsg.value);
break;
case Rts.RtsConnection_2Tag.RtsChallengeSuccessMessage:
this.onRtsChallengeSuccessMessage(rtsMsg.value);
break;
// Post-connection messages
case Rts.RtsConnection_2Tag.RtsWifiScanResponse_2:
this.resolvePromise("wifi-scan", rtsMsg);
break;
case Rts.RtsConnection_2Tag.RtsWifiConnectResponse:
this.resolvePromise("wifi-connect", rtsMsg);
break;
case Rts.RtsConnection_2Tag.RtsStatusResponse_2:
this.resolvePromise("status", rtsMsg);
break;
case Rts.RtsConnection_2Tag.RtsWifiForgetResponse:
this.resolvePromise("wifi-forget", rtsMsg);
break;
case Rts.RtsConnection_2Tag.RtsWifiAccessPointResponse:
this.resolvePromise("wifi-ap", rtsMsg);
break;
case Rts.RtsConnection_2Tag.RtsWifiIpResponse:
this.resolvePromise("wifi-ip", rtsMsg);
break;
case Rts.RtsConnection_2Tag.RtsOtaUpdateResponse:
this.otaProgress["value"] = rtsMsg.value;
for (let i = 0; i < this.onOtaProgressEvent.length; i++) {
this.onOtaProgressEvent[i](rtsMsg.value);
}
if (this.hasProgressBar) {
for (let i = 0; i < this.onUpdateProgressBarEvent.length; i++) {
this.onUpdateProgressBarEvent[i](
Number(rtsMsg.value.current),
Number(rtsMsg.value.expected)
);
}
}
if (this.waitForResponse == "ota-start") {
if (rtsMsg.value.status == 3) {
this.resolvePromise(this.waitForResponse, rtsMsg);
} else if (rtsMsg.value.status >= 5) {
this.rejectPromise(this.waitForResponse, rtsMsg);
}
} else if (this.waitForResponse == "ota-cancel") {
if (rtsMsg.value.status != 2) {
this.resolvePromise(this.waitForResponse, rtsMsg);
}
}
break;
case Rts.RtsConnection_2Tag.RtsResponse:
this.rejectPromise(this.waitForResponse, rtsMsg);
break;
case Rts.RtsConnection_2Tag.RtsLogResponse:
if (rtsMsg.value.exitCode == 0) {
this.logId = rtsMsg.value.fileId;
this.logFile = [];
} else {
// todo: error case
}
break;
case Rts.RtsConnection_2Tag.RtsFileDownload:
let chunk = rtsMsg.value;
if (chunk.fileId == this.logId) {
this.logFile = this.logFile.concat(chunk.fileChunk);
for (let i = 0; i < this.onLogProgressEvent.length; i++) {
this.onLogProgressEvent[i](rtsMsg.value);
}
if (this.hasProgressBar) {
for (
let i = 0;
i < this.onUpdateProgressBarEvent.length;
i++
) {
this.onUpdateProgressBarEvent[i](
chunk.packetNumber,
chunk.packetTotal
);
}
}
if (chunk.packetNumber == chunk.packetTotal) {
// resolve promise
let fileName =
"vector-logs-" + RtsCliUtil.getDateString() + ".tar.bz2";
for (let i = 0; i < this.onLogsDownloadedEvent.length; i++) {
this.onLogsDownloadedEvent[i](fileName, this.logFile);
}
this.resolvePromise("logs", rtsMsg);
}
}
break;
default:
break;
}
break;
}
default:
break;
}
}
}
encrypt(data) {
let txt = new Uint8Array(data);
let nonce = new Uint8Array(this.nonces.encrypt);
let cipher = this.sodium.crypto_aead_xchacha20poly1305_ietf_encrypt(
txt,
null,
null,
nonce,
this.cryptoKeys.encrypt
);
this.sodium.increment(this.nonces.encrypt);
return cipher;
}
decrypt(cipher) {
let c = new Uint8Array(cipher);
let nonce = new Uint8Array(this.nonces.decrypt);
let data = null;
try {
data = this.sodium.crypto_aead_xchacha20poly1305_ietf_decrypt(
null,
c,
null,
nonce,
this.cryptoKeys.decrypt
);
this.sodium.increment(this.nonces.decrypt);
} catch (e) {
console.log("error decrypting");
this.sessions.deleteSession(this.remoteKeys.publicKey);
this.sessions.save();
}
return data;
}
onRtsConnRequest(msg) {
this.remoteKeys = {};
this.remoteKeys.publicKey = msg.publicKey;
let savedSession = this.sessions.getSession(this.remoteKeys.publicKey);
if (savedSession != null) {
this.keys = this.sessions.getKeys();
this.cryptoKeys = { encrypt: savedSession.tx, decrypt: savedSession.rx };
this.firstTimePair = false;
// use saved session
this.send(
Rts.RtsConnection_2.NewRtsConnection_2WithRtsConnResponse(
new Rts.RtsConnResponse(
Rts.RtsConnType.Reconnection,
this.keys.publicKey
)
)
);
} else if (
this.remoteKeys.publicKey.toString() in this.vectorBle.sessions
) {
let session = this.vectorBle.sessions[
this.remoteKeys.publicKey.toString()
];
this.keys = session.myKeys;
this.cryptoKeys = session.cryptoKeys;
this.firstTimePair = false;
// use saved session
this.send(
Rts.RtsConnection_2.NewRtsConnection_2WithRtsConnResponse(
new Rts.RtsConnResponse(
Rts.RtsConnType.Reconnection,
this.keys.publicKey
)
)
);
} else {
// generate keys
this.keys = this.sodium.crypto_kx_keypair();
let self = this;
this.connRequestHandle = setTimeout(function () {
self.cancelConnection();
}, 3000);
this.send(
Rts.RtsConnection_2.NewRtsConnection_2WithRtsConnResponse(
new Rts.RtsConnResponse(
Rts.RtsConnType.FirstTimePair,
this.keys.publicKey
)
)
);
}
}
cancelConnection() {
let msg =
"\x1b[91mPairing failed. Double press robot button and try again. You may need to do 'ble-clear'.\x1b[0m";
for (let i = 0; i < this.onPrintEvent.length; i++) {
this.onPrintEvent[i](msg);
}
this.vectorBle.tryDisconnect();
for (let i = 0; i < this.onCommandDoneEvent.length; i++) {
this.onCommandDoneEvent[i]();
}
}
onRtsNonceMessage(msg) {
if (this.connRequestHandle != null) {
clearTimeout(this.connRequestHandle);
this.connRequestHandle = null;
}
this.nonces = {};
this.nonces.decrypt = msg.toDeviceNonce;
this.nonces.encrypt = msg.toRobotNonce;
if (!this.firstTimePair) {
// No need to enter pin
this.send(
Rts.RtsConnection_2.NewRtsConnection_2WithRtsAck(
new Rts.RtsAck(Rts.RtsConnection_2Tag.RtsNonceMessage)
)
);
this.encrypted = true;
return;
}
for (let i = 0; i < this.onReadyForPinEvent.length; i++) {
this.onReadyForPinEvent[i](this);
}
}
onRtsChallengeMessage(msg) {
this.send(
Rts.RtsConnection_2.NewRtsConnection_2WithRtsChallengeMessage(
new Rts.RtsChallengeMessage(msg.number + 1)
)
);
}
onRtsChallengeSuccessMessage(msg) {
this.keysAuthorized = true;
this.vectorBle.sessions[this.remoteKeys.publicKey.toString()] = {
cryptoKeys: this.cryptoKeys,
myKeys: this.keys,
};
// successfully received rtsChallengeSuccessMessage
for (let i = 0; i < this.onEncryptedConnectionEvent.length; i++) {
this.onEncryptedConnectionEvent[i](this);
}
}
storePromiseMethods(str, resolve, reject) {
this.promiseKeys[str] = {};
this.promiseKeys[str].resolve = resolve;
this.promiseKeys[str].reject = reject;
}
resolvePromise(str, msg) {
if (this.promiseKeys[str] != null) {
this.promiseKeys[str].resolve(msg);
this.promiseKeys[str] = null;
}
}
rejectPromise(str, msg) {
if (this.promiseKeys[str] != null) {
this.promiseKeys[str].reject(msg);
this.promiseKeys[str] = null;
}
}
cliResolve(msg) {
let output = "";
if (msg == null) {
output = "Request timed out.";
} else {
output = RtsCliUtil.msgToStr(msg.value);
}
for (let i = 0; i < this.onCliResponseEvent.length; i++) {
this.onCliResponseEvent[i](output);
}
this.waitForResponse = "";
}
//
// <!-- API Promises
//
  doCancelPair() {
    // Fire-and-forget: no response is expected for cancel-pairing, so there is
    // no promise to store or return.
    this.send(
      Rts.RtsConnection_2.NewRtsConnection_2WithRtsCancelPairing(
        new Rts.RtsCancelPairing()
      )
    );
  }
doWifiScan() {
let self = this;
let p = new Promise(function (resolve, reject) {
self.storePromiseMethods("wifi-scan", resolve, reject);
self.send(
Rts.RtsConnection_2.NewRtsConnection_2WithRtsWifiScanRequest(
new Rts.RtsWifiScanRequest()
)
);
});
return p;
}
doWifiConnect(ssid, password, auth, timeout) {
let self = this;
let p = new Promise(function (resolve, reject) {
self.storePromiseMethods("wifi-connect", resolve, reject);
self.send(
Rts.RtsConnection_2.NewRtsConnection_2WithRtsWifiConnectRequest(
new Rts.RtsWifiConnectRequest(
RtsCliUtil.convertStrToHex(ssid),
password,
timeout,
auth,
false
)
)
);
});
return p;
}
doWifiForget(ssid) {
let self = this;
let p = new Promise(function (resolve, reject) {
self.storePromiseMethods("wifi-forget", resolve, reject);
let deleteAll = ssid == "!all";
let hexSsid = deleteAll ? "" : RtsCliUtil.convertStrToHex(ssid);
self.send(
Rts.RtsConnection_2.NewRtsConnection_2WithRtsWifiForgetRequest(
new Rts.RtsWifiForgetRequest(deleteAll, hexSsid)
)
);
});
return p;
}
doWifiAp(enable) {
let self = this;
let p = new Promise(function (resolve, reject) {
self.storePromiseMethods("wifi-ap", resolve, reject);
self.send(
Rts.RtsConnection_2.NewRtsConnection_2WithRtsWifiAccessPointRequest(
new Rts.RtsWifiAccessPointRequest(enable.toLowerCase() == "true")
)
);
});
return p;
}
doWifiIp() {
let self = this;
let p = new Promise(function (resolve, reject) {
self.storePromiseMethods("wifi-ip", resolve, reject);
self.send(
Rts.RtsConnection_2.NewRtsConnection_2WithRtsWifiIpRequest(
new Rts.RtsWifiIpRequest()
)
);
});
return p;
}
doStatus() {
let self = this;
let p = new Promise(function (resolve, reject) {
self.storePromiseMethods("status", resolve, reject);
self.send(
Rts.RtsConnection_2.NewRtsConnection_2WithRtsStatusRequest(
new Rts.RtsStatusRequest()
)
);
});
return RtsCliUtil.addTimeout(p);
}
doOtaStart(url) {
let self = this;
let p = new Promise(function (resolve, reject) {
self.storePromiseMethods("ota-start", resolve, reject);
self.send(
Rts.RtsConnection_2.NewRtsConnection_2WithRtsOtaUpdateRequest(
new Rts.RtsOtaUpdateRequest(url)
)
);
});
return p;
}
doOtaCancel(url) {
let self = this;
let p = new Promise(function (resolve, reject) {
self.storePromiseMethods("ota-cancel", resolve, reject);
self.send(
Rts.RtsConnection_2.NewRtsConnection_2WithRtsOtaCancelRequest(
new Rts.RtsOtaCancelRequest(url)
)
);
});
return p;
}
doLog() {
let self = this;
let p = new Promise(function (resolve, reject) {
self.storePromiseMethods("logs", resolve, reject);
self.send(
Rts.RtsConnection_2.NewRtsConnection_2WithRtsLogRequest(
new Rts.RtsLogRequest(0, [])
)
);
});
return p;
}
requireArgs(args, num) {
if (args.length < num) {
console.log(
'"' + args[0] + '" command requires ' + (num - 1) + " arguments"
);
return false;
}
return true;
}
//
// API Promises -->
//
setCliHelp() {
let helpArgs = {
"wifi-connect": {
args: 2,
des: "Connect Vector to a WiFi network.",
help: "wifi-connect {ssid} {password}",
},
"wifi-scan": {
args: 0,
des: "Get WiFi networks that Vector can scan.",
help: "wifi-scan",
},
"wifi-ip": {
args: 0,
des: "Get Vector's WiFi IPv4/IPv6 addresses.",
help: "wifi-ip",
},
"wifi-ap": {
args: 1,
des: "Enable/Disable Vector as a WiFi access point.",
help: "wifi-ap {true|false}",
},
"wifi-forget": {
args: 1,
des: "Forget a WiFi network, or optionally all of them.",
help: "wifi-forget {ssid|!all}",
},
"ota-start": {
args: 1,
des: "Tell Vector to start an OTA update with the given URL.",
help: "ota-start {url}",
},
"ota-progress": {
args: 0,
des: "Get the current OTA progress.",
help: "ota-progress",
},
"ota-cancel": {
args: 0,
des: "Cancel an OTA in progress.",
help: "ota-cancel",
},
logs: {
args: 0,
des: "Download logs over BLE from Vector.",
help: "logs",
},
status: {
args: 0,
des: "Get status information from Vector.",
help: "status",
},
"anki-auth": {
args: 1,
des: "Provision Vector with Anki account.",
help: "anki-auth {session_token}",
},
"connection-id": {
args: 1,
des: "Give Vector a DAS/analytics id for this BLE session.",
help: "connection-id {id}",
},
sdk: {
args: 3,
des: "Send an SDK request over BLE.",
help: "sdk {path} {json} {client_app_guid}",
},
};
this.helpArgs = helpArgs;
return helpArgs;
}
// returns whether resolved immediately
handleCli(args) {
let self = this;
let cmd = args[0];
let r = function (msg) {
self.cliResolve(msg);
};
let output = "";
switch (cmd) {
case "quit":
case "exit":
self.vectorBle.tryDisconnect();
return false;
case "help":
output = RtsCliUtil.printHelp(self.helpArgs);
for (let i = 0; i < this.onPrintEvent.length; i++) {
this.onPrintEvent[i](output);
}
break;
case "wifi-scan":
self.waitForResponse = "wifi-scan";
self.doWifiScan().then(function (msg) {
self.wifiScanResults = msg.value.scanResult;
self.cliResolve(msg);
}, r);
break;
case "wifi-connect":
if (!self.requireArgs(args, 3)) break;
self.waitForResponse = "wifi-connect";
let ssid = args[1];
let hasScanned = false;
let result = null;
for (let i = 0; i < self.wifiScanResults.length; i++) {
let r = self.wifiScanResults[i];
if (ssid == RtsCliUtil.convertHexToStr(r.wifiSsidHex)) {
result = r;
hasScanned = true;
break;
}
}
self
.doWifiConnect(ssid, args[2], hasScanned ? result.authType : 6, 15)
.then(function (msg) {
self.cliResolve(msg);
}, r);
break;
case "status":
self.waitForResponse = "status";
self.doStatus().then(function (msg) {
self.cliResolve(msg);
}, r);
break;
case "wifi-ip":
self.waitForResponse = "wifi-ip";
self.doWifiIp().then(function (msg) {
self.cliResolve(msg);
}, r);
break;
case "wifi-forget":
if (!self.requireArgs(args, 2)) break;
self.waitForResponse = "wifi-forget";
self.doWifiForget(args[1]).then(function (msg) {
self.cliResolve(msg);
}, r);
break;
case "wifi-ap":
if (!self.requireArgs(args, 2)) break;
self.waitForResponse = "wifi-ap";
self.doWifiAp(args[1]).then(function (msg) {
self.cliResolve(msg);
}, r);
break;
case "anki-auth":
if (!self.requireArgs(args, 2)) break;
self.waitForResponse = "anki-auth";
self.doAnkiAuth(args[1]).then(function (msg) {
self.cliResolve(msg);
}, r);
break;
case "ota-start":
if (!self.requireArgs(args, 2)) break;
self.waitForResponse = "ota-start";
self.hasProgressBar = true;
output = "Updating robot with OTA from " + args[1];
for (let i = 0; i < this.onPrintEvent.length; i++) {
this.onPrintEvent[i](output);
}
for (let i = 0; i < this.onNewProgressBarEvent.length; i++) {
this.onNewProgressBarEvent[i]();
}
self.doOtaStart(args[1]).then(function (msg) {
self.otaProgress.value = msg.value;
self.hasProgressBar = false;
self.cliResolve(msg);
}, r);
break;
case "ota-cancel":
self.waitForResponse = "ota-cancel";
self.doOtaCancel().then(function (msg) {
self.otaProgress.value = msg.value;
self.cliResolve(msg);
}, r);
break;
case "ota-progress":
if (self.otaProgress.value != null) {
console.log(
RtsCliUtil.rtsOtaUpdateResponseStr(self.otaProgress.value)
);
}
break;
case "logs":
console.log(
"downloading logs over BLE will probably take about 30 seconds..."
);
self.waitForResponse = "logs";
self.hasProgressBar = true;
output = "Downloading logs...";
for (let i = 0; i < this.onPrintEvent.length; i++) {
this.onPrintEvent[i](output);
}
for (let i = 0; i < this.onNewProgressBarEvent.length; i++) {
this.onNewProgressBarEvent[i]();
}
self.doLog().then(function (msg) {
self.hasProgressBar = false;
self.cliResolve(msg);
}, r);
break;
default:
self.waitForResponse = "";
break;
}
if (self.waitForResponse == "") {
return true;
}
return false;
}
}
module.exports = { RtsV2Handler };
|
RtsV2Handler
|
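The handler above correlates asynchronous BLE responses with their requests via `storePromiseMethods`/`resolvePromise`/`rejectPromise`: each in-flight command registers resolve/reject callbacks under a string key, and the receive path fires and clears them when the matching response tag arrives. A runnable Rust sketch of that pattern (an analogy under stated assumptions, not the handler's actual API):

use std::collections::HashMap;

// One completion callback per in-flight command, keyed by command name.
struct Pending {
    callbacks: HashMap<String, Box<dyn FnOnce(&str)>>,
}

impl Pending {
    fn store(&mut self, key: &str, cb: impl FnOnce(&str) + 'static) {
        self.callbacks.insert(key.to_string(), Box::new(cb));
    }
    // Fire-and-remove, like promiseKeys[str].resolve(msg) followed by
    // promiseKeys[str] = null in the JS above.
    fn resolve(&mut self, key: &str, msg: &str) {
        if let Some(cb) = self.callbacks.remove(key) {
            cb(msg);
        }
    }
}

fn main() {
    let mut pending = Pending { callbacks: HashMap::new() };
    pending.store("wifi-scan", |msg| println!("wifi-scan resolved: {}", msg));
    pending.resolve("wifi-scan", "3 networks found");
    pending.resolve("wifi-scan", "ignored: already resolved");
}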
table.rs
|
// Copyright 2016 EinsteinDB Project Authors. Licensed under Apache-2.0.
use std::convert::TryInto;
use std::io::Write;
use std::sync::Arc;
use std::{cmp, u8};
use crate::prelude::*;
use crate::FieldTypeTp;
use eekvproto::interlock::KeyRange;
use einsteindbpb::ColumnInfo;
use super::mysql::{Duration, Time};
use super::{datum, datum::DatumDecoder, Datum, Error, Result};
use crate::expr::EvalContext;
use codec::prelude::*;
use EinsteinDB_util::codec::BytesSlice;
use EinsteinDB_util::collections::{HashMap, HashSet};
// handle or index id
pub const ID_LEN: usize = 8;
pub const PREFIX_LEN: usize = TABLE_PREFIX_LEN + ID_LEN /*table_id*/ + SEP_LEN;
pub const RECORD_ROW_KEY_LEN: usize = PREFIX_LEN + ID_LEN;
pub const TABLE_PREFIX: &[u8] = b"t";
pub const RECORD_PREFIX_SEP: &[u8] = b"_r";
pub const INDEX_PREFIX_SEP: &[u8] = b"_i";
pub const SEP_LEN: usize = 2;
pub const TABLE_PREFIX_LEN: usize = 1;
pub const TABLE_PREFIX_KEY_LEN: usize = TABLE_PREFIX_LEN + ID_LEN;
// the maximum len of the old encoding of index value.
pub const MAX_OLD_ENCODED_VALUE_LEN: usize = 9;
/// `TableEncoder` encodes the table record/index prefix.
trait TableEncoder: NumberEncoder {
fn append_table_record_prefix(&mut self, table_id: i64) -> Result<()> {
self.write_bytes(TABLE_PREFIX)?;
self.write_i64(table_id)?;
self.write_bytes(RECORD_PREFIX_SEP).map_err(Error::from)
}
fn append_table_index_prefix(&mut self, table_id: i64) -> Result<()> {
self.write_bytes(TABLE_PREFIX)?;
self.write_i64(table_id)?;
self.write_bytes(INDEX_PREFIX_SEP).map_err(Error::from)
}
}
impl<T: BufferWriter> TableEncoder for T {}
/// Extracts table prefix from table record or index.
#[inline]
pub fn extract_table_prefix(key: &[u8]) -> Result<&[u8]> {
if !key.starts_with(TABLE_PREFIX) || key.len() < TABLE_PREFIX_KEY_LEN {
Err(invalid_type!(
"record key or index key expected, but got {:?}",
key
))
} else {
Ok(&key[..TABLE_PREFIX_KEY_LEN])
}
}
/// Checks if the range is for table record or index.
pub fn check_table_ranges(ranges: &[KeyRange]) -> Result<()> {
for range in ranges {
extract_table_prefix(range.get_start())?;
extract_table_prefix(range.get_end())?;
if range.get_start() >= range.get_end() {
return Err(invalid_type!(
"invalid range,range.start should be smaller than range.end, but got [{:?},{:?})",
range.get_start(),
range.get_end()
));
}
}
Ok(())
}
#[inline]
pub fn check_record_key(key: &[u8]) -> Result<()> {
check_key_type(key, RECORD_PREFIX_SEP)
}
#[inline]
pub fn check_index_key(key: &[u8]) -> Result<()> {
check_key_type(key, INDEX_PREFIX_SEP)
}
/// `check_key_type` checks if the key is the type we want, `wanted_type` should be
/// `table::RECORD_PREFIX_SEP` or `table::INDEX_PREFIX_SEP`.
#[inline]
fn check_key_type(key: &[u8], wanted_type: &[u8]) -> Result<()> {
let mut buf = key;
if buf.read_bytes(TABLE_PREFIX_LEN)? != TABLE_PREFIX {
return Err(invalid_type!(
"record or index key expected, but got {}",
hex::encode_upper(key)
));
}
buf.read_bytes(ID_LEN)?;
if buf.read_bytes(SEP_LEN)? != wanted_type {
Err(invalid_type!(
"expected key sep type {}, but got key {})",
hex::encode_upper(wanted_type),
hex::encode_upper(key)
))
} else {
Ok(())
}
}
/// Decodes table ID from the key.
pub fn decode_table_id(key: &[u8]) -> Result<i64> {
let mut buf = key;
if buf.read_bytes(TABLE_PREFIX_LEN)? != TABLE_PREFIX {
return Err(invalid_type!(
"record key expected, but got {}",
hex::encode_upper(key)
));
}
buf.read_i64().map_err(Error::from)
}
/// `flatten` flattens the datum.
#[inline]
pub fn flatten(ctx: &mut EvalContext, data: Datum) -> Result<Datum> {
match data {
Datum::Dur(d) => Ok(Datum::I64(d.to_nanos())),
Datum::Time(t) => Ok(Datum::U64(t.to_packed_u64(ctx)?)),
_ => Ok(data),
}
}
/// `encode_row` encodes row data and column ids into a byte slice.
/// Row layout: colID1, value1, colID2, value2, .....
pub fn encode_row(ctx: &mut EvalContext, row: Vec<Datum>, col_ids: &[i64]) -> Result<Vec<u8>> {
if row.len() != col_ids.len() {
return Err(box_err!(
"data and columnID count not match {} vs {}",
row.len(),
col_ids.len()
));
}
let mut values = Vec::with_capacity(cmp::max(row.len() * 2, 1));
for (&id, col) in col_ids.iter().zip(row) {
values.push(Datum::I64(id));
let fc = flatten(ctx, col)?;
values.push(fc);
}
if values.is_empty() {
values.push(Datum::Null);
}
datum::encode_value(ctx, &values)
}
/// `encode_row_key` encodes the table id and record handle into a byte array.
pub fn encode_row_key(table_id: i64, handle: i64) -> Vec<u8> {
let mut key = Vec::with_capacity(RECORD_ROW_KEY_LEN);
// can't panic
key.append_table_record_prefix(table_id).unwrap();
key.write_i64(handle).unwrap();
key
}
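// Concrete example (consistent with the key bytes used in the tests below,
// which suggest `write_i64` stores the memory-comparable form: sign bit
// flipped, big-endian): encode_row_key(1, 1) produces
//   b"t" ++ [0x80,0,0,0,0,0,0,1] ++ b"_r" ++ [0x80,0,0,0,0,0,0,1]
// i.e. TABLE_PREFIX, 8-byte table id, RECORD_PREFIX_SEP, 8-byte handle.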
pub fn encode_common_handle_for_test(table_id: i64, handle: &[u8]) -> Vec<u8> {
let mut key = Vec::with_capacity(PREFIX_LEN + handle.len());
key.append_table_record_prefix(table_id).unwrap();
key.extend(handle);
key
}
/// `encode_column_key` encodes the table id, row handle and column id into a byte array.
pub fn encode_column_key(table_id: i64, handle: i64, column_id: i64) -> Vec<u8> {
let mut key = Vec::with_capacity(RECORD_ROW_KEY_LEN + ID_LEN);
key.append_table_record_prefix(table_id).unwrap();
key.write_i64(handle).unwrap();
key.write_i64(column_id).unwrap();
key
}
/// `decode_int_handle` decodes the key and gets the int handle.
#[inline]
pub fn decode_int_handle(mut key: &[u8]) -> Result<i64> {
check_record_key(key)?;
key = &key[PREFIX_LEN..];
key.read_i64().map_err(Error::from)
}
/// `decode_common_handle` decodes the key and gets the common handle.
#[inline]
pub fn decode_common_handle(mut key: &[u8]) -> Result<&[u8]> {
check_record_key(key)?;
key = &key[PREFIX_LEN..];
Ok(key)
}
/// `encode_index_seek_key` encodes an index value to byte array.
pub fn encode_index_seek_key(table_id: i64, idx_id: i64, encoded: &[u8]) -> Vec<u8> {
let mut key = Vec::with_capacity(PREFIX_LEN + ID_LEN + encoded.len());
key.append_table_index_prefix(table_id).unwrap();
key.write_i64(idx_id).unwrap();
key.write_all(encoded).unwrap();
key
}
/// `decode_index_key` decodes datums from an index key.
pub fn decode_index_key(
ctx: &mut EvalContext,
encoded: &[u8],
infos: &[ColumnInfo],
) -> Result<Vec<Datum>> {
let mut buf = &encoded[PREFIX_LEN + ID_LEN..];
let mut res = vec![];
for info in infos {
if buf.is_empty() {
return Err(box_err!("{} is too short.", hex::encode_upper(encoded)));
}
let mut v = buf.read_datum()?;
v = unflatten(ctx, v, info)?;
res.push(v);
}
Ok(res)
}
/// `unflatten` converts a raw datum to a column datum.
fn unflatten(
ctx: &mut EvalContext,
datum: Datum,
field_type: &dyn FieldTypeAccessor,
) -> Result<Datum> {
if let Datum::Null = datum {
return Ok(datum);
}
let tp = field_type.tp();
match tp {
FieldTypeTp::Float => Ok(Datum::F64(f64::from(datum.f64() as f32))),
FieldTypeTp::Date | FieldTypeTp::DateTime | FieldTypeTp::Timestamp => {
let fsp = field_type.decimal() as i8;
let t = Time::from_packed_u64(ctx, datum.u64(), tp.try_into()?, fsp)?;
Ok(Datum::Time(t))
}
FieldTypeTp::Duration => {
Duration::from_nanos(datum.i64(), field_type.decimal() as i8).map(Datum::Dur)
}
FieldTypeTp::Enum | FieldTypeTp::Set | FieldTypeTp::Bit => Err(box_err!(
"unflatten field type {} is not supported yet.",
tp
)),
t => {
debug_assert!(
[
FieldTypeTp::Tiny,
FieldTypeTp::Short,
FieldTypeTp::Year,
FieldTypeTp::Int24,
FieldTypeTp::Long,
FieldTypeTp::LongLong,
FieldTypeTp::Double,
FieldTypeTp::TinyBlob,
FieldTypeTp::MediumBlob,
FieldTypeTp::Blob,
FieldTypeTp::LongBlob,
FieldTypeTp::VarChar,
FieldTypeTp::String,
FieldTypeTp::NewDecimal,
FieldTypeTp::JSON
]
.contains(&t),
"unknown type {} {}",
t,
datum
);
Ok(datum)
}
}
}
/// `decode_col_value` decodes data to a Datum according to the column info.
pub fn decode_col_value(
data: &mut BytesSlice<'_>,
ctx: &mut EvalContext,
col: &ColumnInfo,
) -> Result<Datum> {
let d = data.read_datum()?;
unflatten(ctx, d, col)
}
/// `decode_row` decodes a byte slice into datums.
/// Row layout: colID1, value1, colID2, value2, .....
// TODO: We should only decode columns in the cols map.
pub fn decode_row(
data: &mut BytesSlice<'_>,
ctx: &mut EvalContext,
cols: &HashMap<i64, ColumnInfo>,
) -> Result<HashMap<i64, Datum>> {
let mut values = datum::decode(data)?;
if values.get(0).map_or(true, |d| *d == Datum::Null) {
return Ok(HashMap::default());
}
if values.len() & 1 == 1 {
return Err(box_err!("decoded row values' length should be even!"));
}
let mut row = HashMap::with_capacity_and_hasher(cols.len(), Default::default());
let mut drain = values.drain(..);
loop {
let id = match drain.next() {
None => return Ok(row),
Some(id) => id.i64(),
};
let v = drain.next().unwrap();
if let Some(ci) = cols.get(&id) {
let v = unflatten(ctx, v, ci)?;
row.insert(id, v);
}
}
}
/// `RowColMeta` saves the column meta of the row.
#[derive(Debug)]
pub struct RowColMeta {
offset: usize,
length: usize,
}
/// `RowColsDict` stores the row data and a map mapping column ID to its meta.
#[derive(Debug)]
pub struct RowColsDict {
// data of current row
pub value: Vec<u8>,
// cols contains meta of each column in the format of:
// (col_id1,(offset1,len1)),(col_id2,(offset2,len2),...)
pub cols: HashMap<i64, RowColMeta>,
}
impl RowColMeta {
pub fn new(offset: usize, length: usize) -> RowColMeta {
RowColMeta { offset, length }
}
}
impl RowColsDict {
pub fn new(cols: HashMap<i64, RowColMeta>, value: Vec<u8>) -> RowColsDict {
RowColsDict { value, cols }
}
/// Returns the total count of the columns.
#[inline]
pub fn len(&self) -> usize {
self.cols.len()
}
/// Returns whether it has columns or not.
#[inline]
pub fn is_empty(&self) -> bool {
self.cols.is_empty()
}
/// Gets the column data from its meta if `key` exists.
pub fn get(&self, key: i64) -> Option<&[u8]> {
if let Some(meta) = self.cols.get(&key) {
return Some(&self.value[meta.offset..(meta.offset + meta.length)]);
}
None
}
/// Appends a column to the row.
pub fn append(&mut self, cid: i64, value: &mut Vec<u8>) {
let offset = self.value.len();
let length = value.len();
self.value.append(value);
self.cols.insert(cid, RowColMeta::new(offset, length));
}
/// Gets binary of cols, keeps the original order, and returns one slice and cols' end offsets.
pub fn get_column_values_and_end_offsets(&self) -> (&[u8], Vec<usize>) {
let mut start = self.value.len();
let mut length = 0;
for meta in self.cols.values() {
if meta.offset < start {
start = meta.offset;
}
length += meta.length;
}
let end_offsets = self
.cols
.values()
.map(|meta| meta.offset + meta.length - start)
.collect();
(&self.value[start..start + length], end_offsets)
}
}
/// `cut_row` cuts the encoded row into (col_id,offset,length)
/// and returns interested columns' meta in RowColsDict
///
/// Encoded row can be either in row format v1 or v2.
///
/// `col_ids` must be consistent with `cols`. Otherwise the result is undefined.
pub fn cut_row(
data: Vec<u8>,
col_ids: &HashSet<i64>,
cols: Arc<Vec<ColumnInfo>>,
) -> Result<RowColsDict> {
if cols.is_empty() || data.is_empty() || (data.len() == 1 && data[0] == datum::NIL_FLAG) {
return Ok(RowColsDict::new(HashMap::default(), data));
}
match data[0] {
crate::codec::row::v2::CODEC_VERSION => cut_row_v2(data, cols),
_ => cut_row_v1(data, col_ids),
}
}
/// Cuts a non-empty row in row format v1.
fn cut_row_v1(data: Vec<u8>, cols: &HashSet<i64>) -> Result<RowColsDict> {
let meta_map = {
let mut meta_map = HashMap::with_capacity_and_hasher(cols.len(), Default::default());
let length = data.len();
let mut tmp_data: &[u8] = data.as_ref();
while !tmp_data.is_empty() && meta_map.len() < cols.len() {
let id = tmp_data.read_datum()?.i64();
let offset = length - tmp_data.len();
let (val, rem) = datum::split_datum(tmp_data, false)?;
if cols.contains(&id) {
meta_map.insert(id, RowColMeta::new(offset, val.len()));
}
tmp_data = rem;
}
meta_map
};
Ok(RowColsDict::new(meta_map, data))
}
/// Cuts a non-empty row in row format v2 and encodes into v1 format.
fn cut_row_v2(data: Vec<u8>, cols: Arc<Vec<ColumnInfo>>) -> Result<RowColsDict> {
use crate::codec::datum_codec::{ColumnIdDatumEncoder, EvaluableDatumEncoder};
use crate::codec::row::v2::{RowSlice, V1CompatibleEncoder};
let mut meta_map = HashMap::with_capacity_and_hasher(cols.len(), Default::default());
let mut result = Vec::with_capacity(data.len() + cols.len() * 8);
let row_slice = RowSlice::from_bytes(&data)?;
for col in cols.iter() {
let id = col.get_column_id();
if let Some((start, offset)) = row_slice.search_in_non_null_ids(id)? {
result.write_column_id_datum(id)?;
let v2_datum = &row_slice.values()[start..offset];
let result_offset = result.len();
result.write_v2_as_datum(v2_datum, col)?;
meta_map.insert(
id,
RowColMeta::new(result_offset, result.len() - result_offset),
);
} else if row_slice.search_in_null_ids(id) {
result.write_column_id_datum(id)?;
let result_offset = result.len();
result.write_evaluable_datum_null()?;
meta_map.insert(
id,
RowColMeta::new(result_offset, result.len() - result_offset),
);
} else {
// Otherwise the column does not exist.
}
}
Ok(RowColsDict::new(meta_map, result))
}
/// `cut_idx_key` cuts the encoded index key into RowColsDict and handle.
pub fn cut_idx_key(key: Vec<u8>, col_ids: &[i64]) -> Result<(RowColsDict, Option<i64>)> {
let mut meta_map: HashMap<i64, RowColMeta> =
HashMap::with_capacity_and_hasher(col_ids.len(), Default::default());
let handle = {
let mut tmp_data: &[u8] = &key[PREFIX_LEN + ID_LEN..];
let length = key.len();
// parse cols from data
for &id in col_ids {
let offset = length - tmp_data.len();
let (val, rem) = datum::split_datum(tmp_data, false)?;
meta_map.insert(id, RowColMeta::new(offset, val.len()));
tmp_data = rem;
}
if tmp_data.is_empty() {
None
} else {
Some(tmp_data.read_datum()?.i64())
}
};
Ok((RowColsDict::new(meta_map, key), handle))
}
pub fn generate_index_data_for_test(
table_id: i64,
index_id: i64,
handle: i64,
col_val: &Datum,
unique: bool,
) -> (HashMap<i64, Vec<u8>>, Vec<u8>) {
let indice = vec![(2, (*col_val).clone()), (3, Datum::Dec(handle.into()))];
let mut expect_row = HashMap::default();
let mut v: Vec<_> = indice
.iter()
.map(|&(ref cid, ref value)| {
expect_row.insert(
*cid,
datum::encode_key(&mut EvalContext::default(), &[value.clone()]).unwrap(),
);
value.clone()
})
.collect();
if !unique {
v.push(Datum::I64(handle));
}
let encoded = datum::encode_key(&mut EvalContext::default(), &v).unwrap();
let idx_key = encode_index_seek_key(table_id, index_id, &encoded);
(expect_row, idx_key)
}
#[cfg(test)]
mod tests {
use std::i64;
use einsteindbpb::ColumnInfo;
use crate::codec::datum::{self, Datum};
use EinsteinDB_util::collections::{HashMap, HashSet};
use EinsteinDB_util::map;
use super::*;
const TABLE_ID: i64 = 1;
const INDEX_ID: i64 = 1;
#[test]
fn test_row_key_codec() {
let tests = vec![i64::MIN, i64::MAX, -1, 0, 2, 3, 1024];
for &t in &tests {
let k = encode_row_key(1, t);
assert_eq!(t, decode_int_handle(&k).unwrap());
}
}
#[test]
fn test_index_key_codec() {
let tests = vec![
Datum::U64(1),
Datum::Bytes(b"123".to_vec()),
Datum::I64(-1),
Datum::Dur(Duration::parse(&mut EvalContext::default(), b"12:34:56.666", 2).unwrap()),
];
let mut duration_col = ColumnInfo::default();
duration_col
.as_mut_accessor()
.set_tp(FieldTypeTp::Duration)
.set_decimal(2);
let types = vec![
FieldTypeTp::LongLong.into(),
FieldTypeTp::VarChar.into(),
FieldTypeTp::LongLong.into(),
duration_col,
];
let mut ctx = EvalContext::default();
let buf = datum::encode_key(&mut ctx, &tests).unwrap();
let encoded = encode_index_seek_key(1, 2, &buf);
assert_eq!(tests, decode_index_key(&mut ctx, &encoded, &types).unwrap());
}
fn to_hash_map(row: &RowColsDict) -> HashMap<i64, Vec<u8>> {
let mut data = HashMap::with_capacity_and_hasher(row.cols.len(), Default::default());
if row.is_empty() {
return data;
}
for (key, meta) in &row.cols {
data.insert(
*key,
row.value[meta.offset..(meta.offset + meta.length)].to_vec(),
);
}
data
}
fn cut_row_as_owned(bs: &[u8], col_id_set: &HashSet<i64>) -> HashMap<i64, Vec<u8>> {
let is_empty_row =
col_id_set.is_empty() || bs.is_empty() || (bs.len() == 1 && bs[0] == datum::NIL_FLAG);
let res = if is_empty_row {
RowColsDict::new(HashMap::default(), bs.to_vec())
} else {
cut_row_v1(bs.to_vec(), col_id_set).unwrap()
};
to_hash_map(&res)
}
fn cut_idx_key_as_owned(bs: &[u8], ids: &[i64]) -> (HashMap<i64, Vec<u8>>, Option<i64>) {
let (res, left) = cut_idx_key(bs.to_vec(), ids).unwrap();
(to_hash_map(&res), left)
}
#[test]
fn test_row_codec() {
let mut duration_col = ColumnInfo::default();
duration_col
.as_mut_accessor()
.set_tp(FieldTypeTp::Duration)
.set_decimal(2);
let mut cols = map![
1 => FieldTypeTp::LongLong.into(),
2 => FieldTypeTp::VarChar.into(),
3 => FieldTypeTp::NewDecimal.into(),
5 => FieldTypeTp::JSON.into(),
6 => duration_col
];
let mut row = map![
1 => Datum::I64(100),
2 => Datum::Bytes(b"abc".to_vec()),
3 => Datum::Dec(10.into()),
5 => Datum::Json(r#"{"name": "John"}"#.parse().unwrap()),
6 => Datum::Dur(Duration::parse(&mut EvalContext::default(),b"23:23:23.666",2 ).unwrap())
];
let mut ctx = EvalContext::default();
let col_ids: Vec<_> = row.iter().map(|(&id, _)| id).collect();
let col_values: Vec<_> = row.iter().map(|(_, v)| v.clone()).collect();
let mut col_encoded: HashMap<_, _> = row
.iter()
.map(|(k, v)| {
let f = super::flatten(&mut ctx, v.clone()).unwrap();
(*k, datum::encode_value(&mut ctx, &[f]).unwrap())
})
.collect();
let mut col_id_set: HashSet<_> = col_ids.iter().cloned().collect();
let bs = encode_row(&mut ctx, col_values, &col_ids).unwrap();
assert!(!bs.is_empty());
let mut ctx = EvalContext::default();
let r = decode_row(&mut bs.as_slice(), &mut ctx, &cols).unwrap();
assert_eq!(row, r);
let mut datums: HashMap<_, _>;
datums = cut_row_as_owned(&bs, &col_id_set);
assert_eq!(col_encoded, datums);
cols.insert(4, FieldTypeTp::Float.into());
let r = decode_row(&mut bs.as_slice(), &mut ctx, &cols).unwrap();
assert_eq!(row, r);
col_id_set.insert(4);
datums = cut_row_as_owned(&bs, &col_id_set);
assert_eq!(col_encoded, datums);
cols.remove(&4);
cols.remove(&3);
let r = decode_row(&mut bs.as_slice(), &mut ctx, &cols).unwrap();
row.remove(&3);
assert_eq!(row, r);
col_id_set.remove(&3);
col_id_set.remove(&4);
datums = cut_row_as_owned(&bs, &col_id_set);
col_encoded.remove(&3);
assert_eq!(col_encoded, datums);
let bs = encode_row(&mut ctx, vec![], &[]).unwrap();
assert!(!bs.is_empty());
assert!(decode_row(&mut bs.as_slice(), &mut ctx, &cols)
.unwrap()
.is_empty());
datums = cut_row_as_owned(&bs, &col_id_set);
assert!(datums.is_empty());
}
#[test]
fn test_idx_codec() {
let mut col_ids = vec![1, 2, 3, 4];
let mut duration_col = ColumnInfo::default();
duration_col
.as_mut_accessor()
.set_tp(FieldTypeTp::Duration)
.set_decimal(2);
let col_types = vec![
FieldTypeTp::LongLong.into(),
FieldTypeTp::VarChar.into(),
FieldTypeTp::NewDecimal.into(),
duration_col,
];
let col_values = vec![
Datum::I64(100),
Datum::Bytes(b"abc".to_vec()),
Datum::Dec(10.into()),
Datum::Dur(Duration::parse(&mut EvalContext::default(), b"23:23:23.666", 2).unwrap()),
];
let mut ctx = EvalContext::default();
let mut col_encoded: HashMap<_, _> = col_ids
.iter()
.zip(&col_types)
.zip(&col_values)
.map(|((id, t), v)| {
let unflattened = super::unflatten(&mut ctx, v.clone(), t).unwrap();
let encoded = datum::encode_key(&mut ctx, &[unflattened]).unwrap();
(*id, encoded)
})
.collect();
let key = datum::encode_key(&mut ctx, &col_values).unwrap();
let bs = encode_index_seek_key(1, 1, &key);
assert!(!bs.is_empty());
let mut ctx = EvalContext::default();
let r = decode_index_key(&mut ctx, &bs, &col_types).unwrap();
assert_eq!(col_values, r);
let mut res: (HashMap<_, _>, _) = cut_idx_key_as_owned(&bs, &col_ids);
assert_eq!(col_encoded, res.0);
assert!(res.1.is_none());
let handle_data = col_encoded.remove(&4).unwrap();
let handle = if handle_data.is_empty() {
None
} else {
Some((handle_data.as_ref() as &[u8]).read_datum().unwrap().i64())
};
col_ids.remove(3);
res = cut_idx_key_as_owned(&bs, &col_ids);
assert_eq!(col_encoded, res.0);
assert_eq!(res.1, handle);
let bs = encode_index_seek_key(1, 1, &[]);
assert!(!bs.is_empty());
assert!(decode_index_key(&mut ctx, &bs, &[]).unwrap().is_empty());
res = cut_idx_key_as_owned(&bs, &[]);
assert!(res.0.is_empty());
assert!(res.1.is_none());
}
#[test]
fn test_extract_table_prefix() {
let cases = vec![
(vec![], None),
(b"a\x80\x00\x00\x00\x00\x00\x00\x01".to_vec(), None),
(b"t\x80\x00\x00\x00\x00\x00\x01".to_vec(), None),
(
b"t\x80\x00\x00\x00\x00\x00\x00\x01".to_vec(),
Some(b"t\x80\x00\x00\x00\x00\x00\x00\x01".to_vec()),
),
(
b"t\x80\x00\x00\x00\x00\x00\x00\x01_r\xff\xff".to_vec(),
Some(b"t\x80\x00\x00\x00\x00\x00\x00\x01".to_vec()),
),
];
for (input, output) in cases {
assert_eq!(extract_table_prefix(&input).ok().map(From::from), output);
}
}
#[test]
fn test_check_table_range() {
let small_key = b"t\x80\x00\x00\x00\x00\x00\x00\x01a".to_vec();
let large_key = b"t\x80\x00\x00\x00\x00\x00\x00\x01b".to_vec();
let mut range = KeyRange::default();
range.set_start(small_key.clone());
range.set_end(large_key.clone());
assert!(check_table_ranges(&[range]).is_ok());
//test range.start > range.end
let mut range = KeyRange::default();
range.set_end(small_key.clone());
range.set_start(large_key);
assert!(check_table_ranges(&[range]).is_err());
// test invalid end
let mut range = KeyRange::default();
range.set_start(small_key);
range.set_end(b"xx".to_vec());
assert!(check_table_ranges(&[range]).is_err());
}
#[test]
fn test_decode_table_id() {
let tests = vec![0, 2, 3, 1024, i64::MAX];
for &tid in &tests {
let k = encode_row_key(tid, 1);
assert_eq!(tid, decode_table_id(&k).unwrap());
let k = encode_index_seek_key(tid, 1, &k);
assert_eq!(tid, decode_table_id(&k).unwrap());
assert!(decode_table_id(b"xxx").is_err());
}
}
#[test]
fn test_check_key_type()
|
}
|
{
let record_key = encode_row_key(TABLE_ID, 1);
assert!(check_key_type(&record_key.as_slice(), RECORD_PREFIX_SEP).is_ok());
assert!(check_key_type(&record_key.as_slice(), INDEX_PREFIX_SEP).is_err());
let (_, index_key) =
generate_index_data_for_test(TABLE_ID, INDEX_ID, 1, &Datum::I64(1), true);
assert!(check_key_type(&index_key.as_slice(), RECORD_PREFIX_SEP).is_err());
assert!(check_key_type(&index_key.as_slice(), INDEX_PREFIX_SEP).is_ok());
let too_small_key = vec![0];
assert!(check_key_type(&too_small_key.as_slice(), RECORD_PREFIX_SEP).is_err());
assert!(check_key_type(&too_small_key.as_slice(), INDEX_PREFIX_SEP).is_err());
}
|
operation_ids.rs
|
// Print the operation IDs alphabetically
// cargo run --example operation_ids -- ../azure-rest-api-specs/specification/vmware/resource-manager/Microsoft.AVS/stable/2020-03-20/vmware.json
use autorust_openapi::*;
use std::{
fs::{self},
path::Path,
process::exit,
};
fn main() -> Result<(), Box<dyn std::error::Error>> {
match std::env::args().nth(1) {
None => {
eprintln!("Please pass in the spec path.");
exit(1);
}
Some(file_in) => {
            let file_in = Path::new(&file_in);
let bytes = fs::read(file_in)?;
let api: OpenAPI = serde_json::from_slice(&bytes)?;
let mut operation_ids = Vec::new();
for (_path, item) in &api.paths {
match item {
ReferenceOr::Reference { .. } => (),
ReferenceOr::Item(item) => {
for op in item.operations() {
if let Some(operation_id) = &op.operation_id {
operation_ids.push(operation_id);
}
}
}
}
}
operation_ids.sort();
for operation_id in operation_ids {
println!("{}", operation_id);
}
}
}
Ok(())
}
|
index.ts
|
export { readMigration } from './db/utils/read-files';
export { autoRetryTransaction } from './db/utils/transactions';
export { BusEvent, BusEventDto, EventBus } from './event-bus';
export * from './exceptions';
export * from './interceptors/exts';
export { FetchLimiter } from './interceptors/rate-limiter';
export { TimeoutInterceptor } from './interceptors/timeout.interceptor';
export { TransformInterceptor } from './interceptors/transform.interceptor';
export { HashKey, HashKeys, HashValue, RedisCacheModule, RedisHashService } from './redis-cache/redis-cache.module';
export * from './utils';
export * as vk from './vk';
|
file.rs
|
use crate::types::primitive::Integer;
use mime::{Mime, APPLICATION_OCTET_STREAM};
use serde::Deserialize;
use std::{fmt, path::Path};
use tokio::{
fs,
io::{AsyncRead, Result as IoResult},
};
use tokio_util::codec::{BytesCodec, FramedRead};
/// File ready to be downloaded
///
/// The file can be downloaded via the link `https://api.telegram.org/file/bot<token>/<file_path>`
/// It is guaranteed that the link will be valid for at least 1 hour
/// When the link expires, a new one can be requested by calling getFile
/// Maximum file size to download is 20 MB
#[derive(Clone, Debug, Deserialize)]
pub struct File {
/// Identifier for this file, which can be used to download or reuse the file
pub file_id: String,
/// Unique identifier for this file
///
/// It is supposed to be the same over time and for different bots.
/// Can't be used to download or reuse the file.
pub file_unique_id: String,
/// File size, if known
pub file_size: Option<Integer>,
/// File path
/// Use `https://api.telegram.org/file/bot<token>/<file_path>` to get the file
pub file_path: Option<String>,
}
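// Illustrative sketch (not part of the original module): given a bot token and a
// `File` returned by getFile, the download link described above could be built
// like this; `token` is an assumed example parameter, not an API of this crate.
//
// fn download_url(token: &str, file: &File) -> Option<String> {
//     file.file_path
//         .as_ref()
//         .map(|path| format!("https://api.telegram.org/file/bot{}/{}", token, path))
// }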
/// Information about a file for reader
#[derive(Clone, Debug)]
pub struct InputFileInfo {
pub(crate) name: String,
pub(crate) mime_type: Option<Mime>,
}
impl InputFileInfo {
/// Creates a new info object with given file name
pub fn new<S: Into<String>>(name: S) -> Self {
Self {
name: name.into(),
mime_type: None,
}
}
/// Sets mime type of a file
pub fn mime_type(mut self, mime_type: Mime) -> Self {
self.mime_type = Some(mime_type);
self
}
}
impl From<&str> for InputFileInfo {
fn from(name: &str) -> Self {
InputFileInfo::new(name)
}
}
impl From<(&str, Mime)> for InputFileInfo {
fn from((name, mime_type): (&str, Mime)) -> Self {
InputFileInfo::new(name).mime_type(mime_type)
}
}
impl From<String> for InputFileInfo {
fn from(name: String) -> Self {
InputFileInfo::new(name)
}
}
impl From<(String, Mime)> for InputFileInfo {
fn from((name, mime_type): (String, Mime)) -> Self {
InputFileInfo::new(name).mime_type(mime_type)
}
}
/// File reader to upload
pub struct InputFileReader {
pub(crate) info: Option<InputFileInfo>,
pub(crate) reader: FramedRead<Box<dyn AsyncRead + Send + Sync + Unpin>, BytesCodec>,
}
impl fmt::Debug for InputFileReader {
fn fmt(&self, out: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(out, "InputFileReader(reader: ..., info: {:?})", self.info)
}
}
impl InputFileReader {
/// Creates a new file reader
pub fn new<R>(reader: R) -> Self
where
R: AsyncRead + Send + Sync + Unpin + 'static,
{
InputFileReader {
reader: FramedRead::new(Box::new(reader), BytesCodec::new()),
info: None,
}
}
/// Sets a file info
pub fn info<I: Into<InputFileInfo>>(mut self, info: I) -> Self {
self.info = Some(info.into());
self
}
}
impl<R> From<R> for InputFileReader
where
R: AsyncRead + Send + Sync + Unpin + 'static,
{
fn from(reader: R) -> Self {
InputFileReader::new(reader)
}
}
/// File to upload
#[derive(Debug)]
pub struct InputFile {
pub(crate) kind: InputFileKind,
}
impl InputFile {
/// Send a file_id that exists on the Telegram servers
pub fn file_id<S: Into<String>>(file_id: S) -> Self {
Self {
kind: InputFileKind::Id(file_id.into()),
}
}
/// Send an HTTP URL to get a file from the Internet
///
/// Telegram will download a file from that URL
pub fn url<S: Into<String>>(url: S) -> Self {
Self {
kind: InputFileKind::Url(url.into()),
}
}
/// Path to file in FS (will be uploaded using multipart/form-data)
pub async fn path(path: impl AsRef<Path>) -> IoResult<Self> {
let path = path.as_ref();
let file = fs::File::open(path).await?;
let mut reader = InputFileReader::new(file);
if let Some(file_name) = path.file_name().and_then(|x| x.to_str()) {
let mime_type = path
.extension()
.and_then(|x| x.to_str())
.and_then(|x| mime_guess::from_ext(x).first())
.unwrap_or(APPLICATION_OCTET_STREAM);
reader = reader.info(InputFileInfo::new(file_name).mime_type(mime_type));
}
Ok(Self {
kind: InputFileKind::Reader(reader),
})
}
/// A reader (file will be uploaded using multipart/form-data)
pub fn reader<R: Into<InputFileReader>>(reader: R) -> Self {
Self {
kind: InputFileKind::Reader(reader.into()),
}
}
}
pub(crate) enum InputFileKind {
Id(String),
Url(String),
Reader(InputFileReader),
}
impl fmt::Debug for InputFileKind {
fn fmt(&self, out: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
InputFileKind::Id(ref s) => write!(out, "InputFileKind::Id({:?})", s),
InputFileKind::Url(ref s) => write!(out, "InputFileKind::Url({:?})", s),
InputFileKind::Reader(ref r) => write!(out, "InputFileKind::Reader({:?})", r),
}
}
}
impl From<InputFileReader> for InputFile {
fn from(reader: InputFileReader) -> Self {
Self::reader(reader)
}
}
impl<R> From<R> for InputFile
where
R: AsyncRead + Send + Sync + Unpin + 'static,
{
fn from(reader: R) -> Self {
InputFile::reader(InputFileReader::new(reader))
}
}
#[cfg(test)]
mod tests {
use super::*;
use std::io::Cursor;
#[test]
fn deserialize_file_full() {
let data: File = serde_json::from_value(serde_json::json!({
"file_id": "id",
"file_unique_id": "unique-id",
"file_size": 123,
"file_path": "path"
}))
.unwrap();
assert_eq!(data.file_id, "id");
assert_eq!(data.file_unique_id, "unique-id");
assert_eq!(data.file_size.unwrap(), 123);
assert_eq!(data.file_path.unwrap(), "path");
}
#[test]
fn deserialize_file_partial() {
let data: File = serde_json::from_value(serde_json::json!({
"file_id": "id",
"file_unique_id": "unique-id"
}))
.unwrap();
assert_eq!(data.file_id, "id");
assert_eq!(data.file_unique_id, "unique-id");
assert!(data.file_size.is_none());
assert!(data.file_path.is_none());
}
#[tokio::test]
    async fn input_file() {
let id = InputFile::file_id("file-id");
assert_eq!(format!("{:?}", id.kind), r#"InputFileKind::Id("file-id")"#);
let url = InputFile::url("http://example.com/archive.zip");
assert_eq!(
format!("{:?}", url.kind),
r#"InputFileKind::Url("http://example.com/archive.zip")"#
);
        // NOTE: this assumes that the LICENSE file exists in the current
        // working directory (it usually does); otherwise the test will fail
let path = InputFile::path("LICENSE").await.unwrap();
        assert!(format!("{:?}", path.kind).starts_with("InputFileKind::Reader("));
let reader = InputFileReader::from(Cursor::new(b"data")).info(("name", mime::TEXT_PLAIN));
let reader = InputFile::from(reader);
assert!(format!("{:?}", reader.kind).starts_with("InputFileKind::Reader("));
let reader = InputFile::from(Cursor::new(b"data"));
assert!(format!("{:?}", reader.kind).starts_with("InputFileKind::Reader("));
}
#[test]
fn input_file_info() {
let info = InputFileInfo::from("name");
assert_eq!(info.name, "name");
assert!(info.mime_type.is_none());
let info = InputFileInfo::from(("name", mime::TEXT_PLAIN));
assert_eq!(info.name, "name");
assert_eq!(info.mime_type.unwrap(), mime::TEXT_PLAIN);
let info = InputFileInfo::from(String::from("name"));
assert_eq!(info.name, "name");
assert!(info.mime_type.is_none());
let info = InputFileInfo::from((String::from("name"), mime::TEXT_PLAIN));
assert_eq!(info.name, "name");
assert_eq!(info.mime_type.unwrap(), mime::TEXT_PLAIN);
}
}
|
regionck.rs
|
//! The region check is a final pass that runs over the AST after we have
//! inferred the type constraints but before we have actually finalized
//! the types. Its purpose is to embed a variety of region constraints.
//! Inserting these constraints as a separate pass is good because (1) it
//! localizes the code that has to do with region inference and (2) often
//! we cannot know what constraints are needed until the basic types have
//! been inferred.
//!
//! ### Interaction with the borrow checker
//!
//! In general, the job of the borrowck module (which runs later) is to
//! check that all soundness criteria are met, given a particular set of
//! regions. The job of *this* module is to anticipate the needs of the
//! borrow checker and infer regions that will satisfy its requirements.
//! It is generally true that the inference doesn't need to be sound,
//! meaning that if there is a bug and we inferred bad regions, the borrow
//! checker should catch it. This is not entirely true though; for
//! example, the borrow checker doesn't check subtyping, and it doesn't
//! check that region pointers are always live when they are used. It
//! might be worthwhile to fix this so that borrowck serves as a kind of
//! verification step -- that would add confidence in the overall
//! correctness of the compiler, at the cost of duplicating some type
//! checks and effort.
//!
//! ### Inferring the duration of borrows, automatic and otherwise
//!
//! Whenever we introduce a borrowed pointer, for example as the result of
//! a borrow expression `let x = &data`, the lifetime of the pointer `x`
//! is always specified as a region inference variable. `regionck` has the
//! job of adding constraints such that this inference variable is as
//! narrow as possible while still accommodating all uses (that is, every
//! dereference of the resulting pointer must be within the lifetime).
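//!
//! A minimal illustration (a sketch added here, not from the original text):
//!
//!     let data = vec![1, 2, 3];
//!     let x = &data;        // the lifetime of `x` starts as an inference variable
//!     println!("{}", x[0]); // ...and must be wide enough to cover this use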
//!
//! #### Reborrows
//!
//! Generally speaking, `regionck` does NOT try to ensure that the data
//! `data` will outlive the pointer `x`. That is the job of borrowck. The
//! one exception is when "re-borrowing" the contents of another borrowed
//! pointer. For example, imagine you have a borrowed pointer `b` with
//! lifetime `L1` and you have an expression `&*b`. The result of this
//! expression will be another borrowed pointer with lifetime `L2` (which is
//! an inference variable). The borrow checker is going to enforce the
//! constraint that `L2 < L1`, because otherwise you are re-borrowing data
//! for a lifetime larger than the original loan. However, without the
//! routines in this module, the region inferencer would not know of this
//! dependency and thus it might infer the lifetime of `L2` to be greater
//! than `L1` (issue #3148).
//!
//! There are a number of troublesome scenarios in the tests
//! `region-dependent-*.rs`, but here is one example:
//!
//! struct Foo { i: i32 }
//! struct Bar { foo: Foo }
//! fn get_i<'a>(x: &'a Bar) -> &'a i32 {
//! let foo = &x.foo; // Lifetime L1
//! &foo.i // Lifetime L2
//! }
//!
//! Note that this comes up with `&` expressions, `ref`
//! bindings, and `autorefs`, which are the three ways to introduce
//! a borrow.
//!
//! The key point here is that when you are borrowing a value that
//! is "guaranteed" by a borrowed pointer, you must link the
//! lifetime of that borrowed pointer (`L1`, here) to the lifetime of
//! the borrow itself (`L2`). What do I mean by "guaranteed" by a
//! borrowed pointer? I mean any data that is reached by first
//! dereferencing a borrowed pointer and then either traversing
//! interior offsets or boxes. We say that the guarantor
//! of such data is the region of the borrowed pointer that was
//! traversed. This is essentially the same as the ownership
//! relation, except that a borrowed pointer never owns its
//! contents.
use crate::check::dropck;
use crate::check::FnCtxt;
use crate::middle::mem_categorization as mc;
use crate::middle::mem_categorization::Categorization;
use crate::middle::region;
use rustc::hir::def_id::DefId;
use rustc::infer::outlives::env::OutlivesEnvironment;
use rustc::infer::{self, RegionObligation, SuppressRegionErrors};
use rustc::ty::adjustment;
use rustc::ty::subst::Substs;
use rustc::ty::{self, Ty};
use rustc::hir::intravisit::{self, NestedVisitorMap, Visitor};
use rustc::hir::{self, PatKind};
use rustc_data_structures::sync::Lrc;
use std::mem;
use std::ops::Deref;
use std::rc::Rc;
use syntax::ast;
use syntax_pos::Span;
// a variation on try that just returns unit
macro_rules! ignore_err {
($e:expr) => {
match $e {
Ok(e) => e,
Err(_) => {
debug!("ignoring mem-categorization error!");
return ();
}
}
};
}
///////////////////////////////////////////////////////////////////////////
// PUBLIC ENTRY POINTS
impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> {
pub fn regionck_expr(&self, body: &'gcx hir::Body) {
let subject = self.tcx.hir().body_owner_def_id(body.id());
let id = body.value.hir_id;
let mut rcx = RegionCtxt::new(
self,
RepeatingScope(id),
id,
Subject(subject),
self.param_env,
);
// There are no add'l implied bounds when checking a
// standalone expr (e.g., the `E` in a type like `[u32; E]`).
rcx.outlives_environment.save_implied_bounds(id);
if self.err_count_since_creation() == 0 {
// regionck assumes typeck succeeded
rcx.visit_body(body);
rcx.visit_region_obligations(id);
}
rcx.resolve_regions_and_report_errors(SuppressRegionErrors::when_nll_is_enabled(self.tcx));
assert!(self.tables.borrow().free_region_map.is_empty());
self.tables.borrow_mut().free_region_map = rcx.outlives_environment.into_free_region_map();
}
/// Region checking during the WF phase for items. `wf_tys` are the
/// types from which we should derive implied bounds, if any.
pub fn regionck_item(&self, item_id: hir::HirId, span: Span, wf_tys: &[Ty<'tcx>]) {
debug!("regionck_item(item.id={:?}, wf_tys={:?})", item_id, wf_tys);
let subject = self.tcx.hir().local_def_id_from_hir_id(item_id);
let mut rcx = RegionCtxt::new(
self,
RepeatingScope(item_id),
item_id,
Subject(subject),
self.param_env,
);
rcx.outlives_environment
.add_implied_bounds(self, wf_tys, item_id, span);
rcx.outlives_environment.save_implied_bounds(item_id);
rcx.visit_region_obligations(item_id);
rcx.resolve_regions_and_report_errors(SuppressRegionErrors::default());
}
/// Region check a function body. Not invoked on closures, but
/// only on the "root" fn item (in which closures may be
/// embedded). Walks the function body and adds various add'l
/// constraints that are needed for region inference. This is
/// separated both to isolate "pure" region constraints from the
/// rest of type check and because sometimes we need type
/// inference to have completed before we can determine which
/// constraints to add.
pub fn regionck_fn(&self, fn_id: ast::NodeId, body: &'gcx hir::Body) {
debug!("regionck_fn(id={})", fn_id);
let subject = self.tcx.hir().body_owner_def_id(body.id());
let hir_id = body.value.hir_id;
let mut rcx = RegionCtxt::new(
self,
RepeatingScope(hir_id),
hir_id,
Subject(subject),
self.param_env,
);
if self.err_count_since_creation() == 0 {
let fn_hir_id = self.tcx.hir().node_to_hir_id(fn_id);
// regionck assumes typeck succeeded
rcx.visit_fn_body(fn_hir_id, body, self.tcx.hir().span_by_hir_id(fn_hir_id));
}
rcx.resolve_regions_and_report_errors(SuppressRegionErrors::when_nll_is_enabled(self.tcx));
// In this mode, we also copy the free-region-map into the
// tables of the enclosing fcx. In the other regionck modes
// (e.g., `regionck_item`), we don't have an enclosing tables.
assert!(self.tables.borrow().free_region_map.is_empty());
self.tables.borrow_mut().free_region_map = rcx.outlives_environment.into_free_region_map();
}
}
///////////////////////////////////////////////////////////////////////////
// INTERNALS
pub struct RegionCtxt<'a, 'gcx: 'a + 'tcx, 'tcx: 'a> {
pub fcx: &'a FnCtxt<'a, 'gcx, 'tcx>,
pub region_scope_tree: Lrc<region::ScopeTree>,
outlives_environment: OutlivesEnvironment<'tcx>,
// id of innermost fn body id
body_id: hir::HirId,
// call_site scope of innermost fn
call_site_scope: Option<region::Scope>,
// id of innermost fn or loop
repeating_scope: hir::HirId,
// id of AST node being analyzed (the subject of the analysis).
subject_def_id: DefId,
}
impl<'a, 'gcx, 'tcx> Deref for RegionCtxt<'a, 'gcx, 'tcx> {
type Target = FnCtxt<'a, 'gcx, 'tcx>;
fn deref(&self) -> &Self::Target {
&self.fcx
}
}
pub struct RepeatingScope(hir::HirId);
pub struct Subject(DefId);
impl<'a, 'gcx, 'tcx> RegionCtxt<'a, 'gcx, 'tcx> {
pub fn new(
fcx: &'a FnCtxt<'a, 'gcx, 'tcx>,
RepeatingScope(initial_repeating_scope): RepeatingScope,
initial_body_id: hir::HirId,
Subject(subject): Subject,
param_env: ty::ParamEnv<'tcx>,
) -> RegionCtxt<'a, 'gcx, 'tcx> {
let region_scope_tree = fcx.tcx.region_scope_tree(subject);
let outlives_environment = OutlivesEnvironment::new(param_env);
RegionCtxt {
fcx,
region_scope_tree,
repeating_scope: initial_repeating_scope,
body_id: initial_body_id,
call_site_scope: None,
subject_def_id: subject,
outlives_environment,
}
}
fn set_repeating_scope(&mut self, scope: hir::HirId) -> hir::HirId {
mem::replace(&mut self.repeating_scope, scope)
}
/// Try to resolve the type for the given node, returning `t_err` if an error results. Note that
/// we never care about the details of the error, the same error will be detected and reported
/// in the writeback phase.
///
/// Note one important point: we do not attempt to resolve *region variables* here. This is
/// because regionck is essentially adding constraints to those region variables and so may yet
/// influence how they are resolved.
///
/// Consider this silly example:
///
/// ```
/// fn borrow(x: &i32) -> &i32 {x}
/// fn foo(x: @i32) -> i32 { // block: B
/// let b = borrow(x); // region: <R0>
/// *b
/// }
/// ```
///
/// Here, the region of `b` will be `<R0>`. `<R0>` is constrained to be some subregion of the
/// block B and some superregion of the call. If we forced it now, we'd choose the smaller
/// region (the call). But that would make the *b illegal. Since we don't resolve, the type
/// of b will be `&<R0>.i32` and then `*b` will require that `<R0>` be bigger than the let and
/// the `*b` expression, so we will effectively resolve `<R0>` to be the block B.
pub fn resolve_type(&self, unresolved_ty: Ty<'tcx>) -> Ty<'tcx> {
self.resolve_type_vars_if_possible(&unresolved_ty)
}
/// Try to resolve the type for the given node.
fn resolve_node_type(&self, id: hir::HirId) -> Ty<'tcx> {
let t = self.node_ty(id);
self.resolve_type(t)
}
/// Try to resolve the type for the given node.
pub fn resolve_expr_type_adjusted(&mut self, expr: &hir::Expr) -> Ty<'tcx> {
let ty = self.tables.borrow().expr_ty_adjusted(expr);
self.resolve_type(ty)
}
/// This is the "main" function when region-checking a function item or a closure
/// within a function item. It begins by updating various fields (e.g., `call_site_scope`
/// and `outlives_environment`) to be appropriate to the function and then adds constraints
/// derived from the function body.
///
/// Note that it does **not** restore the state of the fields that
/// it updates! This is intentional, since -- for the main
/// function -- we wish to be able to read the final
/// `outlives_environment` and other fields from the caller. For
/// closures, however, we save and restore any "scoped state"
/// before we invoke this function. (See `visit_fn` in the
/// `intravisit::Visitor` impl below.)
fn visit_fn_body(
&mut self,
id: hir::HirId, // the id of the fn itself
body: &'gcx hir::Body,
span: Span,
) {
// When we enter a function, we can derive
debug!("visit_fn_body(id={:?})", id);
let body_id = body.id();
self.body_id = body_id.hir_id;
let call_site = region::Scope {
id: body.value.hir_id.local_id,
data: region::ScopeData::CallSite,
};
self.call_site_scope = Some(call_site);
let fn_sig = {
match self.tables.borrow().liberated_fn_sigs().get(id) {
Some(f) => f.clone(),
None => {
bug!("No fn-sig entry for id={:?}", id);
}
}
};
// Collect the types from which we create inferred bounds.
// For the return type, if diverging, substitute `bool` just
// because it will have no effect.
//
// FIXME(#27579) return types should not be implied bounds
let fn_sig_tys: Vec<_> = fn_sig
.inputs()
.iter()
.cloned()
.chain(Some(fn_sig.output()))
.collect();
self.outlives_environment.add_implied_bounds(
self.fcx,
&fn_sig_tys[..],
body_id.hir_id,
span,
);
self.outlives_environment
.save_implied_bounds(body_id.hir_id);
self.link_fn_args(
region::Scope {
id: body.value.hir_id.local_id,
data: region::ScopeData::Node,
},
&body.arguments,
);
self.visit_body(body);
self.visit_region_obligations(body_id.hir_id);
let call_site_scope = self.call_site_scope.unwrap();
debug!(
"visit_fn_body body.id {:?} call_site_scope: {:?}",
body.id(),
call_site_scope
);
let call_site_region = self.tcx.mk_region(ty::ReScope(call_site_scope));
self.type_of_node_must_outlive(infer::CallReturn(span), body_id.hir_id, call_site_region);
self.constrain_opaque_types(
&self.fcx.opaque_types.borrow(),
self.outlives_environment.free_region_map(),
);
}
fn visit_region_obligations(&mut self, hir_id: hir::HirId) {
debug!("visit_region_obligations: hir_id={:?}", hir_id);
// region checking can introduce new pending obligations
// which, when processed, might generate new region
// obligations. So make sure we process those.
self.select_all_obligations_or_error();
}
fn resolve_regions_and_report_errors(&self, suppress: SuppressRegionErrors) {
self.infcx.process_registered_region_obligations(
self.outlives_environment.region_bound_pairs_map(),
self.implicit_region_bound,
self.param_env,
);
self.fcx.resolve_regions_and_report_errors(
self.subject_def_id,
&self.region_scope_tree,
&self.outlives_environment,
suppress,
);
}
fn constrain_bindings_in_pat(&mut self, pat: &hir::Pat) {
debug!("regionck::visit_pat(pat={:?})", pat);
pat.each_binding(|_, hir_id, span, _| {
// If we have a variable that contains region'd data, that
// data will be accessible from anywhere that the variable is
// accessed. We must be wary of loops like this:
//
// // from src/test/compile-fail/borrowck-lend-flow.rs
// let mut v = box 3, w = box 4;
// let mut x = &mut w;
// loop {
// **x += 1; // (2)
// borrow(v); //~ ERROR cannot borrow
// x = &mut v; // (1)
// }
//
// Typically, we try to determine the region of a borrow from
// those points where it is dereferenced. In this case, one
// might imagine that the lifetime of `x` need only be the
// body of the loop. But of course this is incorrect because
// the pointer that is created at point (1) is consumed at
// point (2), meaning that it must be live across the loop
// iteration. The easiest way to guarantee this is to require
// that the lifetime of any regions that appear in a
// variable's type enclose at least the variable's scope.
let var_scope = self.region_scope_tree.var_scope(hir_id.local_id);
let var_region = self.tcx.mk_region(ty::ReScope(var_scope));
let origin = infer::BindingTypeIsNotValidAtDecl(span);
self.type_of_node_must_outlive(origin, hir_id, var_region);
let typ = self.resolve_node_type(hir_id);
let body_id = self.body_id;
let _ = dropck::check_safety_of_destructor_if_necessary(
self, typ, span, body_id, var_scope,
);
})
}
}
impl<'a, 'gcx, 'tcx> Visitor<'gcx> for RegionCtxt<'a, 'gcx, 'tcx> {
// (..) FIXME(#3238) should use visit_pat, not visit_arm/visit_local,
// However, right now we run into an issue whereby some free
// regions are not properly related if they appear within the
// types of arguments that must be inferred. This could be
// addressed by deferring the construction of the region
// hierarchy, and in particular the relationships between free
// regions, until regionck, as described in #3238.
fn nested_visit_map<'this>(&'this mut self) -> NestedVisitorMap<'this, 'gcx> {
NestedVisitorMap::None
}
fn visit_fn(
&mut self,
fk: intravisit::FnKind<'gcx>,
_: &'gcx hir::FnDecl,
body_id: hir::BodyId,
span: Span,
id: ast::NodeId,
) {
assert!(
match fk {
intravisit::FnKind::Closure(..) => true,
_ => false,
},
"visit_fn invoked for something other than a closure"
);
// Save state of current function before invoking
// `visit_fn_body`. We will restore afterwards.
let old_body_id = self.body_id;
let old_call_site_scope = self.call_site_scope;
let env_snapshot = self.outlives_environment.push_snapshot_pre_closure();
let body = self.tcx.hir().body(body_id);
let hir_id = self.tcx.hir().node_to_hir_id(id);
self.visit_fn_body(hir_id, body, span);
// Restore state from previous function.
self.outlives_environment
.pop_snapshot_post_closure(env_snapshot);
self.call_site_scope = old_call_site_scope;
self.body_id = old_body_id;
}
//visit_pat: visit_pat, // (..) see above
fn visit_arm(&mut self, arm: &'gcx hir::Arm) {
// see above
for p in &arm.pats {
self.constrain_bindings_in_pat(p);
}
intravisit::walk_arm(self, arm);
}
fn visit_local(&mut self, l: &'gcx hir::Local) {
// see above
self.constrain_bindings_in_pat(&l.pat);
self.link_local(l);
intravisit::walk_local(self, l);
}
fn visit_expr(&mut self, expr: &'gcx hir::Expr) {
debug!(
"regionck::visit_expr(e={:?}, repeating_scope={:?})",
expr, self.repeating_scope
);
// No matter what, the type of each expression must outlive the
// scope of that expression. This also guarantees basic WF.
let expr_ty = self.resolve_node_type(expr.hir_id);
// the region corresponding to this expression
let expr_region = self.tcx.mk_region(ty::ReScope(region::Scope {
id: expr.hir_id.local_id,
data: region::ScopeData::Node,
}));
self.type_must_outlive(
infer::ExprTypeIsNotInScope(expr_ty, expr.span),
expr_ty,
expr_region,
);
let is_method_call = self.tables.borrow().is_method_call(expr);
// If we are calling a method (either explicitly or via an
// overloaded operator), check that all of the types provided as
// arguments for its type parameters are well-formed, and all the regions
// provided as arguments outlive the call.
if is_method_call {
let origin = match expr.node {
hir::ExprKind::MethodCall(..) => infer::ParameterOrigin::MethodCall,
hir::ExprKind::Unary(op, _) if op == hir::UnDeref => {
infer::ParameterOrigin::OverloadedDeref
}
_ => infer::ParameterOrigin::OverloadedOperator,
};
let substs = self.tables.borrow().node_substs(expr.hir_id);
self.substs_wf_in_scope(origin, substs, expr.span, expr_region);
// Arguments (sub-expressions) are checked via `constrain_call`, below.
}
// Check any autoderefs or autorefs that appear.
let cmt_result = self.constrain_adjustments(expr);
// If necessary, constrain destructors in this expression. This will be
// the adjusted form if there is an adjustment.
match cmt_result {
Ok(head_cmt) => {
self.check_safety_of_rvalue_destructor_if_necessary(&head_cmt, expr.span);
}
Err(..) => {
self.tcx.sess.delay_span_bug(expr.span, "cat_expr Errd");
}
}
debug!(
"regionck::visit_expr(e={:?}, repeating_scope={:?}) - visiting subexprs",
expr, self.repeating_scope
);
match expr.node {
hir::ExprKind::Path(_) => {
let substs = self.tables.borrow().node_substs(expr.hir_id);
let origin = infer::ParameterOrigin::Path;
self.substs_wf_in_scope(origin, substs, expr.span, expr_region);
}
hir::ExprKind::Call(ref callee, ref args) => {
if is_method_call {
self.constrain_call(expr, Some(&callee), args.iter().map(|e| &*e));
} else {
self.constrain_callee(&callee);
self.constrain_call(expr, None, args.iter().map(|e| &*e));
}
intravisit::walk_expr(self, expr);
}
hir::ExprKind::MethodCall(.., ref args) => {
self.constrain_call(expr, Some(&args[0]), args[1..].iter().map(|e| &*e));
intravisit::walk_expr(self, expr);
}
hir::ExprKind::AssignOp(_, ref lhs, ref rhs) => {
if is_method_call {
self.constrain_call(expr, Some(&lhs), Some(&**rhs).into_iter());
}
intravisit::walk_expr(self, expr);
}
hir::ExprKind::Index(ref lhs, ref rhs) if is_method_call => {
self.constrain_call(expr, Some(&lhs), Some(&**rhs).into_iter());
intravisit::walk_expr(self, expr);
}
hir::ExprKind::Binary(_, ref lhs, ref rhs) if is_method_call => {
// As `ExprKind::MethodCall`, but the call is via an overloaded op.
self.constrain_call(expr, Some(&lhs), Some(&**rhs).into_iter());
intravisit::walk_expr(self, expr);
}
hir::ExprKind::Binary(_, ref lhs, ref rhs) => {
// If you do `x OP y`, then the types of `x` and `y` must
// outlive the operation you are performing.
let lhs_ty = self.resolve_expr_type_adjusted(&lhs);
let rhs_ty = self.resolve_expr_type_adjusted(&rhs);
for &ty in &[lhs_ty, rhs_ty] {
self.type_must_outlive(infer::Operand(expr.span), ty, expr_region);
}
intravisit::walk_expr(self, expr);
}
hir::ExprKind::Unary(hir::UnDeref, ref base) => {
// For *a, the lifetime of a must enclose the deref
if is_method_call {
self.constrain_call(expr, Some(base), None::<hir::Expr>.iter());
}
// For overloaded derefs, base_ty is the input to `Deref::deref`,
                // but it's a reference type using the same region as the output.
let base_ty = self.resolve_expr_type_adjusted(base);
if let ty::Ref(r_ptr, _, _) = base_ty.sty {
self.mk_subregion_due_to_dereference(expr.span, expr_region, r_ptr);
}
intravisit::walk_expr(self, expr);
}
hir::ExprKind::Unary(_, ref lhs) if is_method_call => {
// As above.
self.constrain_call(expr, Some(&lhs), None::<hir::Expr>.iter());
intravisit::walk_expr(self, expr);
}
hir::ExprKind::Index(ref vec_expr, _) => {
// For a[b], the lifetime of a must enclose the deref
let vec_type = self.resolve_expr_type_adjusted(&vec_expr);
self.constrain_index(expr, vec_type);
intravisit::walk_expr(self, expr);
}
hir::ExprKind::Cast(ref source, _) => {
// Determine if we are casting `source` to a trait
// instance. If so, we have to be sure that the type of
// the source obeys the trait's region bound.
self.constrain_cast(expr, &source);
intravisit::walk_expr(self, expr);
}
hir::ExprKind::AddrOf(m, ref base) => {
self.link_addr_of(expr, m, &base);
// Require that when you write a `&expr` expression, the
// resulting pointer has a lifetime that encompasses the
                // `&expr` expression itself. Note that we are constraining
// the type of the node expr.id here *before applying
// adjustments*.
//
// FIXME(https://github.com/rust-lang/rfcs/issues/811)
// nested method calls requires that this rule change
let ty0 = self.resolve_node_type(expr.hir_id);
self.type_must_outlive(infer::AddrOf(expr.span), ty0, expr_region);
intravisit::walk_expr(self, expr);
}
hir::ExprKind::Match(ref discr, ref arms, _) => {
self.link_match(&discr, &arms[..]);
intravisit::walk_expr(self, expr);
}
hir::ExprKind::Closure(.., body_id, _, _) => {
self.check_expr_fn_block(expr, body_id);
}
hir::ExprKind::Loop(ref body, _, _) => {
let repeating_scope = self.set_repeating_scope(body.hir_id);
intravisit::walk_expr(self, expr);
self.set_repeating_scope(repeating_scope);
}
hir::ExprKind::While(ref cond, ref body, _) => {
let repeating_scope = self.set_repeating_scope(cond.hir_id);
self.visit_expr(&cond);
self.set_repeating_scope(body.hir_id);
self.visit_block(&body);
self.set_repeating_scope(repeating_scope);
}
hir::ExprKind::Ret(Some(ref ret_expr)) => {
let call_site_scope = self.call_site_scope;
debug!(
"visit_expr ExprKind::Ret ret_expr.id {} call_site_scope: {:?}",
ret_expr.id, call_site_scope
);
let call_site_region = self.tcx.mk_region(ty::ReScope(call_site_scope.unwrap()));
self.type_of_node_must_outlive(
infer::CallReturn(ret_expr.span),
ret_expr.hir_id,
call_site_region,
);
intravisit::walk_expr(self, expr);
}
_ => {
intravisit::walk_expr(self, expr);
}
}
}
}
impl<'a, 'gcx, 'tcx> RegionCtxt<'a, 'gcx, 'tcx> {
fn constrain_cast(&mut self, cast_expr: &hir::Expr, source_expr: &hir::Expr) {
debug!(
"constrain_cast(cast_expr={:?}, source_expr={:?})",
cast_expr, source_expr
);
let source_ty = self.resolve_node_type(source_expr.hir_id);
let target_ty = self.resolve_node_type(cast_expr.hir_id);
self.walk_cast(cast_expr, source_ty, target_ty);
}
fn walk_cast(&mut self, cast_expr: &hir::Expr, from_ty: Ty<'tcx>, to_ty: Ty<'tcx>) {
debug!("walk_cast(from_ty={:?}, to_ty={:?})", from_ty, to_ty);
match (&from_ty.sty, &to_ty.sty) {
/*From:*/
(&ty::Ref(from_r, from_ty, _), /*To: */ &ty::Ref(to_r, to_ty, _)) => {
// Target cannot outlive source, naturally.
self.sub_regions(infer::Reborrow(cast_expr.span), to_r, from_r);
self.walk_cast(cast_expr, from_ty, to_ty);
}
/*From:*/
(_, /*To: */ &ty::Dynamic(.., r)) => {
// When T is existentially quantified as a trait
// `Foo+'to`, it must outlive the region bound `'to`.
self.type_must_outlive(infer::RelateObjectBound(cast_expr.span), from_ty, r);
}
/*From:*/
(&ty::Adt(from_def, _), /*To: */ &ty::Adt(to_def, _))
if from_def.is_box() && to_def.is_box() =>
{
self.walk_cast(cast_expr, from_ty.boxed_ty(), to_ty.boxed_ty());
}
_ => {}
}
}
fn check_expr_fn_block(&mut self, expr: &'gcx hir::Expr, body_id: hir::BodyId) {
let repeating_scope = self.set_repeating_scope(body_id.hir_id);
intravisit::walk_expr(self, expr);
self.set_repeating_scope(repeating_scope);
}
fn constrain_callee(&mut self, callee_expr: &hir::Expr) {
let callee_ty = self.resolve_node_type(callee_expr.hir_id);
match callee_ty.sty {
ty::FnDef(..) | ty::FnPtr(_) => {}
_ => {
// this should not happen, but it does if the program is
// erroneous
//
// bug!(
// callee_expr.span,
// "Calling non-function: {}",
// callee_ty);
}
}
}
fn constrain_call<'b, I: Iterator<Item = &'b hir::Expr>>(
&mut self,
call_expr: &hir::Expr,
receiver: Option<&hir::Expr>,
arg_exprs: I,
) {
//! Invoked on every call site (i.e., normal calls, method calls,
//! and overloaded operators). Constrains the regions which appear
//! in the type of the function. Also constrains the regions that
//! appear in the arguments appropriately.
debug!(
"constrain_call(call_expr={:?}, receiver={:?})",
call_expr, receiver
);
// `callee_region` is the scope representing the time in which the
// call occurs.
//
// FIXME(#6268) to support nested method calls, should be callee_id
let callee_scope = region::Scope {
id: call_expr.hir_id.local_id,
data: region::ScopeData::Node,
};
let callee_region = self.tcx.mk_region(ty::ReScope(callee_scope));
debug!("callee_region={:?}", callee_region);
for arg_expr in arg_exprs {
debug!("Argument: {:?}", arg_expr);
// ensure that any regions appearing in the argument type are
// valid for at least the lifetime of the function:
self.type_of_node_must_outlive(
infer::CallArg(arg_expr.span),
arg_expr.hir_id,
callee_region,
);
}
// as loop above, but for receiver
if let Some(r) = receiver {
debug!("receiver: {:?}", r);
self.type_of_node_must_outlive(infer::CallRcvr(r.span), r.hir_id, callee_region);
}
}
    /// Creates a temporary `MemCategorizationContext` and passes it to the closure.
fn with_mc<F, R>(&self, f: F) -> R
where
F: for<'b> FnOnce(mc::MemCategorizationContext<'b, 'gcx, 'tcx>) -> R,
{
f(mc::MemCategorizationContext::with_infer(
&self.infcx,
&self.region_scope_tree,
&self.tables.borrow(),
))
}
/// Invoked on any adjustments that occur. Checks that if this is a region pointer being
/// dereferenced, the lifetime of the pointer includes the deref expr.
fn constrain_adjustments(&mut self, expr: &hir::Expr) -> mc::McResult<mc::cmt_<'tcx>> {
debug!("constrain_adjustments(expr={:?})", expr);
let mut cmt = self.with_mc(|mc| mc.cat_expr_unadjusted(expr))?;
let tables = self.tables.borrow();
let adjustments = tables.expr_adjustments(&expr);
if adjustments.is_empty() {
return Ok(cmt);
}
debug!("constrain_adjustments: adjustments={:?}", adjustments);
// If necessary, constrain destructors in the unadjusted form of this
// expression.
self.check_safety_of_rvalue_destructor_if_necessary(&cmt, expr.span);
let expr_region = self.tcx.mk_region(ty::ReScope(region::Scope {
id: expr.hir_id.local_id,
data: region::ScopeData::Node,
}));
for adjustment in adjustments {
debug!(
"constrain_adjustments: adjustment={:?}, cmt={:?}",
adjustment, cmt
);
if let adjustment::Adjust::Deref(Some(deref)) = adjustment.kind {
debug!("constrain_adjustments: overloaded deref: {:?}", deref);
// Treat overloaded autoderefs as if an AutoBorrow adjustment
// was applied on the base type, as that is always the case.
let input = self.tcx.mk_ref(
deref.region,
ty::TypeAndMut {
ty: cmt.ty,
mutbl: deref.mutbl,
},
);
let output = self.tcx.mk_ref(
deref.region,
ty::TypeAndMut {
ty: adjustment.target,
mutbl: deref.mutbl,
},
);
self.link_region(
expr.span,
deref.region,
ty::BorrowKind::from_mutbl(deref.mutbl),
&cmt,
);
// Specialized version of constrain_call.
self.type_must_outlive(infer::CallRcvr(expr.span), input, expr_region);
self.type_must_outlive(infer::CallReturn(expr.span), output, expr_region);
}
if let adjustment::Adjust::Borrow(ref autoref) = adjustment.kind {
self.link_autoref(expr, &cmt, autoref);
// Require that the resulting region encompasses
// the current node.
//
// FIXME(#6268) remove to support nested method calls
self.type_of_node_must_outlive(
infer::AutoBorrow(expr.span),
expr.hir_id,
expr_region,
);
}
cmt = self.with_mc(|mc| mc.cat_expr_adjusted(expr, cmt, &adjustment))?;
if let Categorization::Deref(_, mc::BorrowedPtr(_, r_ptr)) = cmt.cat {
self.mk_subregion_due_to_dereference(expr.span, expr_region, r_ptr);
}
}
Ok(cmt)
}
pub fn mk_subregion_due_to_dereference(
&mut self,
deref_span: Span,
minimum_lifetime: ty::Region<'tcx>,
maximum_lifetime: ty::Region<'tcx>,
) {
self.sub_regions(
infer::DerefPointer(deref_span),
minimum_lifetime,
maximum_lifetime,
)
}
fn check_safety_of_rvalue_destructor_if_necessary(&mut self, cmt: &mc::cmt_<'tcx>, span: Span) {
if let Categorization::Rvalue(region) = cmt.cat {
match *region {
ty::ReScope(rvalue_scope) => {
let typ = self.resolve_type(cmt.ty);
let body_id = self.body_id;
let _ = dropck::check_safety_of_destructor_if_necessary(
self,
typ,
span,
body_id,
rvalue_scope,
);
}
ty::ReStatic => {}
_ => {
span_bug!(
span,
"unexpected rvalue region in rvalue \
destructor safety checking: `{:?}`",
region
);
}
}
}
}
/// Invoked on any index expression that occurs. Checks that if this is a slice
/// being indexed, the lifetime of the pointer includes the deref expr.
fn constrain_index(&mut self, index_expr: &hir::Expr, indexed_ty: Ty<'tcx>) {
debug!(
"constrain_index(index_expr=?, indexed_ty={}",
self.ty_to_string(indexed_ty)
);
let r_index_expr = ty::ReScope(region::Scope {
id: index_expr.hir_id.local_id,
data: region::ScopeData::Node,
});
if let ty::Ref(r_ptr, r_ty, _) = indexed_ty.sty {
match r_ty.sty {
ty::Slice(_) | ty::Str => {
self.sub_regions(
infer::IndexSlice(index_expr.span),
self.tcx.mk_region(r_index_expr),
r_ptr,
);
}
_ => {}
}
}
}
/// Guarantees that any lifetimes which appear in the type of the node `id` (after applying
/// adjustments) are valid for at least `minimum_lifetime`
fn type_of_node_must_outlive(
&mut self,
origin: infer::SubregionOrigin<'tcx>,
hir_id: hir::HirId,
minimum_lifetime: ty::Region<'tcx>,
) {
// Try to resolve the type. If we encounter an error, then typeck
// is going to fail anyway, so just stop here and let typeck
// report errors later on in the writeback phase.
let ty0 = self.resolve_node_type(hir_id);
let ty = self.tables
.borrow()
.adjustments()
.get(hir_id)
.and_then(|adj| adj.last())
.map_or(ty0, |adj| adj.target);
let ty = self.resolve_type(ty);
debug!(
"constrain_regions_in_type_of_node(\
ty={}, ty0={}, id={:?}, minimum_lifetime={:?})",
ty, ty0, hir_id, minimum_lifetime
);
self.type_must_outlive(origin, ty, minimum_lifetime);
}
/// Adds constraints to inference such that `T: 'a` holds (or
/// reports an error if it cannot).
///
/// # Parameters
///
/// - `origin`, the reason we need this constraint
/// - `ty`, the type `T`
/// - `region`, the region `'a`
pub fn type_must_outlive(
&self,
origin: infer::SubregionOrigin<'tcx>,
ty: Ty<'tcx>,
region: ty::Region<'tcx>,
) {
self.infcx.register_region_obligation(
self.body_id,
RegionObligation {
sub_region: region,
sup_type: ty,
origin,
},
);
}
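    // For instance (an illustrative note, not from the original source): checking
    // `fn first<'a, T>(xs: &'a [T]) -> &'a T { &xs[0] }` ends up registering the
    // obligation `T: 'a`, since data reachable through the returned reference
    // must be valid for the whole region `'a`.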
/// Computes the guarantor for an expression `&base` and then ensures that the lifetime of the
/// resulting pointer is linked to the lifetime of its guarantor (if any).
fn link_addr_of(&mut self, expr: &hir::Expr, mutability: hir::Mutability, base: &hir::Expr) {
debug!("link_addr_of(expr={:?}, base={:?})", expr, base);
let cmt = ignore_err!(self.with_mc(|mc| mc.cat_expr(base)));
debug!("link_addr_of: cmt={:?}", cmt);
self.link_region_from_node_type(expr.span, expr.hir_id, mutability, &cmt);
}
/// Computes the guarantors for any ref bindings in a `let` and
/// then ensures that the lifetime of the resulting pointer is
/// linked to the lifetime of the initialization expression.
fn link_local(&self, local: &hir::Local) {
debug!("regionck::for_local()");
let init_expr = match local.init {
None => {
return;
}
Some(ref expr) => &**expr,
};
let discr_cmt = Rc::new(ignore_err!(self.with_mc(|mc| mc.cat_expr(init_expr))));
self.link_pattern(discr_cmt, &local.pat);
}
/// Computes the guarantors for any ref bindings in a match and
/// then ensures that the lifetime of the resulting pointer is
/// linked to the lifetime of its guarantor (if any).
fn link_match(&self, discr: &hir::Expr, arms: &[hir::Arm]) {
debug!("regionck::for_match()");
let discr_cmt = Rc::new(ignore_err!(self.with_mc(|mc| mc.cat_expr(discr))));
debug!("discr_cmt={:?}", discr_cmt);
for arm in arms {
for root_pat in &arm.pats {
self.link_pattern(discr_cmt.clone(), &root_pat);
}
}
}
    /// Computes the guarantors for any ref bindings in the function arguments and
/// then ensures that the lifetime of the resulting pointer is
/// linked to the lifetime of its guarantor (if any).
fn link_fn_args(&self, body_scope: region::Scope, args: &[hir::Arg]) {
debug!("regionck::link_fn_args(body_scope={:?})", body_scope);
for arg in args {
let arg_ty = self.node_ty(arg.hir_id);
let re_scope = self.tcx.mk_region(ty::ReScope(body_scope));
let arg_cmt = self.with_mc(|mc| {
Rc::new(mc.cat_rvalue(arg.hir_id, arg.pat.span, re_scope, arg_ty))
});
debug!("arg_ty={:?} arg_cmt={:?} arg={:?}", arg_ty, arg_cmt, arg);
self.link_pattern(arg_cmt, &arg.pat);
}
}
/// Link lifetimes of any ref bindings in `root_pat` to the pointers found
/// in the discriminant, if needed.
fn link_pattern(&self, discr_cmt: mc::cmt<'tcx>, root_pat: &hir::Pat) {
debug!(
"link_pattern(discr_cmt={:?}, root_pat={:?})",
discr_cmt, root_pat
);
ignore_err!(self.with_mc(|mc| {
mc.cat_pattern(discr_cmt, root_pat, |sub_cmt, sub_pat| {
// `ref x` pattern
if let PatKind::Binding(..) = sub_pat.node {
if let Some(&bm) = mc.tables.pat_binding_modes().get(sub_pat.hir_id) {
if let ty::BindByReference(mutbl) = bm {
self.link_region_from_node_type(
sub_pat.span,
sub_pat.hir_id,
mutbl,
&sub_cmt,
);
}
} else {
self.tcx
.sess
.delay_span_bug(sub_pat.span, "missing binding mode");
}
}
})
}));
}
/// Link lifetime of borrowed pointer resulting from autoref to lifetimes in the value being
/// autoref'd.
fn link_autoref(
&self,
expr: &hir::Expr,
expr_cmt: &mc::cmt_<'tcx>,
autoref: &adjustment::AutoBorrow<'tcx>,
) {
debug!(
"link_autoref(autoref={:?}, expr_cmt={:?})",
autoref, expr_cmt
);
match *autoref {
adjustment::AutoBorrow::Ref(r, m) => {
self.link_region(expr.span, r, ty::BorrowKind::from_mutbl(m.into()), expr_cmt);
}
adjustment::AutoBorrow::RawPtr(m) => {
let r = self.tcx.mk_region(ty::ReScope(region::Scope {
id: expr.hir_id.local_id,
data: region::ScopeData::Node,
}));
self.link_region(expr.span, r, ty::BorrowKind::from_mutbl(m), expr_cmt);
}
}
}
/// Like `link_region()`, except that the region is extracted from the type of `id`,
/// which must be some reference (`&T`, `&str`, etc).
fn link_region_from_node_type(
&self,
span: Span,
id: hir::HirId,
mutbl: hir::Mutability,
cmt_borrowed: &mc::cmt_<'tcx>,
) {
debug!(
"link_region_from_node_type(id={:?}, mutbl={:?}, cmt_borrowed={:?})",
id, mutbl, cmt_borrowed
);
let rptr_ty = self.resolve_node_type(id);
if let ty::Ref(r, _, _) = rptr_ty.sty {
debug!("rptr_ty={}", rptr_ty);
self.link_region(span, r, ty::BorrowKind::from_mutbl(mutbl), cmt_borrowed);
}
}
/// Informs the inference engine that `borrow_cmt` is being borrowed with
/// kind `borrow_kind` and lifetime `borrow_region`.
/// In order to ensure borrowck is satisfied, this may create constraints
/// between regions, as explained in `link_reborrowed_region()`.
fn link_region(
&self,
span: Span,
borrow_region: ty::Region<'tcx>,
borrow_kind: ty::BorrowKind,
borrow_cmt: &mc::cmt_<'tcx>,
) {
let origin = infer::DataBorrowed(borrow_cmt.ty, span);
self.type_must_outlive(origin, borrow_cmt.ty, borrow_region);
let mut borrow_kind = borrow_kind;
let mut borrow_cmt_cat = borrow_cmt.cat.clone();
loop {
debug!(
"link_region(borrow_region={:?}, borrow_kind={:?}, borrow_cmt={:?})",
borrow_region, borrow_kind, borrow_cmt
);
match borrow_cmt_cat {
Categorization::Deref(ref_cmt, mc::BorrowedPtr(ref_kind, ref_region)) => {
match self.link_reborrowed_region(
span,
borrow_region,
borrow_kind,
ref_cmt,
ref_region,
ref_kind,
borrow_cmt.note,
) {
Some((c, k)) => {
borrow_cmt_cat = c.cat.clone();
borrow_kind = k;
}
None => {
return;
}
}
}
Categorization::Downcast(cmt_base, _)
| Categorization::Deref(cmt_base, mc::Unique)
| Categorization::Interior(cmt_base, _) => {
// Borrowing interior or owned data requires the base
// to be valid and borrowable in the same fashion.
borrow_cmt_cat = cmt_base.cat.clone();
borrow_kind = borrow_kind;
}
Categorization::Deref(_, mc::UnsafePtr(..))
| Categorization::StaticItem
| Categorization::Upvar(..)
| Categorization::Local(..)
| Categorization::ThreadLocal(..)
| Categorization::Rvalue(..) => {
// These are all "base cases" with independent lifetimes
// that are not subject to inference
return;
}
}
}
}
/// This is the most complicated case: the path being borrowed is
/// itself the referent of a borrowed pointer. Let me give an
/// example fragment of code to make clear(er) the situation:
///
/// let r: &'a mut T = ...; // the original reference "r" has lifetime 'a
/// ...
/// &'z *r // the reborrow has lifetime 'z
///
/// Now, in this case, our primary job is to add the inference
/// constraint that `'z <= 'a`. Given this setup, let's clarify the
    /// parameters (roughly) in terms of the example:
///
/// ```plain,ignore (pseudo-Rust)
/// A borrow of: `& 'z bk * r` where `r` has type `& 'a bk T`
/// borrow_region ^~ ref_region ^~
/// borrow_kind ^~ ref_kind ^~
/// ref_cmt ^
/// ```
///
/// Here `bk` stands for some borrow-kind (e.g., `mut`, `uniq`, etc).
///
/// Unfortunately, there are some complications beyond the simple
/// scenario I just painted:
///
/// 1. The reference `r` might in fact be a "by-ref" upvar. In that
/// case, we have two jobs. First, we are inferring whether this reference
/// should be an `&T`, `&mut T`, or `&uniq T` reference, and we must
/// adjust that based on this borrow (e.g., if this is an `&mut` borrow,
/// then `r` must be an `&mut` reference). Second, whenever we link
/// two regions (here, `'z <= 'a`), we supply a *cause*, and in this
/// case we adjust the cause to indicate that the reference being
/// "reborrowed" is itself an upvar. This provides a nicer error message
/// should something go wrong.
///
/// 2. There may in fact be more levels of reborrowing. In the
/// example, I said the borrow was like `&'z *r`, but it might
/// in fact be a borrow like `&'z **q` where `q` has type `&'a
/// &'b mut T`. In that case, we want to ensure that `'z <= 'a`
/// and `'z <= 'b`. This is explained more below.
///
/// The return value of this function indicates whether we need to
/// recurse and process `ref_cmt` (see case 2 above).
fn link_reborrowed_region(
&self,
span: Span,
borrow_region: ty::Region<'tcx>,
borrow_kind: ty::BorrowKind,
ref_cmt: mc::cmt<'tcx>,
ref_region: ty::Region<'tcx>,
mut ref_kind: ty::BorrowKind,
note: mc::Note,
) -> Option<(mc::cmt<'tcx>, ty::BorrowKind)> {
// Possible upvar ID we may need later to create an entry in the
// maybe link map.
// Detect by-ref upvar `x`:
let cause = match note {
mc::NoteUpvarRef(ref upvar_id) => {
match self.tables.borrow().upvar_capture_map.get(upvar_id) {
Some(&ty::UpvarCapture::ByRef(ref upvar_borrow)) => {
// The mutability of the upvar may have been modified
// by the above adjustment, so update our local variable.
ref_kind = upvar_borrow.kind;
infer::ReborrowUpvar(span, *upvar_id)
}
_ => {
span_bug!(span, "Illegal upvar id: {:?}", upvar_id);
}
}
}
mc::NoteClosureEnv(ref upvar_id) => {
// We don't have any mutability changes to propagate, but
// we do want to note that an upvar reborrow caused this
// link
infer::ReborrowUpvar(span, *upvar_id)
}
_ => infer::Reborrow(span),
};
debug!(
"link_reborrowed_region: {:?} <= {:?}",
borrow_region, ref_region
);
self.sub_regions(cause, borrow_region, ref_region);
// If we end up needing to recurse and establish a region link
// with `ref_cmt`, calculate what borrow kind we will end up
// needing. This will be used below.
//
// One interesting twist is that we can weaken the borrow kind
// when we recurse: to reborrow an `&mut` referent as mutable,
// borrowck requires a unique path to the `&mut` reference but not
// necessarily a *mutable* path.
let new_borrow_kind = match borrow_kind {
ty::ImmBorrow => ty::ImmBorrow,
ty::MutBorrow | ty::UniqueImmBorrow => ty::UniqueImmBorrow,
};
// Decide whether we need to recurse and link any regions within
        // the `ref_cmt`. This is concerned with the case where the value
// being reborrowed is in fact a borrowed pointer found within
// another borrowed pointer. For example:
//
// let p: &'b &'a mut T = ...;
// ...
// &'z **p
//
// What makes this case particularly tricky is that, if the data
// being borrowed is a `&mut` or `&uniq` borrow, borrowck requires
// not only that `'z <= 'a`, (as before) but also `'z <= 'b`
// (otherwise the user might mutate through the `&mut T` reference
// after `'b` expires and invalidate the borrow we are looking at
// now).
//
// So let's re-examine our parameters in light of this more
// complicated (possible) scenario:
//
// A borrow of: `& 'z bk * * p` where `p` has type `&'b bk & 'a bk T`
// borrow_region ^~ ref_region ^~
// borrow_kind ^~ ref_kind ^~
// ref_cmt ^~~
//
// (Note that since we have not examined `ref_cmt.cat`, we don't
// know whether this scenario has occurred; but I wanted to show
// how all the types get adjusted.)
match ref_kind {
            ty::ImmBorrow => {
                // The reference being reborrowed is a shareable ref of
                // type `&'a T`. In this case, it doesn't matter where we
                // *found* the `&T` pointer, the memory it references will
                // be valid and immutable for `'a`. So we can stop here.
                //
                // (Note that the `borrow_kind` must also be ImmBorrow or
                // else the user is borrowing imm memory as mut memory,
                // which means they'll get an error downstream in borrowck
                // anyhow.)
                return None;
            }
ty::MutBorrow | ty::UniqueImmBorrow => {
// The reference being reborrowed is either an `&mut T` or
// `&uniq T`. This is the case where recursion is needed.
return Some((ref_cmt, new_borrow_kind));
}
}
}
/// Checks that the values provided for type/region arguments in a given
/// expression are well-formed and in-scope.
fn substs_wf_in_scope(
&mut self,
origin: infer::ParameterOrigin,
substs: &Substs<'tcx>,
expr_span: Span,
expr_region: ty::Region<'tcx>,
) {
debug!(
"substs_wf_in_scope(substs={:?}, \
expr_region={:?}, \
origin={:?}, \
expr_span={:?})",
substs, expr_region, origin, expr_span
);
let origin = infer::ParameterInScope(origin, expr_span);
for region in substs.regions() {
self.sub_regions(origin.clone(), expr_region, region);
}
for ty in substs.types() {
let ty = self.resolve_type(ty);
self.type_must_outlive(origin.clone(), ty, expr_region);
}
}
}
|
control_rfc3542_unix.go
|
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build darwin dragonfly freebsd linux netbsd openbsd
package ipv6
import (
"syscall"
"unsafe"
"bosun.org/_third_party/golang.org/x/net/internal/iana"
)
func marshalTrafficClass(b []byte, cm *ControlMessage) []byte {
m := (*syscall.Cmsghdr)(unsafe.Pointer(&b[0]))
m.Level = iana.ProtocolIPv6
m.Type = sysIPV6_TCLASS
m.SetLen(syscall.CmsgLen(4))
if cm != nil {
data := b[syscall.CmsgLen(0):]
// TODO(mikio): fix potential misaligned memory access
*(*int32)(unsafe.Pointer(&data[:4][0])) = int32(cm.TrafficClass)
}
return b[syscall.CmsgSpace(4):]
}
func parseTrafficClass(cm *ControlMessage, b []byte) {
// TODO(mikio): fix potential misaligned memory access
cm.TrafficClass = int(*(*int32)(unsafe.Pointer(&b[:4][0])))
}
func marshalHopLimit(b []byte, cm *ControlMessage) []byte {
m := (*syscall.Cmsghdr)(unsafe.Pointer(&b[0]))
m.Level = iana.ProtocolIPv6
m.Type = sysIPV6_HOPLIMIT
m.SetLen(syscall.CmsgLen(4))
if cm != nil {
data := b[syscall.CmsgLen(0):]
// TODO(mikio): fix potential misaligned memory access
*(*int32)(unsafe.Pointer(&data[:4][0])) = int32(cm.HopLimit)
}
return b[syscall.CmsgSpace(4):]
}
func parseHopLimit(cm *ControlMessage, b []byte) {
// TODO(mikio): fix potential misaligned memory access
cm.HopLimit = int(*(*int32)(unsafe.Pointer(&b[:4][0])))
}
func marshalPacketInfo(b []byte, cm *ControlMessage) []byte {
m := (*syscall.Cmsghdr)(unsafe.Pointer(&b[0]))
m.Level = iana.ProtocolIPv6
m.Type = sysIPV6_PKTINFO
m.SetLen(syscall.CmsgLen(sysSizeofInet6Pktinfo))
if cm != nil {
pi := (*sysInet6Pktinfo)(unsafe.Pointer(&b[syscall.CmsgLen(0)]))
if ip := cm.Src.To16(); ip != nil && ip.To4() == nil {
copy(pi.Addr[:], ip)
}
if cm.IfIndex > 0 {
pi.setIfindex(cm.IfIndex)
}
}
return b[syscall.CmsgSpace(sysSizeofInet6Pktinfo):]
}
func parsePacketInfo(cm *ControlMessage, b []byte) {
pi := (*sysInet6Pktinfo)(unsafe.Pointer(&b[0]))
cm.Dst = pi.Addr[:]
cm.IfIndex = int(pi.Ifindex)
}
func marshalNextHop(b []byte, cm *ControlMessage) []byte {
m := (*syscall.Cmsghdr)(unsafe.Pointer(&b[0]))
m.Level = iana.ProtocolIPv6
m.Type = sysIPV6_NEXTHOP
m.SetLen(syscall.CmsgLen(sysSizeofSockaddrInet6))
	if cm != nil {
		sa := (*sysSockaddrInet6)(unsafe.Pointer(&b[syscall.CmsgLen(0)]))
		sa.setSockaddr(cm.NextHop, cm.IfIndex)
	}
return b[syscall.CmsgSpace(sysSizeofSockaddrInet6):]
}
func parseNextHop(cm *ControlMessage, b []byte) {
}
func marshalPathMTU(b []byte, cm *ControlMessage) []byte {
m := (*syscall.Cmsghdr)(unsafe.Pointer(&b[0]))
m.Level = iana.ProtocolIPv6
m.Type = sysIPV6_PATHMTU
m.SetLen(syscall.CmsgLen(sysSizeofIPv6Mtuinfo))
return b[syscall.CmsgSpace(sysSizeofIPv6Mtuinfo):]
}
func parsePathMTU(cm *ControlMessage, b []byte) {
mi := (*sysIPv6Mtuinfo)(unsafe.Pointer(&b[0]))
cm.Dst = mi.Addr.Addr[:]
cm.IfIndex = int(mi.Addr.Scope_id)
cm.MTU = int(mi.Mtu)
}
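// Illustrative usage sketch (not part of the original file): the parse*
// helpers above are typically driven after splitting a raw out-of-band
// buffer into individual control messages; `oob[:oobn]` and `cm` below are
// assumed to come from the caller's read path.
//
//	cmsgs, err := syscall.ParseSocketControlMessage(oob[:oobn])
//	if err != nil {
//		return err
//	}
//	for _, c := range cmsgs {
//		if c.Header.Level == iana.ProtocolIPv6 && c.Header.Type == sysIPV6_HOPLIMIT {
//			parseHopLimit(cm, c.Data)
//		}
//	}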
|
bbbp.py
|
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
import time
import datetime
import os
import sys
import h5py
from pathlib import Path
import pandas as pd
import matplotlib.pyplot as plt
import evidential_deep_learning as edl
from .util import normalize, gallery
class BBBP:
def __init__(self, model, opts, dataset="", learning_rate=1e-3, tag="", custom_plot_folder="", custom_best_results_dat_folder=""):
self.loss_function = edl.losses.MSE
self.model = model
self.optimizer = tf.optimizers.Adam(learning_rate)
self.min_rmse = float('inf')
self.min_nll = float('inf')
self.min_vloss = float('inf')
trainer = self.__class__.__name__
current_time = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
self.save_dir = os.path.join('save','{}_{}_{}_{}'.format(current_time, dataset, trainer, tag))
Path(self.save_dir).mkdir(parents=True, exist_ok=True)
self.custom_plot_folder = custom_plot_folder
Path(self.custom_plot_folder).mkdir(parents=True, exist_ok=True)
self.custom_best_results_dat_folder = custom_best_results_dat_folder
Path(self.custom_best_results_dat_folder).mkdir(parents=True, exist_ok=True)
train_log_dir = os.path.join('logs', '{}_{}_{}_{}_train'.format(current_time, dataset, trainer, tag))
self.train_summary_writer = tf.summary.create_file_writer(train_log_dir)
val_log_dir = os.path.join('logs', '{}_{}_{}_{}_val'.format(current_time, dataset, trainer, tag))
self.val_summary_writer = tf.summary.create_file_writer(val_log_dir)
@tf.function
def run_train_step(self, x, y):
with tf.GradientTape() as tape:
y_hat = self.model(x, training=True) #forward pass
loss = self.loss_function(y, y_hat)
loss += tf.reduce_mean(self.model.losses)
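            # `self.model.losses` typically carries the KL divergence terms that
            # Bayesian layers (e.g. tfp.layers.DenseFlipout) register, so the
            # objective here is the data loss plus a KL penalty; this reading is
            # an inference from the surrounding code, not stated in the original.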
grads = tape.gradient(loss, self.model.variables) #compute gradient
self.optimizer.apply_gradients(zip(grads, self.model.variables))
return loss, y_hat
@tf.function
def evaluate(self, x, y):
preds = tf.stack([self.model(x, training=True) for _ in range(5)], axis=0) #forward pass
mu, var = tf.nn.moments(preds, axes=0)
rmse = edl.losses.RMSE(y, mu)
nll = edl.losses.Gaussian_NLL(y, mu, tf.sqrt(var))
loss = self.loss_function(y, mu)
return mu, var, loss, rmse, nll
@tf.function
def siyan_bbbp_evaluate(self, x_input):
preds = tf.stack([self.model(x_input, training=True) for _ in range(15)], axis=0) # forward pass
mean_mu = tf.reduce_mean(preds, axis=0)
epistemic = tf.math.reduce_std(preds, axis=0)
return mean_mu, epistemic
def save_train_summary(self, loss, x, y, y_hat):
with self.train_summary_writer.as_default():
# tf.summary.scalar('loss', tf.reduce_mean(loss), step=self.iter)
tf.summary.scalar('mse', tf.reduce_mean(edl.losses.MSE(y, y_hat)), step=self.iter)
idx = np.random.choice(int(tf.shape(x)[0]), 9)
            if len(x.shape) == 4:  # log images only for 4-D (NHWC) batches
tf.summary.image("x", [gallery(tf.gather(x,idx).numpy())], max_outputs=1, step=self.iter)
            if len(y.shape) == 4:
tf.summary.image("y", [gallery(tf.gather(y,idx).numpy())], max_outputs=1, step=self.iter)
tf.summary.image("y_hat", [gallery(tf.gather(y_hat,idx).numpy())], max_outputs=1, step=self.iter)
def save_val_summary(self, loss, x, y, mu, var):
with self.val_summary_writer.as_default():
tf.summary.scalar('loss', tf.reduce_mean(self.loss_function(y, mu)), step=self.iter)
tf.summary.scalar('mse', tf.reduce_mean(edl.losses.MSE(y, mu)), step=self.iter)
idx = np.random.choice(int(tf.shape(x)[0]), 9)
            if len(x.shape) == 4:  # log images only for 4-D (NHWC) batches
tf.summary.image("x", [gallery(tf.gather(x,idx).numpy())], max_outputs=1, step=self.iter)
            if len(y.shape) == 4:
tf.summary.image("y", [gallery(tf.gather(y,idx).numpy())], max_outputs=1, step=self.iter)
tf.summary.image("y_hat", [gallery(tf.gather(mu,idx).numpy())], max_outputs=1, step=self.iter)
tf.summary.image("y_var", [gallery(normalize(tf.gather(var,idx)).numpy())], max_outputs=1, step=self.iter)
def get_batch(self, x, y, batch_size):
idx = np.random.choice(x.shape[0], batch_size, replace=False)
if isinstance(x, tf.Tensor):
x_ = x[idx,...]
y_ = y[idx,...]
elif isinstance(x, np.ndarray) or isinstance(x, h5py.Dataset):
idx = np.sort(idx)
x_ = x[idx,...]
y_ = y[idx,...]
x_divisor = 255. if x_.dtype == np.uint8 else 1.0
y_divisor = 255. if y_.dtype == np.uint8 else 1.0
x_ = tf.convert_to_tensor(x_/x_divisor, tf.float32)
y_ = tf.convert_to_tensor(y_/y_divisor, tf.float32)
        else:
            raise TypeError("unknown dataset type {} {}".format(type(x), type(y)))
return x_, y_
def save(self, name):
self.model.save(os.path.join(self.save_dir, "{}.h5".format(name)))
# pass
''' Siyan added for testing plot '''
def plot_scatter_with_var_from_pandas(self, x_train, y_train, x_test, y_test, mu, var, path, n_stds=3, test_bounds=[[-7, +7]], show=True):
plt.scatter(x_train, y_train, s=1., c='#463c3c', zorder=0, label='Train (x_train vs y_train)')
for k in np.linspace(0, n_stds, 4):
if k == 0:
plt.fill_between(x_test[:, 0], (mu - k * var), (mu + k * var), alpha=0.3, edgecolor=None,
facecolor='#00aeef', linewidth=0, antialiased=True, zorder=1, label='Unc.')
else:
plt.fill_between(x_test[:, 0], (mu - k * var), (mu + k * var), alpha=0.3, edgecolor=None,
facecolor='#00aeef', linewidth=0, antialiased=True, zorder=1)
plt.plot(x_test, y_test, 'r--', zorder=2, label='True (x_test vs y_test)')
plt.plot(x_test, mu, color='#007cab', zorder=3, label='Pred (x_test vs mu)')
plt.gca().set_xlim(*test_bounds)
plt.gca().set_ylim(-150, 150)
plt.title(path)
plt.legend()
plt.savefig(path, transparent=True)
if show:
plt.show()
plt.clf()
def
|
(self, x_train, y_train, x_test, y_test, x_valid, y_valid, y_scale, batch_size=128, iters=10000, verbose=True):
''' Siyan added START '''
# np.random.seed(1234)
test_eval_count = 0
valid_eval_count = 0
test_best_iter_str = '0'
valid_best_iter_str = '0'
x_valid_input = tf.convert_to_tensor(x_valid, tf.float32)
y_valid_input = tf.convert_to_tensor(y_valid, tf.float32)
tic = time.time()
for self.iter in range(iters):
x_input_batch, y_input_batch = self.get_batch(x_train, y_train, batch_size)
loss, y_hat = self.run_train_step(x_input_batch, y_input_batch)
if self.iter % 10 == 0:
self.save_train_summary(loss, x_input_batch, y_input_batch, y_hat)
if self.iter % 100 == 0:
x_test_batch, y_test_batch = self.get_batch(x_test, y_test, min(100, x_test.shape[0]))
mu, var, vloss, rmse, nll = self.evaluate(x_test_batch, y_test_batch)
nll += np.log(y_scale[0,0])
rmse *= y_scale[0,0]
self.save_val_summary(vloss, x_test_batch, y_test_batch, mu, var)
if rmse.numpy() < self.min_rmse:
self.min_rmse = rmse.numpy()
print("SAVING")
self.save("model_rmse")
if nll.numpy() < self.min_nll:
self.min_nll = nll.numpy()
self.save("model_nll")
if vloss.numpy() < self.min_vloss:
self.min_vloss = vloss.numpy()
self.save("model_vloss")
# if verbose: print("[{}] \t RMSE: {:.4f} \t NLL: {:.4f} \t train_loss: {:.4f} \t t: {:.2f} sec".format(self.iter, self.min_rmse, self.min_nll, vloss, time.time()-tic))
# tic = time.time()
''' Siyan Test START --- (for entire test data instead of batch of test data)'''
if self.iter % 10 == 0:
x_test_input = tf.convert_to_tensor(x_test, tf.float32)
y_test_input = tf.convert_to_tensor(y_test, tf.float32)
test_mu, test_var, test_vloss, test_rmse, test_nll = self.evaluate(x_test_input, y_test_input)
                # also record results on the very first evaluation, so the
                # DataFrame written after the loop is always defined
                if test_eval_count == 0 or test_vloss < tmp_test_loss:
                        tmp_test_loss = test_vloss
print("[{}] Test loss: {:.6f} \t RMSE: {:.4f} \t NLL: {:.4f} ".
format(self.iter, test_vloss, test_rmse.numpy(), test_nll.numpy()))
test_mean_mu, test_epistemic = self.siyan_bbbp_evaluate(x_test_input)
### update the DataFrame
test_results_df = pd.DataFrame({
'test_x': list(x_test.flatten()),
'test_y': list(y_test.flatten()),
'test_mean_mu': list(test_mean_mu.numpy().flatten()),
'test_epistemic': list(test_epistemic.numpy().flatten())
# 'test_mu': list(test_mu.numpy().flatten()),
# 'test_var': list(test_var.numpy().flatten())
})
test_best_iter_str = str(self.iter)
test_eval_count += 1
''' Siyan Test END --- (for entire test data instead of batch of test data)'''
''' Siyan Test START --- (for entire validation data instead of batch of validation data)'''
valid_mu, valid_var, valid_vloss, valid_rmse, valid_nll = self.evaluate(x_valid_input, y_valid_input)
                # likewise, record validation results on the first evaluation
                if valid_eval_count == 0 or valid_vloss < tmp_valid_loss:
                        tmp_valid_loss = valid_vloss
print("[{}] Validation loss: {:.6f} \t RMSE: {:.4f} \t NLL: {:.4f} ".
format(self.iter, valid_vloss, valid_rmse.numpy(), valid_nll.numpy()))
valid_mean_mu, valid_epistemic = self.siyan_bbbp_evaluate(x_valid_input)
### update the DataFrame
valid_results_df = pd.DataFrame({
'valid_x': list(x_valid.flatten()),
'valid_y': list(y_valid.flatten()),
'valid_mean_mu': list(valid_mean_mu.numpy().flatten()),
'valid_epistemic': list(valid_epistemic.numpy().flatten())
# 'valid_mean_var': list(valid_mean_var.numpy().flatten()),
# 'valid_reduce_std_mu': list(valid_reduce_std_mu.numpy().flatten()),
# 'valid_reduce_mean_var': list(valid_reduce_mean_var.numpy().flatten())
})
valid_best_iter_str = str(self.iter)
valid_eval_count += 1
''' Siyan Test END --- (for entire validation data instead of batch of validation data)'''
test_results_df.to_csv(self.custom_best_results_dat_folder + "/best_test_results_iter_"+test_best_iter_str+".dat", sep=" ")
print('--- Saved testing results to '+self.custom_best_results_dat_folder+'/best_test_results_iter_'+test_best_iter_str+".dat")
valid_results_df.to_csv(self.custom_best_results_dat_folder + "/best_valid_results_iter_"+valid_best_iter_str+".dat", sep=" ")
print('--- Saved validation results to '+self.custom_best_results_dat_folder +'/best_valid_results_iter_'+valid_best_iter_str+".dat")
''' Test/validation results calculation and plotting:
(1) Best train results; (2) Best validation results. And compare to the original results
'''
load_test_df = pd.read_csv(self.custom_best_results_dat_folder + "/best_test_results_iter_"+test_best_iter_str+".dat", sep=" ")
load_valid_df = pd.read_csv(self.custom_best_results_dat_folder + "/best_valid_results_iter_"+valid_best_iter_str+".dat", sep=" ")
valid_bounds = [[-7, +7]]
self.plot_scatter_with_var_from_pandas(x_train, y_train, x_valid, y_valid,
load_test_df['test_mean_mu'].values, load_test_df['test_epistemic'].values, path=self.custom_plot_folder+"/test_plot.pdf", n_stds=3, test_bounds=valid_bounds, show=True)
self.plot_scatter_with_var_from_pandas(x_train, y_train, x_valid, y_valid,
load_valid_df['valid_mean_mu'].values, load_valid_df['valid_epistemic'], path=self.custom_plot_folder+"/valid_plot.pdf", n_stds=3, test_bounds=valid_bounds, show=True)
return self.model, self.min_rmse, self.min_nll
|
train
|
model.js
|
/**
* Applica (www.applicadoit.com).
* User: bimbobruno
* Date: 2/21/13
* Time: 1:25 PM
* Applica
*/
define(["framework/core", "framework/ui"], function(core, ui) {
var exports = {};
var AjaxService = core.AObject.extend({
ctor: function() {
this.data = {};
this.url = null;
this.method = "GET";
this.dataType = "json";
//this.contentType = "application/json";
},
load: function() {
var self = this;
$.ajax({
type: this.method,
traditional: true,
url: this.url,
data: this.data,
contentType: this.contentType,
dataType: this.dataType,
success: function(response) {
self.onSuccess(response);
},
error: function() {
self.onError(msg.MSG_LOAD_ERROR);
}
});
},
onSuccess: function(response) {
var self = this;
if(response.error) {
self.invoke("error", response.message);
return;
}
var data = response.value;
self.invoke("load", data);
},
onError: function(error) {
this.invoke("error", error);
}
});
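    // Executes a declarative "after" directive returned by the server,
    // expressed as "<command>:<value>" (e.g. "redirect:/home" redirects,
    // "command:<name>" invokes a registered UI command).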
var AfterActionExecutor = core.AObject.extend({
ctor: function(command) {
AfterActionExecutor.super.ctor.call(this);
this.command = command;
this.commands = {
redirect: function(value) {
location.href = BASE + value;
},
command: function(value) {
ui.CommandsManager.instance().invoke(value);
}
};
},
execute: function() {
if(!core.utils.stringIsNullOrEmpty(this.command)) {
var split = this.command.split(":");
                if(split.length != 2) {
return;
}
var command = split[0];
var value = split[1];
var fn = this.commands[command];
if($.isFunction(fn)) {
fn(value);
}
}
}
});
var FormService = core.AObject.extend({
ctor: function() {
FormService.super.ctor.call(this);
this.method = "GET";
this.url = null;
this.data = {};
this.identifier = null;
this.title = null;
},
load: function() {
var self = this;
if(!this.url) throw "FormService.load(): url is needed";
self.element = null;
$.ajax({
type: this.method,
url: this.url,
data: this.data,
dataType: 'json',
success: function(response) {
if(response.error) {
self.invoke("error", response.message);
}
else {
self.element = _E("div").html(response.content);
self.title = response.title;
self.invoke("load", self.element);
}
},
error: function() {
self.invoke("error", "generic error");
}
});
},
performAction: function(data) {
var self = this;
var url = this.action;
var options = {
method: 'POST',
url: url,
data: data
};
$.ajax({
type: options.method,
url: options.url,
data: options.data,
dataType: "json",
success: function(response) {
if(response.error) {
self.invoke("error", response.message);
} else if(!response.valid) {
self.invoke("validationError", response.validationResult);
} else {
self.invoke("complete");
if(response.after) {
var executor = new AfterActionExecutor(response.after);
executor.execute();
}
}
},
error: function() {
self.invoke("error", "generic error");
}
});
},
save: function(data) {
var self = this;
if(!this.identifier) {
throw "Please specify an identifier in formService to save";
}
var url = BASE + "crud/form/" + this.identifier + "/save";
var options = {
method: 'POST',
url: url,
data: data
};
$.ajax({
type: options.method,
url: options.url,
data: options.data,
dataType: "json",
success: function(response) {
if(response.error) {
self.invoke("error", response.message);
} else if(!response.valid) {
self.invoke("validationError", response.validationResult);
} else {
self.invoke("save");
}
},
error: function() {
self.invoke("error", "generic error");
}
});
}
});
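    // Loads server-rendered grid markup; paging, sorting and filtering are
    // carried in the serialized loadRequest object sent with every load.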
var GridService = core.AObject.extend({
ctor: function() {
this.loadRequest = {
filters: [],
page: 1,
sorts: null
};
this.method = "GET";
this.url = null;
this.data = {};
this.sort = [];
this.element = null;
this.formIdentifier = null;
this.identifier = null;
this.title = null;
this.searchFormIncluded = false;
},
load: function(opts) {
var self = this;
if(!this.url) throw "GridService.load(): url is needed";
self.element = null;
$.ajax({
type: this.method,
url: this.url,
data: $.extend(this.data, { loadRequest: JSON.stringify(this.loadRequest) }),
dataType: "json",
success: function(response) {
self.element = null;
if(response.error) {
self.invoke("error", response.message);
} else {
self.element = _E("div").html(response.content);
self.formIdentifier = response.formIdentifier;
self.title = response.title;
self.searchFormIncluded = response.searchFormIncluded;
self.invoke("load", self.element);
}
},
error: function() {
self.invoke("error", "generic error");
}
});
},
remove: function(ids) {
if(!this.identifier) {
throw "Please specify an identifier in gridService to remove";
}
if(!ids || ids.length == 0) {
return;
}
var url = BASE + "crud/grid/" + this.identifier + "/delete";
var self = this;
$.ajax({
type: "POST",
url: url,
data: { ids: ids.join() },
dataType: "json",
success: function(response) {
if(response.error) {
self.invoke("error", response.message);
} else {
self.invoke("remove", ids);
}
},
error: function() {
self.invoke("error", "generic error");
}
});
},
|
reload: function() {
            this.load();
},
setPage: function(page) {
this.loadRequest.page = page;
},
nextPage: function() {
this.loadRequest.page++;
},
previousPage: function() {
this.loadRequest.page--;
if(this.loadRequest.page <= 0) {
this.loadRequest.page = 1;
}
},
setSort: function(property, descending) {
this.loadRequest.sorts = [{
property: property,
descending: descending
}];
},
setFilters: function(filters) {
this.loadRequest.filters = filters;
this.loadRequest.page = 1;
}
});
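    // Models a [from, to] interval in epoch milliseconds, with helpers to
    // initialise, shift and day-align the bounds.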
var DateInterval = core.AObject.extend({
ctor: function() {
DateInterval.super.ctor.call(this);
this.from = 0;
this.to = 0;
},
set_to: function(to) {
this.to = to;
this.check();
},
set_from: function(from) {
this.from = from;
this.check();
},
initLastHour: function() {
this.to = new Date().getTime();
this.from = this.to - 60 * 60 * 1000;
this.invoke("to_change");
this.invoke("from_change");
},
initLastDay: function() {
this.initTo(new Date().getTime(), 1);
},
initLastWeek: function() {
var now = new Date();
this.initTo(now, now.getDay());
},
initFrom: function(from, days) {
this.from = from;
var dateTo = new Date();
dateTo.setTime(this.from);
dateTo.setDate(dateTo.getDate() + days);
this.to = dateTo.getTime();
this.invoke("to_change");
this.invoke("from_change");
},
initTo: function(to, days) {
this.to = to;
var dateFrom = new Date();
dateFrom.setTime(this.to);
dateFrom.setDate(dateFrom.getDate() - days);
this.from = dateFrom.getTime();
this.invoke("to_change");
this.invoke("from_change");
},
subFrom: function(days) {
var dateFrom = new Date();
dateFrom.setTime(this.from);
dateFrom.setDate(dateFrom.getDate() - days);
this.from = dateFrom.getTime();
this.check();
},
addFrom: function(days) {
var dateFrom = new Date();
dateFrom.setTime(this.from);
dateFrom.setDate(dateFrom.getDate() + days);
this.from = dateFrom.getTime();
this.check();
},
subTo: function(days) {
var dateTo = new Date();
dateTo.setTime(this.to);
dateTo.setDate(dateTo.getDate() - days);
this.to = dateTo.getTime();
this.check();
},
addTo: function(days) {
var dateTo = new Date();
dateTo.setTime(this.to);
dateTo.setDate(dateTo.getDate() + days);
this.to = dateTo.getTime();
this.check();
},
check: function() {
if(!this.from || !this.to) { return; }
if(this.from > this.to) {
throw "Error normalizing dates: from is greater than to";
}
},
normalize: function() {
if(this.from) {
var dateFrom = new Date();
dateFrom.setTime(this.from);
dateFrom.setHours(0);
dateFrom.setMinutes(0);
dateFrom.setSeconds(0);
dateFrom.setMilliseconds(0);
this.set("from", dateFrom.getTime());
}
if(this.to) {
var dateTo = new Date();
dateTo.setTime(this.to);
dateTo.setHours(23);
dateTo.setMinutes(59);
dateTo.setSeconds(59);
dateTo.setMilliseconds(999);
this.set("to", dateTo.getTime());
}
}
});
exports.AjaxService = AjaxService;
exports.GridService = GridService;
exports.FormService = FormService;
exports.DateInterval = DateInterval;
return exports;
});
| |
file_test.go
|
package file
import (
"fmt"
"os"
"path/filepath"
"strings"
"testing"
"time"
"github.com/asim/go-micro/v3/store"
"github.com/davecgh/go-spew/spew"
"github.com/kr/pretty"
)
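// cleanup closes the store and removes its on-disk database directory.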
func cleanup(db string, s store.Store) {
s.Close()
dir := filepath.Join(DefaultDir, db+"/")
|
func TestFileStoreReInit(t *testing.T) {
s := NewStore(store.Table("aaa"))
defer cleanup(DefaultDatabase, s)
s.Init(store.Table("bbb"))
if s.Options().Table != "bbb" {
t.Error("Init didn't reinitialise the store")
}
}
func TestFileStoreBasic(t *testing.T) {
s := NewStore()
defer cleanup(DefaultDatabase, s)
fileTest(s, t)
}
func TestFileStoreTable(t *testing.T) {
s := NewStore(store.Table("testTable"))
defer cleanup(DefaultDatabase, s)
fileTest(s, t)
}
func TestFileStoreDatabase(t *testing.T) {
s := NewStore(store.Database("testdb"))
defer cleanup("testdb", s)
fileTest(s, t)
}
func TestFileStoreDatabaseTable(t *testing.T) {
s := NewStore(store.Table("testTable"), store.Database("testdb"))
defer cleanup("testdb", s)
fileTest(s, t)
}
func fileTest(s store.Store, t *testing.T) {
if len(os.Getenv("IN_TRAVIS_CI")) == 0 {
t.Logf("Options %s %v\n", s.String(), s.Options())
}
// Read and Write an expiring Record
if err := s.Write(&store.Record{
Key: "Hello",
Value: []byte("World"),
Expiry: time.Millisecond * 150,
}); err != nil {
t.Error(err)
}
if r, err := s.Read("Hello"); err != nil {
t.Fatal(err)
} else {
if len(r) != 1 {
t.Error("Read returned multiple records")
}
if r[0].Key != "Hello" {
t.Errorf("Expected %s, got %s", "Hello", r[0].Key)
}
if string(r[0].Value) != "World" {
t.Errorf("Expected %s, got %s", "World", r[0].Value)
}
}
// wait for expiry
time.Sleep(time.Millisecond * 200)
if _, err := s.Read("Hello"); err != store.ErrNotFound {
t.Errorf("Expected %# v, got %# v", store.ErrNotFound, err)
}
	// Write 2 records with various expiry and read them back with a prefix query
records := []*store.Record{
&store.Record{
Key: "foo",
Value: []byte("foofoo"),
},
&store.Record{
Key: "foobar",
Value: []byte("foobarfoobar"),
Expiry: time.Millisecond * 100,
},
}
for _, r := range records {
if err := s.Write(r); err != nil {
t.Errorf("Couldn't write k: %s, v: %# v (%s)", r.Key, pretty.Formatter(r.Value), err)
}
}
if results, err := s.Read("foo", store.ReadPrefix()); err != nil {
t.Errorf("Couldn't read all \"foo\" keys, got %# v (%s)", spew.Sdump(results), err)
} else {
if len(results) != 2 {
t.Errorf("Expected 2 items, got %d", len(results))
//t.Logf("Table test: %v\n", spew.Sdump(results))
}
}
// wait for the expiry
time.Sleep(time.Millisecond * 200)
if results, err := s.Read("foo", store.ReadPrefix()); err != nil {
t.Errorf("Couldn't read all \"foo\" keys, got %# v (%s)", spew.Sdump(results), err)
} else if len(results) != 1 {
t.Errorf("Expected 1 item, got %d", len(results))
//t.Logf("Table test: %v\n", spew.Sdump(results))
}
if err := s.Delete("foo"); err != nil {
t.Errorf("Delete failed (%v)", err)
}
if results, err := s.Read("foo"); err != store.ErrNotFound {
t.Errorf("Expected read failure read all \"foo\" keys, got %# v (%s)", spew.Sdump(results), err)
} else {
if len(results) != 0 {
t.Errorf("Expected 0 items, got %d (%# v)", len(results), spew.Sdump(results))
}
}
// Write 3 records with various expiry and get with Suffix
records = []*store.Record{
&store.Record{
Key: "foo",
Value: []byte("foofoo"),
},
&store.Record{
Key: "barfoo",
Value: []byte("barfoobarfoo"),
Expiry: time.Millisecond * 100,
},
&store.Record{
Key: "bazbarfoo",
Value: []byte("bazbarfoobazbarfoo"),
Expiry: 2 * time.Millisecond * 100,
},
}
for _, r := range records {
if err := s.Write(r); err != nil {
t.Errorf("Couldn't write k: %s, v: %# v (%s)", r.Key, pretty.Formatter(r.Value), err)
}
}
if results, err := s.Read("foo", store.ReadSuffix()); err != nil {
t.Errorf("Couldn't read all \"foo\" keys, got %# v (%s)", spew.Sdump(results), err)
} else {
if len(results) != 3 {
t.Errorf("Expected 3 items, got %d", len(results))
//t.Logf("Table test: %v\n", spew.Sdump(results))
}
}
time.Sleep(time.Millisecond * 100)
if results, err := s.Read("foo", store.ReadSuffix()); err != nil {
t.Errorf("Couldn't read all \"foo\" keys, got %# v (%s)", spew.Sdump(results), err)
} else {
if len(results) != 2 {
t.Errorf("Expected 2 items, got %d", len(results))
//t.Logf("Table test: %v\n", spew.Sdump(results))
}
}
time.Sleep(time.Millisecond * 100)
if results, err := s.Read("foo", store.ReadSuffix()); err != nil {
t.Errorf("Couldn't read all \"foo\" keys, got %# v (%s)", spew.Sdump(results), err)
} else {
if len(results) != 1 {
t.Errorf("Expected 1 item, got %d", len(results))
// t.Logf("Table test: %# v\n", spew.Sdump(results))
}
}
if err := s.Delete("foo"); err != nil {
t.Errorf("Delete failed (%v)", err)
}
if results, err := s.Read("foo", store.ReadSuffix()); err != nil {
t.Errorf("Couldn't read all \"foo\" keys, got %# v (%s)", spew.Sdump(results), err)
} else {
if len(results) != 0 {
t.Errorf("Expected 0 items, got %d (%# v)", len(results), spew.Sdump(results))
}
}
	// Test combined prefix/suffix reads and the write TTL/expiry options
if err := s.Write(&store.Record{
Key: "foofoobarbar",
Value: []byte("something"),
}, store.WriteTTL(time.Millisecond*100)); err != nil {
t.Error(err)
}
if err := s.Write(&store.Record{
Key: "foofoo",
Value: []byte("something"),
}, store.WriteExpiry(time.Now().Add(time.Millisecond*100))); err != nil {
t.Error(err)
}
if err := s.Write(&store.Record{
Key: "barbar",
Value: []byte("something"),
// TTL has higher precedence than expiry
}, store.WriteExpiry(time.Now().Add(time.Hour)), store.WriteTTL(time.Millisecond*100)); err != nil {
t.Error(err)
}
if results, err := s.Read("foo", store.ReadPrefix(), store.ReadSuffix()); err != nil {
t.Error(err)
} else {
if len(results) != 1 {
t.Errorf("Expected 1 results, got %d: %# v", len(results), spew.Sdump(results))
}
}
time.Sleep(time.Millisecond * 100)
if results, err := s.List(); err != nil {
t.Errorf("List failed: %s", err)
} else {
if len(results) != 0 {
t.Errorf("Expiry options were not effective, results :%v", spew.Sdump(results))
}
}
// write the following records
for i := 0; i < 10; i++ {
s.Write(&store.Record{
Key: fmt.Sprintf("a%d", i),
Value: []byte{},
})
}
// read back a few records
if results, err := s.Read("a", store.ReadLimit(5), store.ReadPrefix()); err != nil {
t.Error(err)
} else {
if len(results) != 5 {
t.Fatal("Expected 5 results, got ", len(results))
}
if !strings.HasPrefix(results[0].Key, "a") {
t.Fatalf("Expected a prefix, got %s", results[0].Key)
}
}
// read the rest back
if results, err := s.Read("a", store.ReadLimit(30), store.ReadOffset(5), store.ReadPrefix()); err != nil {
t.Fatal(err)
} else {
if len(results) != 5 {
t.Fatal("Expected 5 results, got ", len(results))
}
}
}
|
os.RemoveAll(dir)
}
|
tests.py
|
import gettext
import os
import re
from datetime import datetime, timedelta
from importlib import import_module
import pytz
from django import forms
from django.conf import settings
from django.contrib import admin
from django.contrib.admin import widgets
from django.contrib.admin.tests import AdminSeleniumTestCase
from django.contrib.auth.models import User
from django.core.files.storage import default_storage
from django.core.files.uploadedfile import SimpleUploadedFile
from django.db.models import (
CharField, DateField, DateTimeField, ManyToManyField, UUIDField,
)
from django.test import SimpleTestCase, TestCase, override_settings
from django.urls import reverse
from django.utils import translation
from .models import (
Advisor, Album, Band, Bee, Car, Company, Event, Honeycomb, Individual,
Inventory, Member, MyFileField, Profile, School, Student,
UnsafeLimitChoicesTo, VideoStream,
)
from .widgetadmin import site as widget_admin_site
class TestDataMixin:
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(username='super', password='secret', email=None)
cls.u2 = User.objects.create_user(username='testser', password='secret')
Car.objects.create(owner=cls.superuser, make='Volkswagen', model='Passat')
Car.objects.create(owner=cls.u2, make='BMW', model='M3')
class AdminFormfieldForDBFieldTests(SimpleTestCase):
"""
Tests for correct behavior of ModelAdmin.formfield_for_dbfield
"""
def assertFormfield(self, model, fieldname, widgetclass, **admin_overrides):
"""
Helper to call formfield_for_dbfield for a given model and field name
and verify that the returned formfield is appropriate.
"""
# Override any settings on the model admin
class MyModelAdmin(admin.ModelAdmin):
pass
for k in admin_overrides:
setattr(MyModelAdmin, k, admin_overrides[k])
# Construct the admin, and ask it for a formfield
ma = MyModelAdmin(model, admin.site)
ff = ma.formfield_for_dbfield(model._meta.get_field(fieldname), request=None)
# "unwrap" the widget wrapper, if needed
if isinstance(ff.widget, widgets.RelatedFieldWidgetWrapper):
widget = ff.widget.widget
else:
widget = ff.widget
self.assertIsInstance(widget, widgetclass)
# Return the formfield so that other tests can continue
return ff
def test_DateField(self):
self.assertFormfield(Event, 'start_date', widgets.AdminDateWidget)
def test_DateTimeField(self):
self.assertFormfield(Member, 'birthdate', widgets.AdminSplitDateTime)
def test_TimeField(self):
self.assertFormfield(Event, 'start_time', widgets.AdminTimeWidget)
def test_TextField(self):
self.assertFormfield(Event, 'description', widgets.AdminTextareaWidget)
def test_URLField(self):
self.assertFormfield(Event, 'link', widgets.AdminURLFieldWidget)
def test_IntegerField(self):
self.assertFormfield(Event, 'min_age', widgets.AdminIntegerFieldWidget)
def test_CharField(self):
self.assertFormfield(Member, 'name', widgets.AdminTextInputWidget)
def test_EmailField(self):
self.assertFormfield(Member, 'email', widgets.AdminEmailInputWidget)
def test_FileField(self):
self.assertFormfield(Album, 'cover_art', widgets.AdminFileWidget)
def test_ForeignKey(self):
self.assertFormfield(Event, 'main_band', forms.Select)
def test_raw_id_ForeignKey(self):
self.assertFormfield(Event, 'main_band', widgets.ForeignKeyRawIdWidget,
raw_id_fields=['main_band'])
def test_radio_fields_ForeignKey(self):
ff = self.assertFormfield(Event, 'main_band', widgets.AdminRadioSelect,
radio_fields={'main_band': admin.VERTICAL})
self.assertIsNone(ff.empty_label)
def test_many_to_many(self):
self.assertFormfield(Band, 'members', forms.SelectMultiple)
def test_raw_id_many_to_many(self):
self.assertFormfield(Band, 'members', widgets.ManyToManyRawIdWidget,
raw_id_fields=['members'])
def test_filtered_many_to_many(self):
self.assertFormfield(Band, 'members', widgets.FilteredSelectMultiple,
filter_vertical=['members'])
def test_formfield_overrides(self):
self.assertFormfield(Event, 'start_date', forms.TextInput,
formfield_overrides={DateField: {'widget': forms.TextInput}})
def test_formfield_overrides_widget_instances(self):
"""
Widget instances in formfield_overrides are not shared between
different fields. (#19423)
"""
class BandAdmin(admin.ModelAdmin):
formfield_overrides = {
CharField: {'widget': forms.TextInput(attrs={'size': '10'})}
}
ma = BandAdmin(Band, admin.site)
f1 = ma.formfield_for_dbfield(Band._meta.get_field('name'), request=None)
f2 = ma.formfield_for_dbfield(Band._meta.get_field('style'), request=None)
self.assertNotEqual(f1.widget, f2.widget)
self.assertEqual(f1.widget.attrs['maxlength'], '100')
self.assertEqual(f2.widget.attrs['maxlength'], '20')
self.assertEqual(f2.widget.attrs['size'], '10')
def test_formfield_overrides_m2m_filter_widget(self):
"""
The autocomplete_fields, raw_id_fields, filter_vertical, and
filter_horizontal widgets for ManyToManyFields may be overridden by
specifying a widget in formfield_overrides.
"""
class BandAdmin(admin.ModelAdmin):
filter_vertical = ['members']
formfield_overrides = {
ManyToManyField: {'widget': forms.CheckboxSelectMultiple},
}
ma = BandAdmin(Band, admin.site)
field = ma.formfield_for_dbfield(Band._meta.get_field('members'), request=None)
self.assertIsInstance(field.widget.widget, forms.CheckboxSelectMultiple)
def test_formfield_overrides_for_datetime_field(self):
"""
        Overriding the widget for DateTimeField doesn't override the default
form_class for that field (#26449).
"""
class MemberAdmin(admin.ModelAdmin):
formfield_overrides = {DateTimeField: {'widget': widgets.AdminSplitDateTime}}
ma = MemberAdmin(Member, admin.site)
f1 = ma.formfield_for_dbfield(Member._meta.get_field('birthdate'), request=None)
self.assertIsInstance(f1.widget, widgets.AdminSplitDateTime)
self.assertIsInstance(f1, forms.SplitDateTimeField)
def test_formfield_overrides_for_custom_field(self):
"""
formfield_overrides works for a custom field class.
"""
class AlbumAdmin(admin.ModelAdmin):
formfield_overrides = {MyFileField: {'widget': forms.TextInput()}}
        ma = AlbumAdmin(Album, admin.site)
f1 = ma.formfield_for_dbfield(Album._meta.get_field('backside_art'), request=None)
self.assertIsInstance(f1.widget, forms.TextInput)
def test_field_with_choices(self):
self.assertFormfield(Member, 'gender', forms.Select)
def test_choices_with_radio_fields(self):
self.assertFormfield(Member, 'gender', widgets.AdminRadioSelect,
radio_fields={'gender': admin.VERTICAL})
def test_inheritance(self):
self.assertFormfield(Album, 'backside_art', widgets.AdminFileWidget)
def test_m2m_widgets(self):
"""m2m fields help text as it applies to admin app (#9321)."""
class AdvisorAdmin(admin.ModelAdmin):
filter_vertical = ['companies']
self.assertFormfield(Advisor, 'companies', widgets.FilteredSelectMultiple,
filter_vertical=['companies'])
ma = AdvisorAdmin(Advisor, admin.site)
f = ma.formfield_for_dbfield(Advisor._meta.get_field('companies'), request=None)
self.assertEqual(
f.help_text,
'Hold down “Control”, or “Command” on a Mac, to select more than one.'
)
@override_settings(ROOT_URLCONF='admin_widgets.urls')
class AdminFormfieldForDBFieldWithRequestTests(TestDataMixin, TestCase):
def test_filter_choices_by_request_user(self):
"""
Ensure the user can only see their own cars in the foreign key dropdown.
"""
self.client.force_login(self.superuser)
response = self.client.get(reverse('admin:admin_widgets_cartire_add'))
self.assertNotContains(response, "BMW M3")
self.assertContains(response, "Volkswagen Passat")
@override_settings(ROOT_URLCONF='admin_widgets.urls')
class AdminForeignKeyWidgetChangeList(TestDataMixin, TestCase):
def setUp(self):
self.client.force_login(self.superuser)
def test_changelist_ForeignKey(self):
response = self.client.get(reverse('admin:admin_widgets_car_changelist'))
self.assertContains(response, '/auth/user/add/')
@override_settings(ROOT_URLCONF='admin_widgets.urls')
class AdminForeignKeyRawIdWidget(TestDataMixin, TestCase):
def setUp(self):
self.client.force_login(self.superuser)
def test_nonexistent_target_id(self):
band = Band.objects.create(name='Bogey Blues')
pk = band.pk
band.delete()
post_data = {
"main_band": str(pk),
}
# Try posting with a nonexistent pk in a raw id field: this
# should result in an error message, not a server exception.
response = self.client.post(reverse('admin:admin_widgets_event_add'), post_data)
self.assertContains(response, 'Select a valid choice. That choice is not one of the available choices.')
def test_invalid_target_id(self):
for test_str in ('Iñtërnâtiônàlizætiøn', "1234'", -1234):
# This should result in an error message, not a server exception.
response = self.client.post(reverse('admin:admin_widgets_event_add'), {"main_band": test_str})
self.assertContains(response, 'Select a valid choice. That choice is not one of the available choices.')
def test_url_params_from_lookup_dict_any_iterable(self):
lookup1 = widgets.url_params_from_lookup_dict({'color__in': ('red', 'blue')})
lookup2 = widgets.url_params_from_lookup_dict({'color__in': ['red', 'blue']})
self.assertEqual(lookup1, {'color__in': 'red,blue'})
self.assertEqual(lookup1, lookup2)
def test_url_params_from_lookup_dict_callable(self):
def my_callable():
return 'works'
lookup1 = widgets.url_params_from_lookup_dict({'myfield': my_callable})
lookup2 = widgets.url_params_from_lookup_dict({'myfield': my_callable()})
self.assertEqual(lookup1, lookup2)
def test_label_and_url_for_value_invalid_uuid(self):
field = Bee._meta.get_field('honeycomb')
self.assertIsInstance(field.target_field, UUIDField)
widget = widgets.ForeignKeyRawIdWidget(field.remote_field, admin.site)
self.assertEqual(widget.label_and_url_for_value('invalid-uuid'), ('', ''))
class FilteredSelectMultipleWidgetTest(SimpleTestCase):
def test_render(self):
# Backslash in verbose_name to ensure it is JavaScript escaped.
w = widgets.FilteredSelectMultiple('test\\', False)
self.assertHTMLEqual(
w.render('test', 'test'),
'<select multiple name="test" class="selectfilter" '
'data-field-name="test\\" data-is-stacked="0">\n</select>'
)
def test_stacked_render(self):
# Backslash in verbose_name to ensure it is JavaScript escaped.
w = widgets.FilteredSelectMultiple('test\\', True)
self.assertHTMLEqual(
w.render('test', 'test'),
'<select multiple name="test" class="selectfilterstacked" '
'data-field-name="test\\" data-is-stacked="1">\n</select>'
)
class AdminDateWidgetTest(SimpleTestCase):
def test_attrs(self):
w = widgets.AdminDateWidget()
self.assertHTMLEqual(
w.render('test', datetime(2007, 12, 1, 9, 30)),
'<input value="2007-12-01" type="text" class="vDateField" name="test" size="10">',
)
# pass attrs to widget
w = widgets.AdminDateWidget(attrs={'size': 20, 'class': 'myDateField'})
self.assertHTMLEqual(
w.render('test', datetime(2007, 12, 1, 9, 30)),
'<input value="2007-12-01" type="text" class="myDateField" name="test" size="20">',
)
class AdminTimeWidgetTest(SimpleTestCase):
def test_attrs(self):
w = widgets.AdminTimeWidget()
self.assertHTMLEqual(
w.render('test', datetime(2007, 12, 1, 9, 30)),
'<input value="09:30:00" type="text" class="vTimeField" name="test" size="8">',
)
# pass attrs to widget
w = widgets.AdminTimeWidget(attrs={'size': 20, 'class': 'myTimeField'})
self.assertHTMLEqual(
w.render('test', datetime(2007, 12, 1, 9, 30)),
'<input value="09:30:00" type="text" class="myTimeField" name="test" size="20">',
)
class AdminSplitDateTimeWidgetTest(SimpleTestCase):
def test_render(self):
w = widgets.AdminSplitDateTime()
self.assertHTMLEqual(
w.render('test', datetime(2007, 12, 1, 9, 30)),
'<p class="datetime">'
'Date: <input value="2007-12-01" type="text" class="vDateField" '
'name="test_0" size="10"><br>'
'Time: <input value="09:30:00" type="text" class="vTimeField" '
'name="test_1" size="8"></p>'
)
def test_localization(self):
w = widgets.AdminSplitDateTime()
with self.settings(USE_L10N=True), translation.override('de-at'):
w.is_localized = True
self.assertHTMLEqual(
w.render('test', datetime(2007, 12, 1, 9, 30)),
'<p class="datetime">'
'Datum: <input value="01.12.2007" type="text" '
'class="vDateField" name="test_0"size="10"><br>'
'Zeit: <input value="09:30:00" type="text" class="vTimeField" '
'name="test_1" size="8"></p>'
)
class AdminURLWidgetTest(SimpleTestCase):
def test_get_context_validates_url(self):
w = widgets.AdminURLFieldWidget()
for invalid in ['', '/not/a/full/url/', 'javascript:alert("Danger XSS!")']:
with self.subTest(url=invalid):
self.assertFalse(w.get_context('name', invalid, {})['url_valid'])
self.assertTrue(w.get_context('name', 'http://example.com', {})['url_valid'])
def test_render(self):
w = widgets.AdminURLFieldWidget()
self.assertHTMLEqual(
w.render('test', ''),
'<input class="vURLField" name="test" type="url">'
)
self.assertHTMLEqual(
w.render('test', 'http://example.com'),
'<p class="url">Currently:<a href="http://example.com">'
'http://example.com</a><br>'
'Change:<input class="vURLField" name="test" type="url" '
'value="http://example.com"></p>'
)
def test_render_idn(self):
w = widgets.AdminURLFieldWidget()
self.assertHTMLEqual(
w.render('test', 'http://example-äüö.com'),
'<p class="url">Currently: <a href="http://xn--example--7za4pnc.com">'
'http://example-äüö.com</a><br>'
'Change:<input class="vURLField" name="test" type="url" '
'value="http://example-äüö.com"></p>'
)
def test_render_quoting(self):
"""
WARNING: This test doesn't use assertHTMLEqual since it will get rid
of some escapes which are tested here!
"""
HREF_RE = re.compile('href="([^"]+)"')
VALUE_RE = re.compile('value="([^"]+)"')
TEXT_RE = re.compile('<a[^>]+>([^>]+)</a>')
w = widgets.AdminURLFieldWidget()
output = w.render('test', 'http://example.com/<sometag>some-text</sometag>')
self.assertEqual(
HREF_RE.search(output)[1],
'http://example.com/%3Csometag%3Esome-text%3C/sometag%3E',
)
self.assertEqual(
TEXT_RE.search(output)[1],
'http://example.com/<sometag>some-text</sometag>',
)
self.assertEqual(
VALUE_RE.search(output)[1],
'http://example.com/<sometag>some-text</sometag>',
)
output = w.render('test', 'http://example-äüö.com/<sometag>some-text</sometag>')
self.assertEqual(
HREF_RE.search(output)[1],
'http://xn--example--7za4pnc.com/%3Csometag%3Esome-text%3C/sometag%3E',
)
self.assertEqual(
TEXT_RE.search(output)[1],
'http://example-äüö.com/<sometag>some-text</sometag>',
)
self.assertEqual(
VALUE_RE.search(output)[1],
'http://example-äüö.com/<sometag>some-text</sometag>',
)
output = w.render('test', 'http://www.example.com/%C3%A4"><script>alert("XSS!")</script>"')
self.assertEqual(
HREF_RE.search(output)[1],
'http://www.example.com/%C3%A4%22%3E%3Cscript%3Ealert(%22XSS!%22)%3C/script%3E%22',
)
self.assertEqual(
TEXT_RE.search(output)[1],
'http://www.example.com/%C3%A4"><script>'
'alert("XSS!")</script>"'
)
self.assertEqual(
VALUE_RE.search(output)[1],
'http://www.example.com/%C3%A4"><script>alert("XSS!")</script>"',
)
class AdminUUIDWidgetTests(SimpleTestCase):
def test_attrs(self):
w = widgets.AdminUUIDInputWidget()
self.assertHTMLEqual(
w.render('test', '550e8400-e29b-41d4-a716-446655440000'),
'<input value="550e8400-e29b-41d4-a716-446655440000" type="text" class="vUUIDField" name="test">',
)
w = widgets.AdminUUIDInputWidget(attrs={'class': 'myUUIDInput'})
self.assertHTMLEqual(
w.render('test', '550e8400-e29b-41d4-a716-446655440000'),
'<input value="550e8400-e29b-41d4-a716-446655440000" type="text" class="myUUIDInput" name="test">',
)
@override_settings(ROOT_URLCONF='admin_widgets.urls')
class AdminFileWidgetTests(TestDataMixin, TestCase):
@classmethod
def setUpTestData(cls):
super().setUpTestData()
band = Band.objects.create(name='Linkin Park')
cls.album = band.album_set.create(
name='Hybrid Theory', cover_art=r'albums\hybrid_theory.jpg'
)
def test_render(self):
w = widgets.AdminFileWidget()
self.assertHTMLEqual(
w.render('test', self.album.cover_art),
'<p class="file-upload">Currently: <a href="%(STORAGE_URL)salbums/'
r'hybrid_theory.jpg">albums\hybrid_theory.jpg</a> '
'<span class="clearable-file-input">'
'<input type="checkbox" name="test-clear" id="test-clear_id"> '
'<label for="test-clear_id">Clear</label></span><br>'
'Change: <input type="file" name="test"></p>' % {
'STORAGE_URL': default_storage.url(''),
},
)
self.assertHTMLEqual(
w.render('test', SimpleUploadedFile('test', b'content')),
'<input type="file" name="test">',
)
def test_render_required(self):
widget = widgets.AdminFileWidget()
widget.is_required = True
self.assertHTMLEqual(
widget.render('test', self.album.cover_art),
'<p class="file-upload">Currently: <a href="%(STORAGE_URL)salbums/'
r'hybrid_theory.jpg">albums\hybrid_theory.jpg</a><br>'
'Change: <input type="file" name="test"></p>' % {
'STORAGE_URL': default_storage.url(''),
},
)
def test_render_disabled(self):
widget = widgets.AdminFileWidget(attrs={'disabled': True})
self.assertHTMLEqual(
widget.render('test', self.album.cover_art),
'<p class="file-upload">Currently: <a href="%(STORAGE_URL)salbums/'
r'hybrid_theory.jpg">albums\hybrid_theory.jpg</a> '
'<span class="clearable-file-input">'
'<input type="checkbox" name="test-clear" id="test-clear_id" disabled>'
'<label for="test-clear_id">Clear</label></span><br>'
'Change: <input type="file" name="test" disabled></p>' % {
'STORAGE_URL': default_storage.url(''),
},
)
def test_readonly_fields(self):
"""
File widgets should render as a link when they're marked "read only."
"""
self.client.force_login(self.superuser)
response = self.client.get(reverse('admin:admin_widgets_album_change', args=(self.album.id,)))
self.assertContains(
response,
'<div class="readonly"><a href="%(STORAGE_URL)salbums/hybrid_theory.jpg">'
r'albums\hybrid_theory.jpg</a></div>' % {'STORAGE_URL': default_storage.url('')},
html=True,
)
self.assertNotContains(
response,
'<input type="file" name="cover_art" id="id_cover_art">',
html=True,
)
response = self.client.get(reverse('admin:admin_widgets_album_add'))
self.assertContains(
response,
'<div class="readonly"></div>',
html=True,
)
@override_settings(ROOT_URLCONF='admin_widgets.urls')
class ForeignKeyRawIdWidgetTest(TestCase):
def test_render(self):
band = Band.objects.create(name='Linkin Park')
band.album_set.create(
name='Hybrid Theory', cover_art=r'albums\hybrid_theory.jpg'
)
rel = Album._meta.get_field('band').remote_field
w = widgets.ForeignKeyRawIdWidget(rel, widget_admin_site)
self.assertHTMLEqual(
w.render('test', band.uuid, attrs={}),
'<input type="text" name="test" value="%(banduuid)s" '
'class="vForeignKeyRawIdAdminField">'
'<a href="/admin_widgets/band/?_to_field=uuid" class="related-lookup" '
'id="lookup_id_test" title="Lookup"></a> <strong>'
'<a href="/admin_widgets/band/%(bandpk)s/change/">Linkin Park</a>'
'</strong>' % {'banduuid': band.uuid, 'bandpk': band.pk}
)
def test_relations_to_non_primary_key(self):
# ForeignKeyRawIdWidget works with fields which aren't related to
# the model's primary key.
apple = Inventory.objects.create(barcode=86, name='Apple')
Inventory.objects.create(barcode=22, name='Pear')
core = Inventory.objects.create(
barcode=87, name='Core', parent=apple
)
rel = Inventory._meta.get_field('parent').remote_field
w = widgets.ForeignKeyRawIdWidget(rel, widget_admin_site)
self.assertHTMLEqual(
w.render('test', core.parent_id, attrs={}),
'<input type="text" name="test" value="86" '
'class="vForeignKeyRawIdAdminField">'
'<a href="/admin_widgets/inventory/?_to_field=barcode" '
'class="related-lookup" id="lookup_id_test" title="Lookup"></a>'
' <strong><a href="/admin_widgets/inventory/%(pk)s/change/">'
'Apple</a></strong>' % {'pk': apple.pk}
)
def test_fk_related_model_not_in_admin(self):
# FK to a model not registered with admin site. Raw ID widget should
# have no magnifying glass link. See #16542
big_honeycomb = Honeycomb.objects.create(location='Old tree')
big_honeycomb.bee_set.create()
rel = Bee._meta.get_field('honeycomb').remote_field
w = widgets.ForeignKeyRawIdWidget(rel, widget_admin_site)
self.assertHTMLEqual(
w.render('honeycomb_widget', big_honeycomb.pk, attrs={}),
'<input type="text" name="honeycomb_widget" value="%(hcombpk)s">'
' <strong>%(hcomb)s</strong>'
% {'hcombpk': big_honeycomb.pk, 'hcomb': big_honeycomb}
)
def test_fk_to_self_model_not_in_admin(self):
# FK to self, not registered with admin site. Raw ID widget should have
# no magnifying glass link. See #16542
subject1 = Individual.objects.create(name='Subject #1')
Individual.objects.create(name='Child', parent=subject1)
rel = Individual._meta.get_field('parent').remote_field
w = widgets.ForeignKeyRawIdWidget(rel, widget_admin_site)
self.assertHTMLEqual(
w.render('individual_widget', subject1.pk, attrs={}),
'<input type="text" name="individual_widget" value="%(subj1pk)s">'
' <strong>%(subj1)s</strong>'
% {'subj1pk': subject1.pk, 'subj1': subject1}
)
def test_proper_manager_for_label_lookup(self):
# see #9258
rel = Inventory._meta.get_field('parent').remote_field
w = widgets.ForeignKeyRawIdWidget(rel, widget_admin_site)
hidden = Inventory.objects.create(
barcode=93, name='Hidden', hidden=True
)
child_of_hidden = Inventory.objects.create(
barcode=94, name='Child of hidden', parent=hidden
)
self.assertHTMLEqual(
w.render('test', child_of_hidden.parent_id, attrs={}),
'<input type="text" name="test" value="93" class="vForeignKeyRawIdAdminField">'
'<a href="/admin_widgets/inventory/?_to_field=barcode" '
'class="related-lookup" id="lookup_id_test" title="Lookup"></a>'
' <strong><a href="/admin_widgets/inventory/%(pk)s/change/">'
'Hidden</a></strong>' % {'pk': hidden.pk}
)
def test_render_unsafe_limit_choices_to(self):
rel = UnsafeLimitChoicesTo._meta.get_field('band').remote_field
w = widgets.ForeignKeyRawIdWidget(rel, widget_admin_site)
self.assertHTMLEqual(
w.render('test', None),
'<input type="text" name="test" class="vForeignKeyRawIdAdminField">\n'
'<a href="/admin_widgets/band/?name=%22%26%3E%3Cescapeme&_to_field=artist_ptr" '
'class="related-lookup" id="lookup_id_test" title="Lookup"></a>'
)
def test_render_fk_as_pk_model(self):
rel = VideoStream._meta.get_field('release_event').remote_field
w = widgets.ForeignKeyRawIdWidget(rel, widget_admin_site)
self.assertHTMLEqual(
w.render('test', None),
'<input type="text" name="test" class="vForeignKeyRawIdAdminField">\n'
'<a href="/admin_widgets/releaseevent/?_to_field=album" '
'class="related-lookup" id="lookup_id_test" title="Lookup"></a>'
)
@override_settings(ROOT_URLCONF='admin_widgets.urls')
class ManyToManyRawIdWidgetTest(TestCase):
def test_render(self):
band = Band.objects.create(name='Linkin Park')
m1 = Member.objects.create(name='Chester')
m2 = Member.objects.create(name='Mike')
band.members.add(m1, m2)
rel = Band._meta.get_field('members').remote_field
w = widgets.ManyToManyRawIdWidget(rel, widget_admin_site)
self.assertHTMLEqual(
w.render('test', [m1.pk, m2.pk], attrs={}), (
'<input type="text" name="test" value="%(m1pk)s,%(m2pk)s" class="vManyToManyRawIdAdminField">'
'<a href="/admin_widgets/member/" class="related-lookup" id="lookup_id_test" title="Lookup"></a>'
) % {'m1pk': m1.pk, 'm2pk': m2.pk}
)
self.assertHTMLEqual(
w.render('test', [m1.pk]), (
'<input type="text" name="test" value="%(m1pk)s" class="vManyToManyRawIdAdminField">'
'<a href="/admin_widgets/member/" class="related-lookup" id="lookup_id_test" title="Lookup"></a>'
) % {'m1pk': m1.pk}
)
def test_m2m_related_model_not_in_admin(self):
# M2M relationship with model not registered with admin site. Raw ID
# widget should have no magnifying glass link. See #16542
consultor1 = Advisor.objects.create(name='Rockstar Techie')
c1 = Company.objects.create(name='Doodle')
c2 = Company.objects.create(name='Pear')
consultor1.companies.add(c1, c2)
rel = Advisor._meta.get_field('companies').remote_field
w = widgets.ManyToManyRawIdWidget(rel, widget_admin_site)
self.assertHTMLEqual(
w.render('company_widget1', [c1.pk, c2.pk], attrs={}),
'<input type="text" name="company_widget1" value="%(c1pk)s,%(c2pk)s">' % {'c1pk': c1.pk, 'c2pk': c2.pk}
)
self.assertHTMLEqual(
w.render('company_widget2', [c1.pk]),
'<input type="text" name="company_widget2" value="%(c1pk)s">' % {'c1pk': c1.pk}
)
@override_settings(ROOT_URLCONF='admin_widgets.urls')
class RelatedFieldWidgetWrapperTests(SimpleTestCase):
def test_no_can_add_related(self):
rel = Individual._meta.get_field('parent').remote_field
w = widgets.AdminRadioSelect()
# Used to fail with a name error.
w = widgets.RelatedFieldWidgetWrapper(w, rel, widget_admin_site)
self.assertFalse(w.can_add_related)
def test_select_multiple_widget_cant_change_delete_related(self):
rel = Individual._meta.get_field('parent').remote_field
widget = forms.SelectMultiple()
wrapper = widgets.RelatedFieldWidgetWrapper(
widget, rel, widget_admin_site,
can_add_related=True,
can_change_related=True,
can_delete_related=True,
)
self.assertTrue(wrapper.can_add_related)
self.assertFalse(wrapper.can_change_related)
self.assertFalse(wrapper.can_delete_related)
def test_on_delete_cascade_rel_cant_delete_related(self):
rel = Individual._meta.get_field('soulmate').remote_field
widget = forms.Select()
wrapper = widgets.RelatedFieldWidgetWrapper(
widget, rel, widget_admin_site,
can_add_related=True,
can_change_related=True,
can_delete_related=True,
)
self.assertTrue(wrapper.can_add_related)
self.assertTrue(wrapper.can_change_related)
self.assertFalse(wrapper.can_delete_related)
def test_custom_widget_render(self):
class CustomWidget(forms.Select):
def render(self, *args, **kwargs):
return 'custom render output'
rel = Album._meta.get_field('band').remote_field
widget = CustomWidget()
wrapper = widgets.RelatedFieldWidgetWrapper(
widget, rel, widget_admin_site,
can_add_related=True,
can_change_related=True,
can_delete_related=True,
)
output = wrapper.render('name', 'value')
self.assertIn('custom render output', output)
def test_widget_delegates_value_omitted_from_data(self):
class CustomWidget(forms.Select):
def value_omitted_from_data(self, data, files, name):
return False
rel = Album._meta.get_field('band').remote_field
widget = CustomWidget()
wrapper = widgets.RelatedFieldWidgetWrapper(widget, rel, widget_admin_site)
self.assertIs(wrapper.value_omitted_from_data({}, {}, 'band'), False)
def test_widget_is_hidden(self):
rel = Album._meta.get_field('band').remote_field
widget = forms.HiddenInput()
widget.choices = ()
wrapper = widgets.RelatedFieldWidgetWrapper(widget, rel, widget_admin_site)
self.assertIs(wrapper.is_hidden, True)
context = wrapper.get_context('band', None, {})
self.assertIs(context['is_hidden'], True)
output = wrapper.render('name', 'value')
# Related item links are hidden.
self.assertNotIn('<a ', output)
def test_widget_is_not_hidden(self):
rel = Album._meta.get_field('band').remote_field
widget = forms.Select()
wrapper = widgets.RelatedFieldWidgetWrapper(widget, rel, widget_admin_site)
self.assertIs(wrapper.is_hidden, False)
context = wrapper.get_context('band', None, {})
self.assertIs(context['is_hidden'], False)
output = wrapper.render('name', 'value')
# Related item links are present.
self.assertIn('<a ', output)
@override_settings(ROOT_URLCONF='admin_widgets.urls')
class AdminWidgetSeleniumTestCase(AdminSeleniumTestCase):
available_apps = ['admin_widgets'] + AdminSeleniumTestCase.available_apps
def setUp(self):
self.u1 = User.objects.create_superuser(username='super', password='secret', email='[email protected]')
class DateTimePickerSeleniumTests(AdminWidgetSeleniumTestCase):
def test_show_hide_date_time_pick
|
Asia/Singapore')
class DateTimePickerShortcutsSeleniumTests(AdminWidgetSeleniumTestCase):
def test_date_time_picker_shortcuts(self):
"""
date/time/datetime picker shortcuts work in the current time zone.
Refs #20663.
        This test case is fairly tricky; it relies on selenium still running the browser
in the default time zone "America/Chicago" despite `override_settings` changing
the time zone to "Asia/Singapore".
"""
self.admin_login(username='super', password='secret', login_url='/')
error_margin = timedelta(seconds=10)
        # If we are near a DST transition, add an hour of error margin.
tz = pytz.timezone('America/Chicago')
utc_now = datetime.now(pytz.utc)
tz_yesterday = (utc_now - timedelta(days=1)).astimezone(tz).tzname()
tz_tomorrow = (utc_now + timedelta(days=1)).astimezone(tz).tzname()
if tz_yesterday != tz_tomorrow:
error_margin += timedelta(hours=1)
self.selenium.get(self.live_server_url + reverse('admin:admin_widgets_member_add'))
self.selenium.find_element_by_id('id_name').send_keys('test')
# Click on the "today" and "now" shortcuts.
shortcuts = self.selenium.find_elements_by_css_selector('.field-birthdate .datetimeshortcuts')
now = datetime.now()
for shortcut in shortcuts:
shortcut.find_element_by_tag_name('a').click()
# There is a time zone mismatch warning.
# Warning: This would effectively fail if the TIME_ZONE defined in the
# settings has the same UTC offset as "Asia/Singapore" because the
# mismatch warning would be rightfully missing from the page.
self.selenium.find_elements_by_css_selector('.field-birthdate .timezonewarning')
# Submit the form.
with self.wait_page_loaded():
self.selenium.find_element_by_name('_save').click()
# Make sure that "now" in javascript is within 10 seconds
# from "now" on the server side.
member = Member.objects.get(name='test')
self.assertGreater(member.birthdate, now - error_margin)
self.assertLess(member.birthdate, now + error_margin)
# The above tests run with Asia/Singapore, which is on the positive side of
# UTC. Here we test with a timezone on the negative side.
@override_settings(TIME_ZONE='US/Eastern')
class DateTimePickerAltTimezoneSeleniumTests(DateTimePickerShortcutsSeleniumTests):
pass
class HorizontalVerticalFilterSeleniumTests(AdminWidgetSeleniumTestCase):
def setUp(self):
super().setUp()
self.lisa = Student.objects.create(name='Lisa')
self.john = Student.objects.create(name='John')
self.bob = Student.objects.create(name='Bob')
self.peter = Student.objects.create(name='Peter')
self.jenny = Student.objects.create(name='Jenny')
self.jason = Student.objects.create(name='Jason')
self.cliff = Student.objects.create(name='Cliff')
self.arthur = Student.objects.create(name='Arthur')
self.school = School.objects.create(name='School of Awesome')
def assertActiveButtons(self, mode, field_name, choose, remove, choose_all=None, remove_all=None):
choose_link = '#id_%s_add_link' % field_name
choose_all_link = '#id_%s_add_all_link' % field_name
remove_link = '#id_%s_remove_link' % field_name
remove_all_link = '#id_%s_remove_all_link' % field_name
self.assertEqual(self.has_css_class(choose_link, 'active'), choose)
self.assertEqual(self.has_css_class(remove_link, 'active'), remove)
if mode == 'horizontal':
self.assertEqual(self.has_css_class(choose_all_link, 'active'), choose_all)
self.assertEqual(self.has_css_class(remove_all_link, 'active'), remove_all)
def execute_basic_operations(self, mode, field_name):
original_url = self.selenium.current_url
from_box = '#id_%s_from' % field_name
to_box = '#id_%s_to' % field_name
choose_link = 'id_%s_add_link' % field_name
choose_all_link = 'id_%s_add_all_link' % field_name
remove_link = 'id_%s_remove_link' % field_name
remove_all_link = 'id_%s_remove_all_link' % field_name
# Initial positions ---------------------------------------------------
self.assertSelectOptions(from_box, [
str(self.arthur.id), str(self.bob.id),
str(self.cliff.id), str(self.jason.id),
str(self.jenny.id), str(self.john.id),
])
self.assertSelectOptions(to_box, [str(self.lisa.id), str(self.peter.id)])
self.assertActiveButtons(mode, field_name, False, False, True, True)
# Click 'Choose all' --------------------------------------------------
if mode == 'horizontal':
self.selenium.find_element_by_id(choose_all_link).click()
elif mode == 'vertical':
            # There's no 'Choose all' button in vertical mode, so individually
# select all options and click 'Choose'.
for option in self.selenium.find_elements_by_css_selector(from_box + ' > option'):
option.click()
self.selenium.find_element_by_id(choose_link).click()
self.assertSelectOptions(from_box, [])
self.assertSelectOptions(to_box, [
str(self.lisa.id), str(self.peter.id),
str(self.arthur.id), str(self.bob.id),
str(self.cliff.id), str(self.jason.id),
str(self.jenny.id), str(self.john.id),
])
self.assertActiveButtons(mode, field_name, False, False, False, True)
# Click 'Remove all' --------------------------------------------------
if mode == 'horizontal':
self.selenium.find_element_by_id(remove_all_link).click()
elif mode == 'vertical':
            # There's no 'Remove all' button in vertical mode, so individually
# select all options and click 'Remove'.
for option in self.selenium.find_elements_by_css_selector(to_box + ' > option'):
option.click()
self.selenium.find_element_by_id(remove_link).click()
self.assertSelectOptions(from_box, [
str(self.lisa.id), str(self.peter.id),
str(self.arthur.id), str(self.bob.id),
str(self.cliff.id), str(self.jason.id),
str(self.jenny.id), str(self.john.id),
])
self.assertSelectOptions(to_box, [])
self.assertActiveButtons(mode, field_name, False, False, True, False)
# Choose some options ------------------------------------------------
from_lisa_select_option = self.selenium.find_element_by_css_selector(
'{} > option[value="{}"]'.format(from_box, self.lisa.id)
)
# Check the title attribute is there for tool tips: ticket #20821
self.assertEqual(from_lisa_select_option.get_attribute('title'), from_lisa_select_option.get_attribute('text'))
self.select_option(from_box, str(self.lisa.id))
self.select_option(from_box, str(self.jason.id))
self.select_option(from_box, str(self.bob.id))
self.select_option(from_box, str(self.john.id))
self.assertActiveButtons(mode, field_name, True, False, True, False)
self.selenium.find_element_by_id(choose_link).click()
self.assertActiveButtons(mode, field_name, False, False, True, True)
self.assertSelectOptions(from_box, [
str(self.peter.id), str(self.arthur.id),
str(self.cliff.id), str(self.jenny.id),
])
self.assertSelectOptions(to_box, [
str(self.lisa.id), str(self.bob.id),
str(self.jason.id), str(self.john.id),
])
# Check the tooltip is still there after moving: ticket #20821
to_lisa_select_option = self.selenium.find_element_by_css_selector(
'{} > option[value="{}"]'.format(to_box, self.lisa.id)
)
self.assertEqual(to_lisa_select_option.get_attribute('title'), to_lisa_select_option.get_attribute('text'))
# Remove some options -------------------------------------------------
self.select_option(to_box, str(self.lisa.id))
self.select_option(to_box, str(self.bob.id))
self.assertActiveButtons(mode, field_name, False, True, True, True)
self.selenium.find_element_by_id(remove_link).click()
self.assertActiveButtons(mode, field_name, False, False, True, True)
self.assertSelectOptions(from_box, [
str(self.peter.id), str(self.arthur.id),
str(self.cliff.id), str(self.jenny.id),
str(self.lisa.id), str(self.bob.id)
])
self.assertSelectOptions(to_box, [str(self.jason.id), str(self.john.id)])
# Choose some more options --------------------------------------------
self.select_option(from_box, str(self.arthur.id))
self.select_option(from_box, str(self.cliff.id))
self.selenium.find_element_by_id(choose_link).click()
self.assertSelectOptions(from_box, [
str(self.peter.id), str(self.jenny.id),
str(self.lisa.id), str(self.bob.id),
])
self.assertSelectOptions(to_box, [
str(self.jason.id), str(self.john.id),
str(self.arthur.id), str(self.cliff.id),
])
# Choose some more options --------------------------------------------
self.select_option(from_box, str(self.peter.id))
self.select_option(from_box, str(self.lisa.id))
# Confirm they're selected after clicking inactive buttons: ticket #26575
self.assertSelectedOptions(from_box, [str(self.peter.id), str(self.lisa.id)])
self.selenium.find_element_by_id(remove_link).click()
self.assertSelectedOptions(from_box, [str(self.peter.id), str(self.lisa.id)])
# Unselect the options ------------------------------------------------
self.deselect_option(from_box, str(self.peter.id))
self.deselect_option(from_box, str(self.lisa.id))
# Choose some more options --------------------------------------------
self.select_option(to_box, str(self.jason.id))
self.select_option(to_box, str(self.john.id))
# Confirm they're selected after clicking inactive buttons: ticket #26575
self.assertSelectedOptions(to_box, [str(self.jason.id), str(self.john.id)])
self.selenium.find_element_by_id(choose_link).click()
self.assertSelectedOptions(to_box, [str(self.jason.id), str(self.john.id)])
# Unselect the options ------------------------------------------------
self.deselect_option(to_box, str(self.jason.id))
self.deselect_option(to_box, str(self.john.id))
# Pressing buttons shouldn't change the URL.
self.assertEqual(self.selenium.current_url, original_url)
def test_basic(self):
self.selenium.set_window_size(1024, 768)
self.school.students.set([self.lisa, self.peter])
self.school.alumni.set([self.lisa, self.peter])
self.admin_login(username='super', password='secret', login_url='/')
self.selenium.get(self.live_server_url + reverse('admin:admin_widgets_school_change', args=(self.school.id,)))
self.wait_page_ready()
self.execute_basic_operations('vertical', 'students')
self.execute_basic_operations('horizontal', 'alumni')
# Save and check that everything is properly stored in the database ---
self.selenium.find_element_by_xpath('//input[@value="Save"]').click()
self.wait_page_ready()
self.school = School.objects.get(id=self.school.id) # Reload from database
self.assertEqual(list(self.school.students.all()), [self.arthur, self.cliff, self.jason, self.john])
self.assertEqual(list(self.school.alumni.all()), [self.arthur, self.cliff, self.jason, self.john])
def test_filter(self):
"""
Typing in the search box filters out options displayed in the 'from'
box.
"""
from selenium.webdriver.common.keys import Keys
self.selenium.set_window_size(1024, 768)
self.school.students.set([self.lisa, self.peter])
self.school.alumni.set([self.lisa, self.peter])
self.admin_login(username='super', password='secret', login_url='/')
self.selenium.get(self.live_server_url + reverse('admin:admin_widgets_school_change', args=(self.school.id,)))
for field_name in ['students', 'alumni']:
from_box = '#id_%s_from' % field_name
to_box = '#id_%s_to' % field_name
choose_link = 'id_%s_add_link' % field_name
remove_link = 'id_%s_remove_link' % field_name
input = self.selenium.find_element_by_id('id_%s_input' % field_name)
# Initial values
self.assertSelectOptions(from_box, [
str(self.arthur.id), str(self.bob.id),
str(self.cliff.id), str(self.jason.id),
str(self.jenny.id), str(self.john.id),
])
# Typing in some characters filters out non-matching options
input.send_keys('a')
self.assertSelectOptions(from_box, [str(self.arthur.id), str(self.jason.id)])
input.send_keys('R')
self.assertSelectOptions(from_box, [str(self.arthur.id)])
# Clearing the text box makes the other options reappear
input.send_keys([Keys.BACK_SPACE])
self.assertSelectOptions(from_box, [str(self.arthur.id), str(self.jason.id)])
input.send_keys([Keys.BACK_SPACE])
self.assertSelectOptions(from_box, [
str(self.arthur.id), str(self.bob.id),
str(self.cliff.id), str(self.jason.id),
str(self.jenny.id), str(self.john.id),
])
# -----------------------------------------------------------------
# Choosing a filtered option sends it properly to the 'to' box.
input.send_keys('a')
self.assertSelectOptions(from_box, [str(self.arthur.id), str(self.jason.id)])
self.select_option(from_box, str(self.jason.id))
self.selenium.find_element_by_id(choose_link).click()
self.assertSelectOptions(from_box, [str(self.arthur.id)])
self.assertSelectOptions(to_box, [
str(self.lisa.id), str(self.peter.id), str(self.jason.id),
])
self.select_option(to_box, str(self.lisa.id))
self.selenium.find_element_by_id(remove_link).click()
self.assertSelectOptions(from_box, [str(self.arthur.id), str(self.lisa.id)])
self.assertSelectOptions(to_box, [str(self.peter.id), str(self.jason.id)])
input.send_keys([Keys.BACK_SPACE]) # Clear text box
self.assertSelectOptions(from_box, [
str(self.arthur.id), str(self.bob.id),
str(self.cliff.id), str(self.jenny.id),
str(self.john.id), str(self.lisa.id),
])
self.assertSelectOptions(to_box, [str(self.peter.id), str(self.jason.id)])
# -----------------------------------------------------------------
# Pressing enter on a filtered option sends it properly to
# the 'to' box.
self.select_option(to_box, str(self.jason.id))
self.selenium.find_element_by_id(remove_link).click()
input.send_keys('ja')
self.assertSelectOptions(from_box, [str(self.jason.id)])
input.send_keys([Keys.ENTER])
self.assertSelectOptions(to_box, [str(self.peter.id), str(self.jason.id)])
input.send_keys([Keys.BACK_SPACE, Keys.BACK_SPACE])
# Save and check that everything is properly stored in the database ---
with self.wait_page_loaded():
self.selenium.find_element_by_xpath('//input[@value="Save"]').click()
self.school = School.objects.get(id=self.school.id) # Reload from database
self.assertEqual(list(self.school.students.all()), [self.jason, self.peter])
self.assertEqual(list(self.school.alumni.all()), [self.jason, self.peter])
def test_back_button_bug(self):
"""
Some browsers had a bug where navigating away from the change page
and then clicking the browser's back button would clear the
filter_horizontal/filter_vertical widgets (#13614).
"""
self.school.students.set([self.lisa, self.peter])
self.school.alumni.set([self.lisa, self.peter])
self.admin_login(username='super', password='secret', login_url='/')
change_url = reverse('admin:admin_widgets_school_change', args=(self.school.id,))
self.selenium.get(self.live_server_url + change_url)
# Navigate away and go back to the change form page.
self.selenium.find_element_by_link_text('Home').click()
self.selenium.back()
expected_unselected_values = [
str(self.arthur.id), str(self.bob.id), str(self.cliff.id),
str(self.jason.id), str(self.jenny.id), str(self.john.id),
]
expected_selected_values = [str(self.lisa.id), str(self.peter.id)]
# Everything is still in place
self.assertSelectOptions('#id_students_from', expected_unselected_values)
self.assertSelectOptions('#id_students_to', expected_selected_values)
self.assertSelectOptions('#id_alumni_from', expected_unselected_values)
self.assertSelectOptions('#id_alumni_to', expected_selected_values)
def test_refresh_page(self):
"""
Horizontal and vertical filter widgets keep selected options on page
reload (#22955).
"""
self.school.students.add(self.arthur, self.jason)
self.school.alumni.add(self.arthur, self.jason)
self.admin_login(username='super', password='secret', login_url='/')
change_url = reverse('admin:admin_widgets_school_change', args=(self.school.id,))
self.selenium.get(self.live_server_url + change_url)
options_len = len(self.selenium.find_elements_by_css_selector('#id_students_to > option'))
self.assertEqual(options_len, 2)
        # self.selenium.refresh() or send_keys(Keys.F5) does a hard reload and
# doesn't replicate what happens when a user clicks the browser's
# 'Refresh' button.
with self.wait_page_loaded():
self.selenium.execute_script("location.reload()")
options_len = len(self.selenium.find_elements_by_css_selector('#id_students_to > option'))
self.assertEqual(options_len, 2)
class AdminRawIdWidgetSeleniumTests(AdminWidgetSeleniumTestCase):
def setUp(self):
super().setUp()
Band.objects.create(id=42, name='Bogey Blues')
Band.objects.create(id=98, name='Green Potatoes')
def test_ForeignKey(self):
self.admin_login(username='super', password='secret', login_url='/')
self.selenium.get(self.live_server_url + reverse('admin:admin_widgets_event_add'))
main_window = self.selenium.current_window_handle
# No value has been selected yet
self.assertEqual(self.selenium.find_element_by_id('id_main_band').get_attribute('value'), '')
# Open the popup window and click on a band
self.selenium.find_element_by_id('lookup_id_main_band').click()
self.wait_for_and_switch_to_popup()
link = self.selenium.find_element_by_link_text('Bogey Blues')
self.assertIn('/band/42/', link.get_attribute('href'))
link.click()
# The field now contains the selected band's id
self.selenium.switch_to.window(main_window)
self.wait_for_value('#id_main_band', '42')
# Reopen the popup window and click on another band
self.selenium.find_element_by_id('lookup_id_main_band').click()
self.wait_for_and_switch_to_popup()
link = self.selenium.find_element_by_link_text('Green Potatoes')
self.assertIn('/band/98/', link.get_attribute('href'))
link.click()
# The field now contains the other selected band's id
self.selenium.switch_to.window(main_window)
self.wait_for_value('#id_main_band', '98')
def test_many_to_many(self):
self.admin_login(username='super', password='secret', login_url='/')
self.selenium.get(self.live_server_url + reverse('admin:admin_widgets_event_add'))
main_window = self.selenium.current_window_handle
# No value has been selected yet
self.assertEqual(self.selenium.find_element_by_id('id_supporting_bands').get_attribute('value'), '')
# Help text for the field is displayed
self.assertEqual(
self.selenium.find_element_by_css_selector('.field-supporting_bands div.help').text,
'Supporting Bands.'
)
# Open the popup window and click on a band
self.selenium.find_element_by_id('lookup_id_supporting_bands').click()
self.wait_for_and_switch_to_popup()
link = self.selenium.find_element_by_link_text('Bogey Blues')
self.assertIn('/band/42/', link.get_attribute('href'))
link.click()
# The field now contains the selected band's id
self.selenium.switch_to.window(main_window)
self.wait_for_value('#id_supporting_bands', '42')
# Reopen the popup window and click on another band
self.selenium.find_element_by_id('lookup_id_supporting_bands').click()
self.wait_for_and_switch_to_popup()
link = self.selenium.find_element_by_link_text('Green Potatoes')
self.assertIn('/band/98/', link.get_attribute('href'))
link.click()
# The field now contains the two selected bands' ids
self.selenium.switch_to.window(main_window)
self.wait_for_value('#id_supporting_bands', '42,98')
class RelatedFieldWidgetSeleniumTests(AdminWidgetSeleniumTestCase):
def test_ForeignKey_using_to_field(self):
self.admin_login(username='super', password='secret', login_url='/')
self.selenium.get(self.live_server_url + reverse('admin:admin_widgets_profile_add'))
main_window = self.selenium.current_window_handle
# Click the Add User button to add new
self.selenium.find_element_by_id('add_id_user').click()
self.wait_for_and_switch_to_popup()
password_field = self.selenium.find_element_by_id('id_password')
password_field.send_keys('password')
username_field = self.selenium.find_element_by_id('id_username')
username_value = 'newuser'
username_field.send_keys(username_value)
save_button_css_selector = '.submit-row > input[type=submit]'
self.selenium.find_element_by_css_selector(save_button_css_selector).click()
self.selenium.switch_to.window(main_window)
# The field now contains the new user
self.selenium.find_element_by_css_selector('#id_user option[value=newuser]')
# Click the Change User button to change it
self.selenium.find_element_by_id('change_id_user').click()
self.wait_for_and_switch_to_popup()
username_field = self.selenium.find_element_by_id('id_username')
username_value = 'changednewuser'
username_field.clear()
username_field.send_keys(username_value)
save_button_css_selector = '.submit-row > input[type=submit]'
self.selenium.find_element_by_css_selector(save_button_css_selector).click()
self.selenium.switch_to.window(main_window)
self.selenium.find_element_by_css_selector('#id_user option[value=changednewuser]')
# Go ahead and submit the form to make sure it works
self.selenium.find_element_by_css_selector(save_button_css_selector).click()
self.wait_for_text('li.success', 'The profile “changednewuser” was added successfully.')
profiles = Profile.objects.all()
self.assertEqual(len(profiles), 1)
self.assertEqual(profiles[0].user.username, username_value)
class DateTimePickerSeleniumTests(AdminWidgetSeleniumTestCase):
    def test_show_hide_date_time_picker_widgets(self):
"""
Pressing the ESC key or clicking on a widget value closes the date and
time picker widgets.
"""
from selenium.webdriver.common.keys import Keys
self.admin_login(username='super', password='secret', login_url='/')
        # Open a page that has date and time picker widgets
self.selenium.get(self.live_server_url + reverse('admin:admin_widgets_member_add'))
# First, with the date picker widget ---------------------------------
cal_icon = self.selenium.find_element_by_id('calendarlink0')
# The date picker is hidden
self.assertFalse(self.selenium.find_element_by_id('calendarbox0').is_displayed())
# Click the calendar icon
cal_icon.click()
# The date picker is visible
self.assertTrue(self.selenium.find_element_by_id('calendarbox0').is_displayed())
# Press the ESC key
self.selenium.find_element_by_tag_name('body').send_keys([Keys.ESCAPE])
# The date picker is hidden again
self.assertFalse(self.selenium.find_element_by_id('calendarbox0').is_displayed())
# Click the calendar icon, then on the 15th of current month
cal_icon.click()
self.selenium.find_element_by_xpath("//a[contains(text(), '15')]").click()
self.assertFalse(self.selenium.find_element_by_id('calendarbox0').is_displayed())
self.assertEqual(
self.selenium.find_element_by_id('id_birthdate_0').get_attribute('value'),
datetime.today().strftime('%Y-%m-') + '15',
)
# Then, with the time picker widget ----------------------------------
time_icon = self.selenium.find_element_by_id('clocklink0')
# The time picker is hidden
self.assertFalse(self.selenium.find_element_by_id('clockbox0').is_displayed())
# Click the time icon
time_icon.click()
# The time picker is visible
self.assertTrue(self.selenium.find_element_by_id('clockbox0').is_displayed())
self.assertEqual(
[
x.text for x in
self.selenium.find_elements_by_xpath("//ul[@class='timelist']/li/a")
],
['Now', 'Midnight', '6 a.m.', 'Noon', '6 p.m.']
)
# Press the ESC key
self.selenium.find_element_by_tag_name('body').send_keys([Keys.ESCAPE])
# The time picker is hidden again
self.assertFalse(self.selenium.find_element_by_id('clockbox0').is_displayed())
# Click the time icon, then select the 'Noon' value
time_icon.click()
self.selenium.find_element_by_xpath("//a[contains(text(), 'Noon')]").click()
self.assertFalse(self.selenium.find_element_by_id('clockbox0').is_displayed())
self.assertEqual(
self.selenium.find_element_by_id('id_birthdate_1').get_attribute('value'),
'12:00:00',
)
def test_calendar_nonday_class(self):
"""
Ensure cells that are not days of the month have the `nonday` CSS class.
Refs #4574.
"""
self.admin_login(username='super', password='secret', login_url='/')
        # Open a page that has date and time picker widgets
self.selenium.get(self.live_server_url + reverse('admin:admin_widgets_member_add'))
# fill in the birth date.
self.selenium.find_element_by_id('id_birthdate_0').send_keys('2013-06-01')
# Click the calendar icon
self.selenium.find_element_by_id('calendarlink0').click()
# get all the tds within the calendar
calendar0 = self.selenium.find_element_by_id('calendarin0')
tds = calendar0.find_elements_by_tag_name('td')
# make sure the first and last 6 cells have class nonday
for td in tds[:6] + tds[-6:]:
self.assertEqual(td.get_attribute('class'), 'nonday')
def test_calendar_selected_class(self):
"""
Ensure cell for the day in the input has the `selected` CSS class.
Refs #4574.
"""
self.admin_login(username='super', password='secret', login_url='/')
        # Open a page that has date and time picker widgets
self.selenium.get(self.live_server_url + reverse('admin:admin_widgets_member_add'))
# fill in the birth date.
self.selenium.find_element_by_id('id_birthdate_0').send_keys('2013-06-01')
# Click the calendar icon
self.selenium.find_element_by_id('calendarlink0').click()
# get all the tds within the calendar
calendar0 = self.selenium.find_element_by_id('calendarin0')
tds = calendar0.find_elements_by_tag_name('td')
# verify the selected cell
selected = tds[6]
self.assertEqual(selected.get_attribute('class'), 'selected')
self.assertEqual(selected.text, '1')
def test_calendar_no_selected_class(self):
"""
Ensure no cells are given the selected class when the field is empty.
Refs #4574.
"""
self.admin_login(username='super', password='secret', login_url='/')
        # Open a page that has date and time picker widgets
self.selenium.get(self.live_server_url + reverse('admin:admin_widgets_member_add'))
# Click the calendar icon
self.selenium.find_element_by_id('calendarlink0').click()
# get all the tds within the calendar
calendar0 = self.selenium.find_element_by_id('calendarin0')
tds = calendar0.find_elements_by_tag_name('td')
# verify there are no cells with the selected class
selected = [td for td in tds if td.get_attribute('class') == 'selected']
self.assertEqual(len(selected), 0)
def test_calendar_show_date_from_input(self):
"""
The calendar shows the date from the input field for every locale
supported by Django.
"""
self.selenium.set_window_size(1024, 768)
self.admin_login(username='super', password='secret', login_url='/')
# Enter test data
member = Member.objects.create(name='Bob', birthdate=datetime(1984, 5, 15), gender='M')
# Get month name translations for every locale
month_string = 'May'
path = os.path.join(os.path.dirname(import_module('django.contrib.admin').__file__), 'locale')
for language_code, language_name in settings.LANGUAGES:
try:
catalog = gettext.translation('djangojs', path, [language_code])
except OSError:
continue
if month_string in catalog._catalog:
month_name = catalog._catalog[month_string]
else:
month_name = month_string
# Get the expected caption
may_translation = month_name
expected_caption = '{:s} {:d}'.format(may_translation.upper(), 1984)
# Test with every locale
with override_settings(LANGUAGE_CODE=language_code, USE_L10N=True):
# Open a page that has a date picker widget
url = reverse('admin:admin_widgets_member_change', args=(member.pk,))
self.selenium.get(self.live_server_url + url)
# Click on the calendar icon
self.selenium.find_element_by_id('calendarlink0').click()
# Make sure that the right month and year are displayed
self.wait_for_text('#calendarin0 caption', expected_caption)
@override_settings(TIME_ZONE='
api_op_DescribeSpotDatafeedSubscription.go
// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
package ec2
import (
"context"
"github.com/aws/aws-sdk-go-v2/aws"
"github.com/aws/aws-sdk-go-v2/internal/awsutil"
)
// Contains the parameters for DescribeSpotDatafeedSubscription.
type DescribeSpotDatafeedSubscriptionInput struct {
_ struct{} `type:"structure"`
// Checks whether you have the required permissions for the action, without
// actually making the request, and provides an error response. If you have
// the required permissions, the error response is DryRunOperation. Otherwise,
// it is UnauthorizedOperation.
DryRun *bool `locationName:"dryRun" type:"boolean"`
}
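
// Illustrative only (not part of the generated API surface): a minimal, hedged
// sketch of using DryRun as a permissions probe, mirroring the request example
// on DescribeSpotDatafeedSubscriptionRequest below. The `client` value is
// assumed to be a *Client constructed elsewhere.
//
//    input := &DescribeSpotDatafeedSubscriptionInput{DryRun: aws.Bool(true)}
//    req := client.DescribeSpotDatafeedSubscriptionRequest(input)
//    if _, err := req.Send(context.TODO()); err != nil {
//        // A DryRunOperation error means the caller has permission;
//        // UnauthorizedOperation means it does not.
//    }
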
// String returns the string representation
func (s DescribeSpotDatafeedSubscriptionInput) String() string {
return awsutil.Prettify(s)
}
// Contains the output of DescribeSpotDatafeedSubscription.
type DescribeSpotDatafeedSubscriptionOutput struct {
_ struct{} `type:"structure"`
// The Spot Instance data feed subscription.
SpotDatafeedSubscription *SpotDatafeedSubscription `locationName:"spotDatafeedSubscription" type:"structure"`
}
// String returns the string representation
func (s DescribeSpotDatafeedSubscriptionOutput) String() string {
return awsutil.Prettify(s)
}
const opDescribeSpotDatafeedSubscription = "DescribeSpotDatafeedSubscription"
// DescribeSpotDatafeedSubscriptionRequest returns a request value for making API operation for
// Amazon Elastic Compute Cloud.
//
// Describes the data feed for Spot Instances. For more information, see Spot
// Instance Data Feed (https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-data-feeds.html)
// in the Amazon EC2 User Guide for Linux Instances.
//
// // Example sending a request using DescribeSpotDatafeedSubscriptionRequest.
// req := client.DescribeSpotDatafeedSubscriptionRequest(params)
// resp, err := req.Send(context.TODO())
// if err == nil {
// fmt.Println(resp)
// }
//
// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeSpotDatafeedSubscription
func (c *Client) DescribeSpotDatafeedSubscriptionRequest(input *DescribeSpotDatafeedSubscriptionInput) DescribeSpotDatafeedSubscriptionRequest {
op := &aws.Operation{
Name: opDescribeSpotDatafeedSubscription,
HTTPMethod: "POST",
HTTPPath: "/",
}
if input == nil {
input = &DescribeSpotDatafeedSubscriptionInput{}
}
req := c.newRequest(op, input, &DescribeSpotDatafeedSubscriptionOutput{})
return DescribeSpotDatafeedSubscriptionRequest{Request: req, Input: input, Copy: c.DescribeSpotDatafeedSubscriptionRequest}
}
// DescribeSpotDatafeedSubscriptionRequest is the request type for the
// DescribeSpotDatafeedSubscription API operation.
type DescribeSpotDatafeedSubscriptionRequest struct {
*aws.Request
Input *DescribeSpotDatafeedSubscriptionInput
Copy func(*DescribeSpotDatafeedSubscriptionInput) DescribeSpotDatafeedSubscriptionRequest
}
// Send marshals and sends the DescribeSpotDatafeedSubscription API request.
func (r DescribeSpotDatafeedSubscriptionRequest) Send(ctx context.Context) (*DescribeSpotDatafeedSubscriptionResponse, error) {
r.Request.SetContext(ctx)
err := r.Request.Send()
if err != nil {
return nil, err
}
resp := &DescribeSpotDatafeedSubscriptionResponse{
DescribeSpotDatafeedSubscriptionOutput: r.Request.Data.(*DescribeSpotDatafeedSubscriptionOutput),
response: &aws.Response{Request: r.Request},
}
return resp, nil
}
// DescribeSpotDatafeedSubscriptionResponse is the response type for the
// DescribeSpotDatafeedSubscription API operation.
type DescribeSpotDatafeedSubscriptionResponse struct {
*DescribeSpotDatafeedSubscriptionOutput
response *aws.Response
}
// SDKResponseMetdata returns the response metadata for the
// DescribeSpotDatafeedSubscription request.
func (r *DescribeSpotDatafeedSubscriptionResponse) SDKResponseMetdata() *aws.Response {
return r.response
}
jiami.js
"use strict";
function _typeof(obj) { "@babel/helpers - typeof"; if (typeof Symbol === "function" && typeof Symbol.iterator === "symbol") { _typeof = function _typeof(obj) { return typeof obj; }; } else { _typeof = function _typeof(obj) { return obj && typeof Symbol === "function" && obj.constructor === Symbol && obj !== Symbol.prototype ? "symbol" : typeof obj; }; } return _typeof(obj); }
/*! For license information please see jsencrypt.min.js.LICENSE.txt */
!function (t, e) {
"object" == (typeof exports === "undefined" ? "undefined" : _typeof(exports)) && "object" == (typeof module === "undefined" ? "undefined" : _typeof(module)) ? module.exports = e() : "function" == typeof define && define.amd ? define([], e) : "object" == (typeof exports === "undefined" ? "undefined" : _typeof(exports)) ? exports.JSEncrypt = e() : t.JSEncrypt = e();
}(window, function () {
return function () {
"use strict";
var t = [, function (t, e, i) {
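      // jsbn helpers (minified names): r = int2char, n = AND, s = OR, o = XOR,
      // h = AND-NOT, a = index of lowest set bit, u = count of set bits.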
function r(t) {
return "0123456789abcdefghijklmnopqrstuvwxyz".charAt(t);
}
function n(t, e) {
return t & e;
}
function s(t, e) {
return t | e;
}
function o(t, e) {
return t ^ e;
}
function h(t, e) {
return t & ~e;
}
function a(t) {
if (0 == t) return -1;
var e = 0;
return 0 == (65535 & t) && (t >>= 16, e += 16), 0 == (255 & t) && (t >>= 8, e += 8), 0 == (15 & t) && (t >>= 4, e += 4), 0 == (3 & t) && (t >>= 2, e += 2), 0 == (1 & t) && ++e, e;
}
function u(t) {
for (var e = 0; 0 != t;) {
t &= t - 1, ++e;
}
return e;
}
i.d(e, {
"default": function _default() {
return nt;
}
});
var c,
f = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
function l(t) {
var e,
i,
r = "";
for (e = 0; e + 3 <= t.length; e += 3) {
i = parseInt(t.substring(e, e + 3), 16), r += f.charAt(i >> 6) + f.charAt(63 & i);
}
for (e + 1 == t.length ? (i = parseInt(t.substring(e, e + 1), 16), r += f.charAt(i << 2)) : e + 2 == t.length && (i = parseInt(t.substring(e, e + 2), 16), r += f.charAt(i >> 2) + f.charAt((3 & i) << 4)); (3 & r.length) > 0;) {
r += "=";
}
return r;
}
function p(t) {
var e,
i = "",
n = 0,
s = 0;
for (e = 0; e < t.length && "=" != t.charAt(e); ++e) {
var o = f.indexOf(t.charAt(e));
o < 0 || (0 == n ? (i += r(o >> 2), s = 3 & o, n = 1) : 1 == n ? (i += r(s << 2 | o >> 4), s = 15 & o, n = 2) : 2 == n ? (i += r(s), i += r(o >> 2), s = 3 & o, n = 3) : (i += r(s << 2 | o >> 4), i += r(15 & o), n = 0));
}
return 1 == n && (i += r(s << 2)), i;
}
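      // d: Base64 decoder with PEM "unarmor" support (strips -----BEGIN/END----- armor).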
var g,
d = {
decode: function decode(t) {
var e;
if (void 0 === g) {
var i = "= \f\n\r\t \u2028\u2029";
for (g = Object.create(null), e = 0; e < 64; ++e) {
g["ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/".charAt(e)] = e;
}
for (g["-"] = 62, g._ = 63, e = 0; e < i.length; ++e) {
g[i.charAt(e)] = -1;
}
}
var r = [],
n = 0,
s = 0;
for (e = 0; e < t.length; ++e) {
var o = t.charAt(e);
if ("=" == o) break;
if (-1 != (o = g[o])) {
if (void 0 === o) throw new Error("Illegal character at offset " + e);
n |= o, ++s >= 4 ? (r[r.length] = n >> 16, r[r.length] = n >> 8 & 255, r[r.length] = 255 & n, n = 0, s = 0) : n <<= 6;
}
}
switch (s) {
case 1:
throw new Error("Base64 encoding incomplete: at least 2 bits missing");
case 2:
r[r.length] = n >> 10;
break;
case 3:
r[r.length] = n >> 16, r[r.length] = n >> 8 & 255;
}
return r;
},
re: /-----BEGIN [^-]+-----([A-Za-z0-9+\/=\s]+)-----END [^-]+-----|begin-base64[^\n]+\n([A-Za-z0-9+\/=\s]+)====/,
unarmor: function unarmor(t) {
var e = d.re.exec(t);
if (e) if (e[1]) t = e[1];else {
if (!e[2]) throw new Error("RegExp out of sync");
t = e[2];
}
return d.decode(t);
}
},
v = 1e13,
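      // m: Int10 -- arbitrary-precision decimal accumulator (base 10^13) used to
      // render large ASN.1 INTEGER and OID values as strings.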
m = function () {
function t(t) {
this.buf = [+t || 0];
}
return t.prototype.mulAdd = function (t, e) {
var i,
r,
n = this.buf,
s = n.length;
for (i = 0; i < s; ++i) {
(r = n[i] * t + e) < v ? e = 0 : r -= (e = 0 | r / v) * v, n[i] = r;
}
e > 0 && (n[i] = e);
}, t.prototype.sub = function (t) {
var e,
i,
r = this.buf,
n = r.length;
for (e = 0; e < n; ++e) {
(i = r[e] - t) < 0 ? (i += v, t = 1) : t = 0, r[e] = i;
}
for (; 0 === r[r.length - 1];) {
r.pop();
}
}, t.prototype.toString = function (t) {
if (10 != (t || 10)) throw new Error("only base 10 is supported");
for (var e = this.buf, i = e[e.length - 1].toString(), r = e.length - 2; r >= 0; --r) {
i += (v + e[r]).toString().substring(1);
}
return i;
}, t.prototype.valueOf = function () {
for (var t = this.buf, e = 0, i = t.length - 1; i >= 0; --i) {
e = e * v + t[i];
}
return e;
}, t.prototype.simplify = function () {
var t = this.buf;
return 1 == t.length ? t[0] : this;
}, t;
}(),
y = /^(\d\d)(0[1-9]|1[0-2])(0[1-9]|[12]\d|3[01])([01]\d|2[0-3])(?:([0-5]\d)(?:([0-5]\d)(?:[.,](\d{1,3}))?)?)?(Z|[-+](?:[0]\d|1[0-2])([0-5]\d)?)?$/,
b = /^(\d\d\d\d)(0[1-9]|1[0-2])(0[1-9]|[12]\d|3[01])([01]\d|2[0-3])(?:([0-5]\d)(?:([0-5]\d)(?:[.,](\d{1,3}))?)?)?(Z|[-+](?:[0]\d|1[0-2])([0-5]\d)?)?$/;
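      // y/b: UTCTime and GeneralizedTime regexes; T: truncate a string, appending an ellipsis.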
function T(t, e) {
return t.length > e && (t = t.substring(0, e) + "…"), t;
}
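      // E: ASN.1 Stream -- a cursor over a byte string/array with hex-dump and typed parse helpers.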
var S,
E = function () {
function t(e, i) {
this.hexDigits = "0123456789ABCDEF", e instanceof t ? (this.enc = e.enc, this.pos = e.pos) : (this.enc = e, this.pos = i);
}
return t.prototype.get = function (t) {
if (void 0 === t && (t = this.pos++), t >= this.enc.length) throw new Error("Requesting byte offset " + t + " on a stream of length " + this.enc.length);
return "string" == typeof this.enc ? this.enc.charCodeAt(t) : this.enc[t];
}, t.prototype.hexByte = function (t) {
return this.hexDigits.charAt(t >> 4 & 15) + this.hexDigits.charAt(15 & t);
}, t.prototype.hexDump = function (t, e, i) {
for (var r = "", n = t; n < e; ++n) {
if (r += this.hexByte(this.get(n)), !0 !== i) switch (15 & n) {
case 7:
r += " ";
break;
case 15:
r += "\n";
break;
default:
r += " ";
}
}
return r;
}, t.prototype.isASCII = function (t, e) {
for (var i = t; i < e; ++i) {
var r = this.get(i);
if (r < 32 || r > 176) return !1;
}
return !0;
}, t.prototype.parseStringISO = function (t, e) {
for (var i = "", r = t; r < e; ++r) {
i += String.fromCharCode(this.get(r));
}
return i;
}, t.prototype.parseStringUTF = function (t, e) {
for (var i = "", r = t; r < e;) {
var n = this.get(r++);
i += n < 128 ? String.fromCharCode(n) : n > 191 && n < 224 ? String.fromCharCode((31 & n) << 6 | 63 & this.get(r++)) : String.fromCharCode((15 & n) << 12 | (63 & this.get(r++)) << 6 | 63 & this.get(r++));
}
return i;
}, t.prototype.parseStringBMP = function (t, e) {
for (var i, r, n = "", s = t; s < e;) {
i = this.get(s++), r = this.get(s++), n += String.fromCharCode(i << 8 | r);
}
return n;
}, t.prototype.parseTime = function (t, e, i) {
var r = this.parseStringISO(t, e),
n = (i ? y : b).exec(r);
return n ? (i && (n[1] = +n[1], n[1] += +n[1] < 70 ? 2e3 : 1900), r = n[1] + "-" + n[2] + "-" + n[3] + " " + n[4], n[5] && (r += ":" + n[5], n[6] && (r += ":" + n[6], n[7] && (r += "." + n[7]))), n[8] && (r += " UTC", "Z" != n[8] && (r += n[8], n[9] && (r += ":" + n[9]))), r) : "Unrecognized time: " + r;
}, t.prototype.parseInteger = function (t, e) {
for (var i, r = this.get(t), n = r > 127, s = n ? 255 : 0, o = ""; r == s && ++t < e;) {
r = this.get(t);
}
if (0 == (i = e - t)) return n ? -1 : 0;
if (i > 4) {
for (o = r, i <<= 3; 0 == (128 & (+o ^ s));) {
o = +o << 1, --i;
}
o = "(" + i + " bit)\n";
}
n && (r -= 256);
for (var h = new m(r), a = t + 1; a < e; ++a) {
h.mulAdd(256, this.get(a));
}
return o + h.toString();
}, t.prototype.parseBitString = function (t, e, i) {
for (var r = this.get(t), n = "(" + ((e - t - 1 << 3) - r) + " bit)\n", s = "", o = t + 1; o < e; ++o) {
for (var h = this.get(o), a = o == e - 1 ? r : 0, u = 7; u >= a; --u) {
s += h >> u & 1 ? "1" : "0";
}
if (s.length > i) return n + T(s, i);
}
return n + s;
}, t.prototype.parseOctetString = function (t, e, i) {
if (this.isASCII(t, e)) return T(this.parseStringISO(t, e), i);
var r = e - t,
n = "(" + r + " byte)\n";
r > (i /= 2) && (e = t + i);
for (var s = t; s < e; ++s) {
n += this.hexByte(this.get(s));
}
return r > i && (n += "…"), n;
}, t.prototype.parseOID = function (t, e, i) {
for (var r = "", n = new m(), s = 0, o = t; o < e; ++o) {
var h = this.get(o);
if (n.mulAdd(128, 127 & h), s += 7, !(128 & h)) {
if ("" === r) {
if ((n = n.simplify()) instanceof m) n.sub(80), r = "2." + n.toString();else {
var a = n < 80 ? n < 40 ? 0 : 1 : 2;
r = a + "." + (n - 40 * a);
}
} else r += "." + n.toString();
if (r.length > i) return T(r, i);
n = new m(), s = 0;
}
}
return s > 0 && (r += ".incomplete"), r;
}, t;
}(),
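      // w: ASN1 node (stream, header, length, tag, sub-elements) with type names,
      // content rendering, and the static DER decoder.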
w = function () {
function t(t, e, i, r, n) {
if (!(r instanceof D)) throw new Error("Invalid tag value.");
this.stream = t, this.header = e, this.length = i, this.tag = r, this.sub = n;
}
return t.prototype.typeName = function () {
switch (this.tag.tagClass) {
case 0:
switch (this.tag.tagNumber) {
case 0:
return "EOC";
case 1:
return "BOOLEAN";
case 2:
return "INTEGER";
case 3:
return "BIT_STRING";
case 4:
return "OCTET_STRING";
case 5:
return "NULL";
case 6:
return "OBJECT_IDENTIFIER";
case 7:
return "ObjectDescriptor";
case 8:
return "EXTERNAL";
case 9:
return "REAL";
case 10:
return "ENUMERATED";
case 11:
return "EMBEDDED_PDV";
case 12:
return "UTF8String";
case 16:
return "SEQUENCE";
case 17:
return "SET";
case 18:
return "NumericString";
case 19:
return "PrintableString";
case 20:
return "TeletexString";
case 21:
return "VideotexString";
case 22:
return "IA5String";
case 23:
return "UTCTime";
case 24:
return "GeneralizedTime";
case 25:
return "GraphicString";
case 26:
return "VisibleString";
case 27:
return "GeneralString";
case 28:
return "UniversalString";
case 30:
return "BMPString";
}
return "Universal_" + this.tag.tagNumber.toString();
case 1:
return "Application_" + this.tag.tagNumber.toString();
case 2:
return "[" + this.tag.tagNumber.toString() + "]";
case 3:
return "Private_" + this.tag.tagNumber.toString();
}
}, t.prototype.content = function (t) {
if (void 0 === this.tag) return null;
void 0 === t && (t = 1 / 0);
var e = this.posContent(),
i = Math.abs(this.length);
if (!this.tag.isUniversal()) return null !== this.sub ? "(" + this.sub.length + " elem)" : this.stream.parseOctetString(e, e + i, t);
switch (this.tag.tagNumber) {
case 1:
return 0 === this.stream.get(e) ? "false" : "true";
case 2:
return this.stream.parseInteger(e, e + i);
case 3:
return this.sub ? "(" + this.sub.length + " elem)" : this.stream.parseBitString(e, e + i, t);
case 4:
return this.sub ? "(" + this.sub.length + " elem)" : this.stream.parseOctetString(e, e + i, t);
case 6:
return this.stream.parseOID(e, e + i, t);
case 16:
case 17:
return null !== this.sub ? "(" + this.sub.length + " elem)" : "(no elem)";
case 12:
return T(this.stream.parseStringUTF(e, e + i), t);
case 18:
case 19:
case 20:
case 21:
case 22:
case 26:
return T(this.stream.parseStringISO(e, e + i), t);
case 30:
return T(this.stream.parseStringBMP(e, e + i), t);
case 23:
case 24:
return this.stream.parseTime(e, e + i, 23 == this.tag.tagNumber);
}
return null;
}, t.prototype.toString = function () {
return this.typeName() + "@" + this.stream.pos + "[header:" + this.header + ",length:" + this.length + ",sub:" + (null === this.sub ? "null" : this.sub.length) + "]";
}, t.prototype.toPrettyString = function (t) {
void 0 === t && (t = "");
var e = t + this.typeName() + " @" + this.stream.pos;
if (this.length >= 0 && (e += "+"), e += this.length, this.tag.tagConstructed ? e += " (constructed)" : !this.tag.isUniversal() || 3 != this.tag.tagNumber && 4 != this.tag.tagNumber || null === this.sub || (e += " (encapsulates)"), e += "\n", null !== this.sub) {
t += " ";
for (var i = 0, r = this.sub.length; i < r; ++i) {
e += this.sub[i].toPrettyString(t);
}
}
return e;
}, t.prototype.posStart = function () {
return this.stream.pos;
}, t.prototype.posContent = function () {
return this.stream.pos + this.header;
}, t.prototype.posEnd = function () {
return this.stream.pos + this.header + Math.abs(this.length);
}, t.prototype.toHexString = function () {
return this.stream.hexDump(this.posStart(), this.posEnd(), !0);
}, t.decodeLength = function (t) {
var e = t.get(),
i = 127 & e;
if (i == e) return i;
if (i > 6) throw new Error("Length over 48 bits not supported at position " + (t.pos - 1));
if (0 === i) return null;
e = 0;
for (var r = 0; r < i; ++r) {
e = 256 * e + t.get();
}
return e;
}, t.prototype.getHexStringValue = function () {
var t = this.toHexString(),
e = 2 * this.header,
i = 2 * this.length;
return t.substr(e, i);
}, t.decode = function (e) {
var i;
i = e instanceof E ? e : new E(e, 0);
var r = new E(i),
n = new D(i),
s = t.decodeLength(i),
o = i.pos,
h = o - r.pos,
a = null,
u = function u() {
var e = [];
if (null !== s) {
for (var r = o + s; i.pos < r;) {
e[e.length] = t.decode(i);
}
if (i.pos != r) throw new Error("Content size is not correct for container starting at offset " + o);
} else try {
for (;;) {
var n = t.decode(i);
if (n.tag.isEOC()) break;
e[e.length] = n;
}
s = o - i.pos;
} catch (t) {
throw new Error("Exception while decoding undefined length content: " + t);
}
return e;
};
if (n.tagConstructed) a = u();else if (n.isUniversal() && (3 == n.tagNumber || 4 == n.tagNumber)) try {
if (3 == n.tagNumber && 0 != i.get()) throw new Error("BIT STRINGs with unused bits cannot encapsulate.");
a = u();
for (var c = 0; c < a.length; ++c) {
if (a[c].tag.isEOC()) throw new Error("EOC is not supposed to be actual content.");
}
} catch (t) {
a = null;
}
if (null === a) {
if (null === s) throw new Error("We can't skip over an invalid tag with undefined length at offset " + o);
i.pos = o + Math.abs(s);
}
return new t(r, h, s, n, a);
}, t;
}(),
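      // D: ASN1Tag -- decodes tag class, constructed flag, and (possibly multi-byte) tag number.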
D = function () {
function t(t) {
var e = t.get();
if (this.tagClass = e >> 6, this.tagConstructed = 0 != (32 & e), this.tagNumber = 31 & e, 31 == this.tagNumber) {
var i = new m();
do {
e = t.get(), i.mulAdd(128, 127 & e);
} while (128 & e);
this.tagNumber = i.simplify();
}
}
return t.prototype.isUniversal = function () {
return 0 === this.tagClass;
}, t.prototype.isEOC = function () {
return 0 === this.tagClass && 0 === this.tagNumber;
}, t;
}(),
x = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127, 131, 137, 139, 149, 151, 157, 163, 167, 173, 179, 181, 191, 193, 197, 199, 211, 223, 227, 229, 233, 239, 241, 251, 257, 263, 269, 271, 277, 281, 283, 293, 307, 311, 313, 317, 331, 337, 347, 349, 353, 359, 367, 373, 379, 383, 389, 397, 401, 409, 419, 421, 431, 433, 439, 443, 449, 457, 461, 463, 467, 479, 487, 491, 499, 503, 509, 521, 523, 541, 547, 557, 563, 569, 571, 577, 587, 593, 599, 601, 607, 613, 617, 619, 631, 641, 643, 647, 653, 659, 661, 673, 677, 683, 691, 701, 709, 719, 727, 733, 739, 743, 751, 757, 761, 769, 773, 787, 797, 809, 811, 821, 823, 827, 829, 839, 853, 857, 859, 863, 877, 881, 883, 887, 907, 911, 919, 929, 937, 941, 947, 953, 967, 971, 977, 983, 991, 997],
R = (1 << 26) / x[x.length - 1],
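      // x: small primes for trial division; R: limit for batched prime products;
      // B: the jsbn BigInteger class (arbitrary-precision integers backing RSA).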
B = function () {
function t(t, e, i) {
null != t && ("number" == typeof t ? this.fromNumber(t, e, i) : null == e && "string" != typeof t ? this.fromString(t, 256) : this.fromString(t, e));
}
return t.prototype.toString = function (t) {
if (this.s < 0) return "-" + this.negate().toString(t);
var e;
if (16 == t) e = 4;else if (8 == t) e = 3;else if (2 == t) e = 1;else if (32 == t) e = 5;else {
if (4 != t) return this.toRadix(t);
e = 2;
}
var i,
n = (1 << e) - 1,
s = !1,
o = "",
h = this.t,
a = this.DB - h * this.DB % e;
if (h-- > 0) for (a < this.DB && (i = this[h] >> a) > 0 && (s = !0, o = r(i)); h >= 0;) {
a < e ? (i = (this[h] & (1 << a) - 1) << e - a, i |= this[--h] >> (a += this.DB - e)) : (i = this[h] >> (a -= e) & n, a <= 0 && (a += this.DB, --h)), i > 0 && (s = !0), s && (o += r(i));
}
return s ? o : "0";
}, t.prototype.negate = function () {
var e = N();
return t.ZERO.subTo(this, e), e;
}, t.prototype.abs = function () {
return this.s < 0 ? this.negate() : this;
}, t.prototype.compareTo = function (t) {
var e = this.s - t.s;
if (0 != e) return e;
var i = this.t;
if (0 != (e = i - t.t)) return this.s < 0 ? -e : e;
for (; --i >= 0;) {
if (0 != (e = this[i] - t[i])) return e;
}
return 0;
}, t.prototype.bitLength = function () {
return this.t <= 0 ? 0 : this.DB * (this.t - 1) + F(this[this.t - 1] ^ this.s & this.DM);
}, t.prototype.mod = function (e) {
var i = N();
return this.abs().divRemTo(e, null, i), this.s < 0 && i.compareTo(t.ZERO) > 0 && e.subTo(i, i), i;
}, t.prototype.modPowInt = function (t, e) {
var i;
return i = t < 256 || e.isEven() ? new A(e) : new V(e), this.exp(t, i);
}, t.prototype.clone = function () {
var t = N();
return this.copyTo(t), t;
}, t.prototype.intValue = function () {
if (this.s < 0) {
if (1 == this.t) return this[0] - this.DV;
if (0 == this.t) return -1;
} else {
if (1 == this.t) return this[0];
if (0 == this.t) return 0;
}
return (this[1] & (1 << 32 - this.DB) - 1) << this.DB | this[0];
}, t.prototype.byteValue = function () {
return 0 == this.t ? this.s : this[0] << 24 >> 24;
}, t.prototype.shortValue = function () {
return 0 == this.t ? this.s : this[0] << 16 >> 16;
}, t.prototype.signum = function () {
return this.s < 0 ? -1 : this.t <= 0 || 1 == this.t && this[0] <= 0 ? 0 : 1;
}, t.prototype.toByteArray = function () {
var t = this.t,
e = [];
e[0] = this.s;
var i,
r = this.DB - t * this.DB % 8,
n = 0;
if (t-- > 0) for (r < this.DB && (i = this[t] >> r) != (this.s & this.DM) >> r && (e[n++] = i | this.s << this.DB - r); t >= 0;) {
r < 8 ? (i = (this[t] & (1 << r) - 1) << 8 - r, i |= this[--t] >> (r += this.DB - 8)) : (i = this[t] >> (r -= 8) & 255, r <= 0 && (r += this.DB, --t)), 0 != (128 & i) && (i |= -256), 0 == n && (128 & this.s) != (128 & i) && ++n, (n > 0 || i != this.s) && (e[n++] = i);
}
return e;
}, t.prototype.equals = function (t) {
return 0 == this.compareTo(t);
}, t.prototype.min = function (t) {
return this.compareTo(t) < 0 ? this : t;
}, t.prototype.max = function (t) {
return this.compareTo(t) > 0 ? this : t;
}, t.prototype.and = function (t) {
var e = N();
return this.bitwiseTo(t, n, e), e;
}, t.prototype.or = function (t) {
var e = N();
return this.bitwiseTo(t, s, e), e;
}, t.prototype.xor = function (t) {
var e = N();
return this.bitwiseTo(t, o, e), e;
      }, t.prototype.not = function () {
for (var t = N(), e = 0; e < this.t; ++e) {
t[e] = this.DM & ~this[e];
}
return t.t = this.t, t.s = ~this.s, t;
}, t.prototype.shiftLeft = function (t) {
var e = N();
return t < 0 ? this.rShiftTo(-t, e) : this.lShiftTo(t, e), e;
}, t.prototype.shiftRight = function (t) {
var e = N();
return t < 0 ? this.lShiftTo(-t, e) : this.rShiftTo(t, e), e;
}, t.prototype.getLowestSetBit = function () {
for (var t = 0; t < this.t; ++t) {
if (0 != this[t]) return t * this.DB + a(this[t]);
}
return this.s < 0 ? this.t * this.DB : -1;
}, t.prototype.bitCount = function () {
for (var t = 0, e = this.s & this.DM, i = 0; i < this.t; ++i) {
t += u(this[i] ^ e);
}
return t;
}, t.prototype.testBit = function (t) {
var e = Math.floor(t / this.DB);
return e >= this.t ? 0 != this.s : 0 != (this[e] & 1 << t % this.DB);
}, t.prototype.setBit = function (t) {
return this.changeBit(t, s);
}, t.prototype.clearBit = function (t) {
return this.changeBit(t, h);
}, t.prototype.flipBit = function (t) {
return this.changeBit(t, o);
}, t.prototype.add = function (t) {
var e = N();
return this.addTo(t, e), e;
}, t.prototype.subtract = function (t) {
var e = N();
return this.subTo(t, e), e;
}, t.prototype.multiply = function (t) {
var e = N();
return this.multiplyTo(t, e), e;
}, t.prototype.divide = function (t) {
var e = N();
return this.divRemTo(t, e, null), e;
}, t.prototype.remainder = function (t) {
var e = N();
return this.divRemTo(t, null, e), e;
}, t.prototype.divideAndRemainder = function (t) {
var e = N(),
i = N();
return this.divRemTo(t, e, i), [e, i];
}, t.prototype.modPow = function (t, e) {
var i,
r,
n = t.bitLength(),
s = C(1);
if (n <= 0) return s;
i = n < 18 ? 1 : n < 48 ? 3 : n < 144 ? 4 : n < 768 ? 5 : 6, r = n < 8 ? new A(e) : e.isEven() ? new I(e) : new V(e);
var o = [],
h = 3,
a = i - 1,
u = (1 << i) - 1;
if (o[1] = r.convert(this), i > 1) {
var c = N();
for (r.sqrTo(o[1], c); h <= u;) {
o[h] = N(), r.mulTo(c, o[h - 2], o[h]), h += 2;
}
}
var f,
l,
p = t.t - 1,
g = !0,
d = N();
for (n = F(t[p]) - 1; p >= 0;) {
for (n >= a ? f = t[p] >> n - a & u : (f = (t[p] & (1 << n + 1) - 1) << a - n, p > 0 && (f |= t[p - 1] >> this.DB + n - a)), h = i; 0 == (1 & f);) {
f >>= 1, --h;
}
if ((n -= h) < 0 && (n += this.DB, --p), g) o[f].copyTo(s), g = !1;else {
for (; h > 1;) {
r.sqrTo(s, d), r.sqrTo(d, s), h -= 2;
}
h > 0 ? r.sqrTo(s, d) : (l = s, s = d, d = l), r.mulTo(d, o[f], s);
}
for (; p >= 0 && 0 == (t[p] & 1 << n);) {
r.sqrTo(s, d), l = s, s = d, d = l, --n < 0 && (n = this.DB - 1, --p);
}
}
return r.revert(s);
}, t.prototype.modInverse = function (e) {
var i = e.isEven();
if (this.isEven() && i || 0 == e.signum()) return t.ZERO;
for (var r = e.clone(), n = this.clone(), s = C(1), o = C(0), h = C(0), a = C(1); 0 != r.signum();) {
for (; r.isEven();) {
r.rShiftTo(1, r), i ? (s.isEven() && o.isEven() || (s.addTo(this, s), o.subTo(e, o)), s.rShiftTo(1, s)) : o.isEven() || o.subTo(e, o), o.rShiftTo(1, o);
}
for (; n.isEven();) {
n.rShiftTo(1, n), i ? (h.isEven() && a.isEven() || (h.addTo(this, h), a.subTo(e, a)), h.rShiftTo(1, h)) : a.isEven() || a.subTo(e, a), a.rShiftTo(1, a);
}
r.compareTo(n) >= 0 ? (r.subTo(n, r), i && s.subTo(h, s), o.subTo(a, o)) : (n.subTo(r, n), i && h.subTo(s, h), a.subTo(o, a));
}
return 0 != n.compareTo(t.ONE) ? t.ZERO : a.compareTo(e) >= 0 ? a.subtract(e) : a.signum() < 0 ? (a.addTo(e, a), a.signum() < 0 ? a.add(e) : a) : a;
}, t.prototype.pow = function (t) {
return this.exp(t, new O());
}, t.prototype.gcd = function (t) {
var e = this.s < 0 ? this.negate() : this.clone(),
i = t.s < 0 ? t.negate() : t.clone();
if (e.compareTo(i) < 0) {
var r = e;
e = i, i = r;
}
var n = e.getLowestSetBit(),
s = i.getLowestSetBit();
if (s < 0) return e;
for (n < s && (s = n), s > 0 && (e.rShiftTo(s, e), i.rShiftTo(s, i)); e.signum() > 0;) {
(n = e.getLowestSetBit()) > 0 && e.rShiftTo(n, e), (n = i.getLowestSetBit()) > 0 && i.rShiftTo(n, i), e.compareTo(i) >= 0 ? (e.subTo(i, e), e.rShiftTo(1, e)) : (i.subTo(e, i), i.rShiftTo(1, i));
}
return s > 0 && i.lShiftTo(s, i), i;
}, t.prototype.isProbablePrime = function (t) {
var e,
i = this.abs();
if (1 == i.t && i[0] <= x[x.length - 1]) {
for (e = 0; e < x.length; ++e) {
if (i[0] == x[e]) return !0;
}
return !1;
}
if (i.isEven()) return !1;
for (e = 1; e < x.length;) {
for (var r = x[e], n = e + 1; n < x.length && r < R;) {
r *= x[n++];
}
for (r = i.modInt(r); e < n;) {
if (r % x[e++] == 0) return !1;
}
}
return i.millerRabin(t);
}, t.prototype.copyTo = function (t) {
for (var e = this.t - 1; e >= 0; --e) {
t[e] = this[e];
}
t.t = this.t, t.s = this.s;
}, t.prototype.fromInt = function (t) {
this.t = 1, this.s = t < 0 ? -1 : 0, t > 0 ? this[0] = t : t < -1 ? this[0] = t + this.DV : this.t = 0;
}, t.prototype.fromString = function (e, i) {
var r;
if (16 == i) r = 4;else if (8 == i) r = 3;else if (256 == i) r = 8;else if (2 == i) r = 1;else if (32 == i) r = 5;else {
if (4 != i) return void this.fromRadix(e, i);
r = 2;
}
this.t = 0, this.s = 0;
for (var n = e.length, s = !1, o = 0; --n >= 0;) {
var h = 8 == r ? 255 & +e[n] : H(e, n);
h < 0 ? "-" == e.charAt(n) && (s = !0) : (s = !1, 0 == o ? this[this.t++] = h : o + r > this.DB ? (this[this.t - 1] |= (h & (1 << this.DB - o) - 1) << o, this[this.t++] = h >> this.DB - o) : this[this.t - 1] |= h << o, (o += r) >= this.DB && (o -= this.DB));
}
8 == r && 0 != (128 & +e[0]) && (this.s = -1, o > 0 && (this[this.t - 1] |= (1 << this.DB - o) - 1 << o)), this.clamp(), s && t.ZERO.subTo(this, this);
}, t.prototype.clamp = function () {
for (var t = this.s & this.DM; this.t > 0 && this[this.t - 1] == t;) {
--this.t;
}
}, t.prototype.dlShiftTo = function (t, e) {
var i;
for (i = this.t - 1; i >= 0; --i) {
e[i + t] = this[i];
}
for (i = t - 1; i >= 0; --i) {
e[i] = 0;
}
e.t = this.t + t, e.s = this.s;
}, t.prototype.drShiftTo = function (t, e) {
for (var i = t; i < this.t; ++i) {
e[i - t] = this[i];
}
e.t = Math.max(this.t - t, 0), e.s = this.s;
}, t.prototype.lShiftTo = function (t, e) {
for (var i = t % this.DB, r = this.DB - i, n = (1 << r) - 1, s = Math.floor(t / this.DB), o = this.s << i & this.DM, h = this.t - 1; h >= 0; --h) {
e[h + s + 1] = this[h] >> r | o, o = (this[h] & n) << i;
}
for (h = s - 1; h >= 0; --h) {
e[h] = 0;
}
e[s] = o, e.t = this.t + s + 1, e.s = this.s, e.clamp();
}, t.prototype.rShiftTo = function (t, e) {
e.s = this.s;
var i = Math.floor(t / this.DB);
if (i >= this.t) e.t = 0;else {
var r = t % this.DB,
n = this.DB - r,
s = (1 << r) - 1;
e[0] = this[i] >> r;
for (var o = i + 1; o < this.t; ++o) {
e[o - i - 1] |= (this[o] & s) << n, e[o - i] = this[o] >> r;
}
r > 0 && (e[this.t - i - 1] |= (this.s & s) << n), e.t = this.t - i, e.clamp();
}
}, t.prototype.subTo = function (t, e) {
for (var i = 0, r = 0, n = Math.min(t.t, this.t); i < n;) {
r += this[i] - t[i], e[i++] = r & this.DM, r >>= this.DB;
}
if (t.t < this.t) {
for (r -= t.s; i < this.t;) {
r += this[i], e[i++] = r & this.DM, r >>= this.DB;
}
r += this.s;
} else {
for (r += this.s; i < t.t;) {
r -= t[i], e[i++] = r & this.DM, r >>= this.DB;
}
r -= t.s;
}
e.s = r < 0 ? -1 : 0, r < -1 ? e[i++] = this.DV + r : r > 0 && (e[i++] = r), e.t = i, e.clamp();
}, t.prototype.multiplyTo = function (e, i) {
var r = this.abs(),
n = e.abs(),
s = r.t;
for (i.t = s + n.t; --s >= 0;) {
i[s] = 0;
}
for (s = 0; s < n.t; ++s) {
i[s + r.t] = r.am(0, n[s], i, s, 0, r.t);
}
i.s = 0, i.clamp(), this.s != e.s && t.ZERO.subTo(i, i);
}, t.prototype.squareTo = function (t) {
for (var e = this.abs(), i = t.t = 2 * e.t; --i >= 0;) {
t[i] = 0;
}
for (i = 0; i < e.t - 1; ++i) {
var r = e.am(i, e[i], t, 2 * i, 0, 1);
(t[i + e.t] += e.am(i + 1, 2 * e[i], t, 2 * i + 1, r, e.t - i - 1)) >= e.DV && (t[i + e.t] -= e.DV, t[i + e.t + 1] = 1);
}
t.t > 0 && (t[t.t - 1] += e.am(i, e[i], t, 2 * i, 0, 1)), t.s = 0, t.clamp();
}, t.prototype.divRemTo = function (e, i, r) {
var n = e.abs();
if (!(n.t <= 0)) {
var s = this.abs();
if (s.t < n.t) return null != i && i.fromInt(0), void (null != r && this.copyTo(r));
null == r && (r = N());
var o = N(),
h = this.s,
a = e.s,
u = this.DB - F(n[n.t - 1]);
u > 0 ? (n.lShiftTo(u, o), s.lShiftTo(u, r)) : (n.copyTo(o), s.copyTo(r));
var c = o.t,
f = o[c - 1];
if (0 != f) {
var l = f * (1 << this.F1) + (c > 1 ? o[c - 2] >> this.F2 : 0),
p = this.FV / l,
g = (1 << this.F1) / l,
d = 1 << this.F2,
v = r.t,
m = v - c,
y = null == i ? N() : i;
for (o.dlShiftTo(m, y), r.compareTo(y) >= 0 && (r[r.t++] = 1, r.subTo(y, r)), t.ONE.dlShiftTo(c, y), y.subTo(o, o); o.t < c;) {
o[o.t++] = 0;
}
for (; --m >= 0;) {
var b = r[--v] == f ? this.DM : Math.floor(r[v] * p + (r[v - 1] + d) * g);
if ((r[v] += o.am(0, b, r, m, 0, c)) < b) for (o.dlShiftTo(m, y), r.subTo(y, r); r[v] < --b;) {
r.subTo(y, r);
}
}
null != i && (r.drShiftTo(c, i), h != a && t.ZERO.subTo(i, i)), r.t = c, r.clamp(), u > 0 && r.rShiftTo(u, r), h < 0 && t.ZERO.subTo(r, r);
}
}
}, t.prototype.invDigit = function () {
if (this.t < 1) return 0;
var t = this[0];
if (0 == (1 & t)) return 0;
var e = 3 & t;
return (e = (e = (e = (e = e * (2 - (15 & t) * e) & 15) * (2 - (255 & t) * e) & 255) * (2 - ((65535 & t) * e & 65535)) & 65535) * (2 - t * e % this.DV) % this.DV) > 0 ? this.DV - e : -e;
}, t.prototype.isEven = function () {
return 0 == (this.t > 0 ? 1 & this[0] : this.s);
}, t.prototype.exp = function (e, i) {
if (e > 4294967295 || e < 1) return t.ONE;
var r = N(),
n = N(),
s = i.convert(this),
o = F(e) - 1;
for (s.copyTo(r); --o >= 0;) {
if (i.sqrTo(r, n), (e & 1 << o) > 0) i.mulTo(n, s, r);else {
var h = r;
r = n, n = h;
}
}
return i.revert(r);
}, t.prototype.chunkSize = function (t) {
return Math.floor(Math.LN2 * this.DB / Math.log(t));
}, t.prototype.toRadix = function (t) {
if (null == t && (t = 10), 0 == this.signum() || t < 2 || t > 36) return "0";
var e = this.chunkSize(t),
i = Math.pow(t, e),
r = C(i),
n = N(),
s = N(),
o = "";
for (this.divRemTo(r, n, s); n.signum() > 0;) {
o = (i + s.intValue()).toString(t).substr(1) + o, n.divRemTo(r, n, s);
}
return s.intValue().toString(t) + o;
}, t.prototype.fromRadix = function (e, i) {
this.fromInt(0), null == i && (i = 10);
for (var r = this.chunkSize(i), n = Math.pow(i, r), s = !1, o = 0, h = 0, a = 0; a < e.length; ++a) {
var u = H(e, a);
u < 0 ? "-" == e.charAt(a) && 0 == this.signum() && (s = !0) : (h = i * h + u, ++o >= r && (this.dMultiply(n), this.dAddOffset(h, 0), o = 0, h = 0));
}
o > 0 && (this.dMultiply(Math.pow(i, o)), this.dAddOffset(h, 0)), s && t.ZERO.subTo(this, this);
}, t.prototype.fromNumber = function (e, i, r) {
if ("number" == typeof i) {
if (e < 2) this.fromInt(1);else for (this.fromNumber(e, r), this.testBit(e - 1) || this.bitwiseTo(t.ONE.shiftLeft(e - 1), s, this), this.isEven() && this.dAddOffset(1, 0); !this.isProbablePrime(i);) {
this.dAddOffset(2, 0), this.bitLength() > e && this.subTo(t.ONE.shiftLeft(e - 1), this);
}
} else {
var n = [],
o = 7 & e;
n.length = 1 + (e >> 3), i.nextBytes(n), o > 0 ? n[0] &= (1 << o) - 1 : n[0] = 0, this.fromString(n, 256);
}
}, t.prototype.bitwiseTo = function (t, e, i) {
var r,
n,
s = Math.min(t.t, this.t);
for (r = 0; r < s; ++r) {
i[r] = e(this[r], t[r]);
}
if (t.t < this.t) {
for (n = t.s & this.DM, r = s; r < this.t; ++r) {
i[r] = e(this[r], n);
}
i.t = this.t;
} else {
for (n = this.s & this.DM, r = s; r < t.t; ++r) {
i[r] = e(n, t[r]);
}
i.t = t.t;
}
i.s = e(this.s, t.s), i.clamp();
}, t.prototype.changeBit = function (e, i) {
var r = t.ONE.shiftLeft(e);
return this.bitwiseTo(r, i, r), r;
}, t.prototype.addTo = function (t, e) {
for (var i = 0, r = 0, n = Math.min(t.t, this.t); i < n;) {
r += this[i] + t[i], e[i++] = r & this.DM, r >>= this.DB;
}
if (t.t < this.t) {
for (r += t.s; i < this.t;) {
r += this[i], e[i++] = r & this.DM, r >>= this.DB;
}
r += this.s;
} else {
for (r += this.s; i < t.t;) {
r += t[i], e[i++] = r & this.DM, r >>= this.DB;
}
r += t.s;
}
e.s = r < 0 ? -1 : 0, r > 0 ? e[i++] = r : r < -1 && (e[i++] = this.DV + r), e.t = i, e.clamp();
}, t.prototype.dMultiply = function (t) {
this[this.t] = this.am(0, t - 1, this, 0, 0, this.t), ++this.t, this.clamp();
}, t.prototype.dAddOffset = function (t, e) {
if (0 != t) {
for (; this.t <= e;) {
this[this.t++] = 0;
}
for (this[e] += t; this[e] >= this.DV;) {
this[e] -= this.DV, ++e >= this.t && (this[this.t++] = 0), ++this[e];
}
}
}, t.prototype.multiplyLowerTo = function (t, e, i) {
var r = Math.min(this.t + t.t, e);
for (i.s = 0, i.t = r; r > 0;) {
i[--r] = 0;
}
for (var n = i.t - this.t; r < n; ++r) {
i[r + this.t] = this.am(0, t[r], i, r, 0, this.t);
}
for (n = Math.min(t.t, e); r < n; ++r) {
this.am(0, t[r], i, r, 0, e - r);
}
i.clamp();
}, t.prototype.multiplyUpperTo = function (t, e, i) {
--e;
var r = i.t = this.t + t.t - e;
for (i.s = 0; --r >= 0;) {
i[r] = 0;
}
for (r = Math.max(e - this.t, 0); r < t.t; ++r) {
i[this.t + r - e] = this.am(e - r, t[r], i, 0, 0, this.t + r - e);
}
i.clamp(), i.drShiftTo(1, i);
}, t.prototype.modInt = function (t) {
if (t <= 0) return 0;
var e = this.DV % t,
i = this.s < 0 ? t - 1 : 0;
if (this.t > 0) if (0 == e) i = this[0] % t;else for (var r = this.t - 1; r >= 0; --r) {
i = (e * i + this[r]) % t;
}
return i;
}, t.prototype.millerRabin = function (e) {
var i = this.subtract(t.ONE),
r = i.getLowestSetBit();
if (r <= 0) return !1;
var n = i.shiftRight(r);
(e = e + 1 >> 1) > x.length && (e = x.length);
for (var s = N(), o = 0; o < e; ++o) {
s.fromInt(x[Math.floor(Math.random() * x.length)]);
var h = s.modPow(n, this);
if (0 != h.compareTo(t.ONE) && 0 != h.compareTo(i)) {
for (var a = 1; a++ < r && 0 != h.compareTo(i);) {
if (0 == (h = h.modPowInt(2, this)).compareTo(t.ONE)) return !1;
}
if (0 != h.compareTo(i)) return !1;
}
}
return !0;
}, t.prototype.square = function () {
var t = N();
return this.squareTo(t), t;
}, t.prototype.gcda = function (t, e) {
var i = this.s < 0 ? this.negate() : this.clone(),
r = t.s < 0 ? t.negate() : t.clone();
if (i.compareTo(r) < 0) {
var n = i;
i = r, r = n;
}
var s = i.getLowestSetBit(),
o = r.getLowestSetBit();
if (o < 0) e(i);else {
s < o && (o = s), o > 0 && (i.rShiftTo(o, i), r.rShiftTo(o, r));
var h = function h() {
(s = i.getLowestSetBit()) > 0 && i.rShiftTo(s, i), (s = r.getLowestSetBit()) > 0 && r.rShiftTo(s, r), i.compareTo(r) >= 0 ? (i.subTo(r, i), i.rShiftTo(1, i)) : (r.subTo(i, r), r.rShiftTo(1, r)), i.signum() > 0 ? setTimeout(h, 0) : (o > 0 && r.lShiftTo(o, r), setTimeout(function () {
e(r);
}, 0));
};
setTimeout(h, 10);
}
}, t.prototype.fromNumberAsync = function (e, i, r, n) {
if ("number" == typeof i) {
if (e < 2) this.fromInt(1);else {
this.fromNumber(e, r), this.testBit(e - 1) || this.bitwiseTo(t.ONE.shiftLeft(e - 1), s, this), this.isEven() && this.dAddOffset(1, 0);
var o = this,
h = function h() {
o.dAddOffset(2, 0), o.bitLength() > e && o.subTo(t.ONE.shiftLeft(e - 1), o), o.isProbablePrime(i) ? setTimeout(function () {
n();
}, 0) : setTimeout(h, 0);
};
setTimeout(h, 0);
}
} else {
var a = [],
u = 7 & e;
a.length = 1 + (e >> 3), i.nextBytes(a), u > 0 ? a[0] &= (1 << u) - 1 : a[0] = 0, this.fromString(a, 256);
}
}, t;
}(),
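      // O: NullExp -- identity "reduction" used by pow() for plain (non-modular) exponentiation.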
O = function () {
function t() {}
return t.prototype.convert = function (t) {
return t;
}, t.prototype.revert = function (t) {
return t;
}, t.prototype.mulTo = function (t, e, i) {
t.multiplyTo(e, i);
}, t.prototype.sqrTo = function (t, e) {
t.squareTo(e);
}, t;
}(),
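      // A: Classic reduction -- modular arithmetic via plain divide-and-remainder.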
A = function () {
function t(t) {
this.m = t;
}
return t.prototype.convert = function (t) {
return t.s < 0 || t.compareTo(this.m) >= 0 ? t.mod(this.m) : t;
}, t.prototype.revert = function (t) {
return t;
}, t.prototype.reduce = function (t) {
t.divRemTo(this.m, null, t);
}, t.prototype.mulTo = function (t, e, i) {
t.multiplyTo(e, i), this.reduce(i);
}, t.prototype.sqrTo = function (t, e) {
t.squareTo(e), this.reduce(e);
}, t;
}(),
V = function () {
function t(t) {
this.m = t, this.mp = t.invDigit(), this.mpl = 32767 & this.mp, this.mph = this.mp >> 15, this.um = (1 << t.DB - 15) - 1, this.mt2 = 2 * t.t;
}
return t.prototype.convert = function (t) {
var e = N();
return t.abs().dlShiftTo(this.m.t, e), e.divRemTo(this.m, null, e), t.s < 0 && e.compareTo(B.ZERO) > 0 && this.m.subTo(e, e), e;
}, t.prototype.revert = function (t) {
var e = N();
return t.copyTo(e), this.reduce(e), e;
}, t.prototype.reduce = function (t) {
for (; t.t <= this.mt2;) {
t[t.t++] = 0;
}
for (var e = 0; e < this.m.t; ++e) {
var i = 32767 & t[e],
r = i * this.mpl + ((i * this.mph + (t[e] >> 15) * this.mpl & this.um) << 15) & t.DM;
for (t[i = e + this.m.t] += this.m.am(0, r, t, e, 0, this.m.t); t[i] >= t.DV;) {
t[i] -= t.DV, t[++i]++;
}
}
t.clamp(), t.drShiftTo(this.m.t, t), t.compareTo(this.m) >= 0 && t.subTo(this.m, t);
}, t.prototype.mulTo = function (t, e, i) {
t.multiplyTo(e, i), this.reduce(i);
}, t.prototype.sqrTo = function (t, e) {
t.squareTo(e), this.reduce(e);
}, t;
}(),
I = function () {
function t(t) {
this.m = t, this.r2 = N(), this.q3 = N(), B.ONE.dlShiftTo(2 * t.t, this.r2), this.mu = this.r2.divide(t);
}
return t.prototype.convert = function (t) {
if (t.s < 0 || t.t > 2 * this.m.t) return t.mod(this.m);
if (t.compareTo(this.m) < 0) return t;
var e = N();
return t.copyTo(e), this.reduce(e), e;
}, t.prototype.revert = function (t) {
return t;
}, t.prototype.reduce = function (t) {
for (t.drShiftTo(this.m.t - 1, this.r2), t.t > this.m.t + 1 && (t.t = this.m.t + 1, t.clamp()), this.mu.multiplyUpperTo(this.r2, this.m.t + 1, this.q3), this.m.multiplyLowerTo(this.q3, this.m.t + 1, this.r2); t.compareTo(this.r2) < 0;) {
t.dAddOffset(1, this.m.t + 1);
}
for (t.subTo(this.r2, t); t.compareTo(this.m) >= 0;) {
t.subTo(this.m, t);
}
}, t.prototype.mulTo = function (t, e, i) {
t.multiplyTo(e, i), this.reduce(i);
}, t.prototype.sqrTo = function (t, e) {
t.squareTo(e), this.reduce(e);
}, t;
}();
function N() {
return new B(null);
}
function P(t, e) {
return new B(t, e);
}
var M = "undefined" != typeof navigator;
M && "Microsoft Internet Explorer" == navigator.appName ? (B.prototype.am = function (t, e, i, r, n, s) {
for (var o = 32767 & e, h = e >> 15; --s >= 0;) {
var a = 32767 & this[t],
u = this[t++] >> 15,
c = h * a + u * o;
n = ((a = o * a + ((32767 & c) << 15) + i[r] + (1073741823 & n)) >>> 30) + (c >>> 15) + h * u + (n >>> 30), i[r++] = 1073741823 & a;
}
return n;
}, S = 30) : M && "Netscape" != navigator.appName ? (B.prototype.am = function (t, e, i, r, n, s) {
for (; --s >= 0;) {
var o = e * this[t++] + i[r] + n;
n = Math.floor(o / 67108864), i[r++] = 67108863 & o;
}
return n;
}, S = 26) : (B.prototype.am = function (t, e, i, r, n, s) {
for (var o = 16383 & e, h = e >> 14; --s >= 0;) {
var a = 16383 & this[t],
u = this[t++] >> 14,
c = h * a + u * o;
n = ((a = o * a + ((16383 & c) << 14) + i[r] + n) >> 28) + (c >> 14) + h * u, i[r++] = 268435455 & a;
}
return n;
}, S = 28), B.prototype.DB = S, B.prototype.DM = (1 << S) - 1, B.prototype.DV = 1 << S, B.prototype.FV = Math.pow(2, 52), B.prototype.F1 = 52 - S, B.prototype.F2 = 2 * S - 52;
var j,
q,
L = [];
for (j = "0".charCodeAt(0), q = 0; q <= 9; ++q) {
L[j++] = q;
}
for (j = "a".charCodeAt(0), q = 10; q < 36; ++q) {
L[j++] = q;
}
for (j = "A".charCodeAt(0), q = 10; q < 36; ++q) {
L[j++] = q;
}
function H(t, e) {
var i = L[t.charCodeAt(e)];
return null == i ? -1 : i;
}
function C(t) {
var e = N();
return e.fromInt(t), e;
}
function F(t) {
var e,
i = 1;
return 0 != (e = t >>> 16) && (t = e, i += 16), 0 != (e = t >> 8) && (t = e, i += 8), 0 != (e = t >> 4) && (t = e, i += 4), 0 != (e = t >> 2) && (t = e, i += 2), 0 != (e = t >> 1) && (t = e, i += 1), i;
}
B.ZERO = C(0), B.ONE = C(1);
var U,
K,
k = function () {
function t() {
this.i = 0, this.j = 0, this.S = [];
}
return t.prototype.init = function (t) {
var e, i, r;
for (e = 0; e < 256; ++e) {
this.S[e] = e;
}
for (i = 0, e = 0; e < 256; ++e) {
i = i + this.S[e] + t[e % t.length] & 255, r = this.S[e], this.S[e] = this.S[i], this.S[i] = r;
}
this.i = 0, this.j = 0;
}, t.prototype.next = function () {
var t;
return this.i = this.i + 1 & 255, this.j = this.j + this.S[this.i] & 255, t = this.S[this.i], this.S[this.i] = this.S[this.j], this.S[this.j] = t, this.S[t + this.S[this.i] & 255];
}, t;
}(),
_ = null;
if (null == _) {
_ = [], K = 0;
var z = void 0;
if (window.crypto && window.crypto.getRandomValues) {
var Z = new Uint32Array(256);
for (window.crypto.getRandomValues(Z), z = 0; z < Z.length; ++z) {
_[K++] = 255 & Z[z];
}
}
var G = 0,
$ = function $(t) {
if ((G = G || 0) >= 256 || K >= 256) window.removeEventListener ? window.removeEventListener("mousemove", $, !1) : window.detachEvent && window.detachEvent("onmousemove", $);else try {
var e = t.x + t.y;
_[K++] = 255 & e, G += 1;
} catch (t) {}
};
window.addEventListener ? window.addEventListener("mousemove", $, !1) : window.attachEvent && window.attachEvent("onmousemove", $);
}
function Y() {
if (null == U) {
for (U = new k(); K < 256;) {
var t = Math.floor(65536 * Math.random());
_[K++] = 255 & t;
}
for (U.init(_), K = 0; K < _.length; ++K) {
_[K] = 0;
}
K = 0;
}
return U.next();
}
var J = function () {
function t() {}
return t.prototype.nextBytes = function (t) {
for (var e = 0; e < t.length; ++e) {
t[e] = Y();
}
}, t;
}(),
X = function () {
function t() {
this.n = null, this.e = 0, this.d = null, this.p = null, this.q = null, this.dmp1 = null, this.dmq1 = null, this.coeff = null;
}
return t.prototype.doPublic = function (t) {
return t.modPowInt(this.e, this.n);
}, t.prototype.doPrivate = function (t) {
if (null == this.p || null == this.q) return t.modPow(this.d, this.n);
for (var e = t.mod(this.p).modPow(this.dmp1, this.p), i = t.mod(this.q).modPow(this.dmq1, this.q); e.compareTo(i) < 0;) {
e = e.add(this.p);
}
return e.subtract(i).multiply(this.coeff).mod(this.p).multiply(this.q).add(i);
}, t.prototype.setPublic = function (t, e) {
null != t && null != e && t.length > 0 && e.length > 0 ? (this.n = P(t, 16), this.e = parseInt(e, 16)) : console.error("Invalid RSA public key");
}, t.prototype.encrypt = function (t) {
var e = this.n.bitLength() + 7 >> 3,
i = function (t, e) {
if (e < t.length + 11) return console.error("Message too long for RSA"), null;
for (var i = [], r = t.length - 1; r >= 0 && e > 0;) {
var n = t.charCodeAt(r--);
n < 128 ? i[--e] = n : n > 127 && n < 2048 ? (i[--e] = 63 & n | 128, i[--e] = n >> 6 | 192) : (i[--e] = 63 & n | 128, i[--e] = n >> 6 & 63 | 128, i[--e] = n >> 12 | 224);
}
i[--e] = 0;
for (var s = new J(), o = []; e > 2;) {
for (o[0] = 0; 0 == o[0];) {
s.nextBytes(o);
}
i[--e] = o[0];
}
return i[--e] = 2, i[--e] = 0, new B(i);
}(t, e);
if (null == i) return null;
var r = this.doPublic(i);
if (null == r) return null;
for (var n = r.toString(16), s = n.length, o = 0; o < 2 * e - s; o++) {
n = "0" + n;
}
return n;
}, t.prototype.setPrivate = function (t, e, i) {
null != t && null != e && t.length > 0 && e.length > 0 ? (this.n = P(t, 16), this.e = parseInt(e, 16), this.d = P(i, 16)) : console.error("Invalid RSA private key");
}, t.prototype.setPrivateEx = function (t, e, i, r, n, s, o, h) {
null != t && null != e && t.length > 0 && e.length > 0 ? (this.n = P(t, 16), this.e = parseInt(e, 16), this.d = P(i, 16), this.p = P(r, 16), this.q = P(n, 16), this.dmp1 = P(s, 16), this.dmq1 = P(o, 16), this.coeff = P(h, 16)) : console.error("Invalid RSA private key");
}, t.prototype.generate = function (t, e) {
var i = new J(),
r = t >> 1;
this.e = parseInt(e, 16);
for (var n = new B(e, 16);;) {
for (; this.p = new B(t - r, 1, i), 0 != this.p.subtract(B.ONE).gcd(n).compareTo(B.ONE) || !this.p.isProbablePrime(10);) {
;
}
for (; this.q = new B(r, 1, i), 0 != this.q.subtract(B.ONE).gcd(n).compareTo(B.ONE) || !this.q.isProbablePrime(10);) {
;
}
if (this.p.compareTo(this.q) <= 0) {
var s = this.p;
this.p = this.q, this.q = s;
}
var o = this.p.subtract(B.ONE),
h = this.q.subtract(B.ONE),
a = o.multiply(h);
if (0 == a.gcd(n).compareTo(B.ONE)) {
this.n = this.p.multiply(this.q), this.d = n.modInverse(a), this.dmp1 = this.d.mod(o), this.dmq1 = this.d.mod(h), this.coeff = this.q.modInverse(this.p);
break;
}
}
}, t.prototype.decrypt = function (t) {
var e = P(t, 16),
i = this.doPrivate(e);
return null == i ? null : function (t, e) {
for (var i = t.toByteArray(), r = 0; r < i.length && 0 == i[r];) {
++r;
}
if (i.length - r != e - 1 || 2 != i[r]) return null;
for (++r; 0 != i[r];) {
if (++r >= i.length) return null;
}
for (var n = ""; ++r < i.length;) {
var s = 255 & i[r];
s < 128 ? n += String.fromCharCode(s) : s > 191 && s < 224 ? (n += String.fromCharCode((31 & s) << 6 | 63 & i[r + 1]), ++r) : (n += String.fromCharCode((15 & s) << 12 | (63 & i[r + 1]) << 6 | 63 & i[r + 2]), r += 2);
}
return n;
}(i, this.n.bitLength() + 7 >> 3);
}, t.prototype.generateAsync = function (t, e, i) {
var r = new J(),
n = t >> 1;
this.e = parseInt(e, 16);
var s = new B(e, 16),
o = this,
h = function h() {
var e = function e() {
if (o.p.compareTo(o.q) <= 0) {
var t = o.p;
o.p = o.q, o.q = t;
}
var e = o.p.subtract(B.ONE),
r = o.q.subtract(B.ONE),
n = e.multiply(r);
0 == n.gcd(s).compareTo(B.ONE) ? (o.n = o.p.multiply(o.q), o.d = s.modInverse(n), o.dmp1 = o.d.mod(e), o.dmq1 = o.d.mod(r), o.coeff = o.q.modInverse(o.p), setTimeout(function () {
i();
}, 0)) : setTimeout(h, 0);
},
a = function a() {
o.q = N(), o.q.fromNumberAsync(n, 1, r, function () {
o.q.subtract(B.ONE).gcda(s, function (t) {
0 == t.compareTo(B.ONE) && o.q.isProbablePrime(10) ? setTimeout(e, 0) : setTimeout(a, 0);
});
});
},
u = function u() {
o.p = N(), o.p.fromNumberAsync(t - n, 1, r, function () {
o.p.subtract(B.ONE).gcda(s, function (t) {
0 == t.compareTo(B.ONE) && o.p.isProbablePrime(10) ? setTimeout(a, 0) : setTimeout(u, 0);
});
});
};
setTimeout(u, 0);
};
setTimeout(h, 0);
}, t.prototype.sign = function (t, e, i) {
var r = function (t, e) {
if (e < t.length + 22) return console.error("Message too long for RSA"), null;
for (var i = e - t.length - 6, r = "", n = 0; n < i; n += 2) {
r += "ff";
}
return P("0001" + r + "00" + t, 16);
}((Q[i] || "") + e(t).toString(), this.n.bitLength() / 4);
if (null == r) return null;
var n = this.doPrivate(r);
if (null == n) return null;
var s = n.toString(16);
return 0 == (1 & s.length) ? s : "0" + s;
}, t.prototype.verify = function (t, e, i) {
var r = P(e, 16),
n = this.doPublic(r);
return null == n ? null : function (t) {
for (var e in Q) {
if (Q.hasOwnProperty(e)) {
var i = Q[e],
r = i.length;
if (t.substr(0, r) == i) return t.substr(r);
}
}
return t;
}(n.toString(16).replace(/^1f+00/, "")) == i(t).toString();
}, t;
}(),
Q = {
md2: "3020300c06082a864886f70d020205000410",
md5: "3020300c06082a864886f70d020505000410",
sha1: "3021300906052b0e03021a05000414",
sha224: "302d300d06096086480165030402040500041c",
sha256: "3031300d060960864801650304020105000420",
sha384: "3041300d060960864801650304020205000430",
sha512: "3051300d060960864801650304020305000440",
ripemd160: "3021300906052b2403020105000414"
},
W = {};
W.lang = {
extend: function extend(t, e, i) {
if (!e || !t) throw new Error("YAHOO.lang.extend failed, please check that all dependencies are included.");
var r = function r() {};
if (r.prototype = e.prototype, t.prototype = new r(), t.prototype.constructor = t, t.superclass = e.prototype, e.prototype.constructor == Object.prototype.constructor && (e.prototype.constructor = e), i) {
var n;
for (n in i) {
t.prototype[n] = i[n];
}
var s = function s() {},
o = ["toString", "valueOf"];
try {
/MSIE/.test(navigator.userAgent) && (s = function s(t, e) {
for (n = 0; n < o.length; n += 1) {
var i = o[n],
r = e[i];
"function" == typeof r && r != Object.prototype[i] && (t[i] = r);
}
});
} catch (t) {}
s(t.prototype, i);
}
}
};
var tt = {};
void 0 !== tt.asn1 && tt.asn1 || (tt.asn1 = {}), tt.asn1.ASN1Util = new function () {
this.integerToByteHex = function (t) {
var e = t.toString(16);
return e.length % 2 == 1 && (e = "0" + e), e;
}, this.bigIntToMinTwosComplementsHex = function (t) {
var e = t.toString(16);
if ("-" != e.substr(0, 1)) e.length % 2 == 1 ? e = "0" + e : e.match(/^[0-7]/) || (e = "00" + e);else {
var i = e.substr(1).length;
i % 2 == 1 ? i += 1 : e.match(/^[0-7]/) || (i += 2);
for (var r = "", n = 0; n < i; n++) {
r += "f";
}
e = new B(r, 16).xor(t).add(B.ONE).toString(16).replace(/^-/, "");
}
return e;
}, this.getPEMStringFromHex = function (t, e) {
return hextopem(t, e);
}, this.newObject = function (t) {
var e = tt.asn1,
i = e.DERBoolean,
r = e.DERInteger,
n = e.DERBitString,
s = e.DEROctetString,
o = e.DERNull,
h = e.DERObjectIdentifier,
a = e.DEREnumerated,
u = e.DERUTF8String,
c = e.DERNumericString,
f = e.DERPrintableString,
l = e.DERTeletexString,
p = e.DERIA5String,
g = e.DERUTCTime,
d = e.DERGeneralizedTime,
v = e.DERSequence,
m = e.DERSet,
y = e.DERTaggedObject,
b = e.ASN1Util.newObject,
T = Object.keys(t);
if (1 != T.length) throw "key of param shall be only one.";
var S = T[0];
if (-1 == ":bool:int:bitstr:octstr:null:oid:enum:utf8str:numstr:prnstr:telstr:ia5str:utctime:gentime:seq:set:tag:".indexOf(":" + S + ":")) throw "undefined key: " + S;
if ("bool" == S) return new i(t[S]);
if ("int" == S) return new r(t[S]);
if ("bitstr" == S) return new n(t[S]);
if ("octstr" == S) return new s(t[S]);
if ("null" == S) return new o(t[S]);
if ("oid" == S) return new h(t[S]);
if ("enum" == S) return new a(t[S]);
if ("utf8str" == S) return new u(t[S]);
if ("numstr" == S) return new c(t[S]);
if ("prnstr" == S) return new f(t[S]);
if ("telstr" == S) return new l(t[S]);
if ("ia5str" == S) return new p(t[S]);
if ("utctime" == S) return new g(t[S]);
if ("gentime" == S) return new d(t[S]);
if ("seq" == S) {
for (var E = t[S], w = [], D = 0; D < E.length; D++) {
var x = b(E[D]);
w.push(x);
}
return new v({
array: w
});
}
if ("set" == S) {
for (E = t[S], w = [], D = 0; D < E.length; D++) {
x = b(E[D]), w.push(x);
}
return new m({
array: w
});
}
if ("tag" == S) {
var R = t[S];
if ("[object Array]" === Object.prototype.toString.call(R) && 3 == R.length) {
var B = b(R[2]);
return new y({
tag: R[0],
explicit: R[1],
obj: B
});
}
var O = {};
if (void 0 !== R.explicit && (O.explicit = R.explicit), void 0 !== R.tag && (O.tag = R.tag), void 0 === R.obj) throw "obj shall be specified for 'tag'.";
return O.obj = b(R.obj), new y(O);
}
}, this.jsonToASN1HEX = function (t) {
return this.newObject(t).getEncodedHex();
};
}(), tt.asn1.ASN1Util.oidHexToInt = function (t) {
for (var e = "", i = parseInt(t.substr(0, 2), 16), r = (e = Math.floor(i / 40) + "." + i % 40, ""), n = 2; n < t.length; n += 2) {
var s = ("00000000" + parseInt(t.substr(n, 2), 16).toString(2)).slice(-8);
r += s.substr(1, 7), "0" == s.substr(0, 1) && (e = e + "." + new B(r, 2).toString(10), r = "");
}
return e;
}, tt.asn1.ASN1Util.oidIntToHex = function (t) {
var e = function e(t) {
var e = t.toString(16);
return 1 == e.length && (e = "0" + e), e;
},
i = function i(t) {
var i = "",
r = new B(t, 10).toString(2),
n = 7 - r.length % 7;
7 == n && (n = 0);
for (var s = "", o = 0; o < n; o++) {
s += "0";
}
for (r = s + r, o = 0; o < r.length - 1; o += 7) {
var h = r.substr(o, 7);
o != r.length - 7 && (h = "1" + h), i += e(parseInt(h, 2));
}
return i;
};
if (!t.match(/^[0-9.]+$/)) throw "malformed oid string: " + t;
var r = "",
n = t.split("."),
s = 40 * parseInt(n[0]) + parseInt(n[1]);
r += e(s), n.splice(0, 2);
for (var o = 0; o < n.length; o++) {
r += i(n[o]);
}
return r;
}, tt.asn1.ASN1Object = function () {
this.getLengthHexFromValue = function () {
if (void 0 === this.hV || null == this.hV) throw "this.hV is null or undefined.";
if (this.hV.length % 2 == 1) throw "value hex must be even length: n=" + this.hV.length + ",v=" + this.hV;
var t = this.hV.length / 2,
e = t.toString(16);
if (e.length % 2 == 1 && (e = "0" + e), t < 128) return e;
var i = e.length / 2;
if (i > 15) throw "ASN.1 length too long to represent by 8x: n = " + t.toString(16);
return (128 + i).toString(16) + e;
}, this.getEncodedHex = function () {
return (null == this.hTLV || this.isModified) && (this.hV = this.getFreshValueHex(), this.hL = this.getLengthHexFromValue(), this.hTLV = this.hT + this.hL + this.hV, this.isModified = !1), this.hTLV;
}, this.getValueHex = function () {
return this.getEncodedHex(), this.hV;
}, this.getFreshValueHex = function () {
return "";
};
}, tt.asn1.DERAbstractString = function (t) {
tt.asn1.DERAbstractString.superclass.constructor.call(this), this.getString = function () {
return this.s;
}, this.setString = function (t) {
this.hTLV = null, this.isModified = !0, this.s = t, this.hV = stohex(this.s);
}, this.setStringHex = function (t) {
this.hTLV = null, this.isModified = !0, this.s = null, this.hV = t;
}, this.getFreshValueHex = function () {
return this.hV;
}, void 0 !== t && ("string" == typeof t ? this.setString(t) : void 0 !== t.str ? this.setString(t.str) : void 0 !== t.hex && this.setStringHex(t.hex));
}, W.lang.extend(tt.asn1.DERAbstractString, tt.asn1.ASN1Object), tt.asn1.DERAbstractTime = function (t) {
tt.asn1.DERAbstractTime.superclass.constructor.call(this), this.localDateToUTC = function (t) {
return utc = t.getTime() + 6e4 * t.getTimezoneOffset(), new Date(utc);
}, this.formatDate = function (t, e, i) {
var r = this.zeroPadding,
n = this.localDateToUTC(t),
s = String(n.getFullYear());
"utc" == e && (s = s.substr(2, 2));
var o = s + r(String(n.getMonth() + 1), 2) + r(String(n.getDate()), 2) + r(String(n.getHours()), 2) + r(String(n.getMinutes()), 2) + r(String(n.getSeconds()), 2);
if (!0 === i) {
var h = n.getMilliseconds();
if (0 != h) {
var a = r(String(h), 3);
o = o + "." + (a = a.replace(/[0]+$/, ""));
}
}
return o + "Z";
}, this.zeroPadding = function (t, e) {
return t.length >= e ? t : new Array(e - t.length + 1).join("0") + t;
}, this.getString = function () {
return this.s;
}, this.setString = function (t) {
this.hTLV = null, this.isModified = !0, this.s = t, this.hV = stohex(t);
}, this.setByDateValue = function (t, e, i, r, n, s) {
var o = new Date(Date.UTC(t, e - 1, i, r, n, s, 0));
this.setByDate(o);
}, this.getFreshValueHex = function () {
return this.hV;
};
}, W.lang.extend(tt.asn1.DERAbstractTime, tt.asn1.ASN1Object), tt.asn1.DERAbstractStructured = function (t) {
tt.asn1.DERAbstractString.superclass.constructor.call(this), this.setByASN1ObjectArray = function (t) {
this.hTLV = null, this.isModified = !0, this.asn1Array = t;
}, this.appendASN1Object = function (t) {
this.hTLV = null, this.isModified = !0, this.asn1Array.push(t);
}, this.asn1Array = new Array(), void 0 !== t && void 0 !== t.array && (this.asn1Array = t.array);
}, W.lang.extend(tt.asn1.DERAbstractStructured, tt.asn1.ASN1Object), tt.asn1.DERBoolean = function () {
tt.asn1.DERBoolean.superclass.constructor.call(this), this.hT = "01", this.hTLV = "0101ff";
}, W.lang.extend(tt.asn1.DERBoolean, tt.asn1.ASN1Object), tt.asn1.DERInteger = function (t) {
tt.asn1.DERInteger.superclass.constructor.call(this), this.hT = "02", this.setByBigInteger = function (t) {
this.hTLV = null, this.isModified = !0, this.hV = tt.asn1.ASN1Util.bigIntToMinTwosComplementsHex(t);
}, this.setByInteger = function (t) {
var e = new B(String(t), 10);
this.setByBigInteger(e);
}, this.setValueHex = function (t) {
this.hV = t;
}, this.getFreshValueHex = function () {
return this.hV;
}, void 0 !== t && (void 0 !== t.bigint ? this.setByBigInteger(t.bigint) : void 0 !== t["int"] ? this.setByInteger(t["int"]) : "number" == typeof t ? this.setByInteger(t) : void 0 !== t.hex && this.setValueHex(t.hex));
}, W.lang.extend(tt.asn1.DERInteger, tt.asn1.ASN1Object), tt.asn1.DERBitString = function (t) {
if (void 0 !== t && void 0 !== t.obj) {
var e = tt.asn1.ASN1Util.newObject(t.obj);
t.hex = "00" + e.getEncodedHex();
}
tt.asn1.DERBitString.superclass.constructor.call(this), this.hT = "03", this.setHexValueIncludingUnusedBits = function (t) {
this.hTLV = null, this.isModified = !0, this.hV = t;
}, this.setUnusedBitsAndHexValue = function (t, e) {
if (t < 0 || 7 < t) throw "unused bits shall be from 0 to 7: u = " + t;
var i = "0" + t;
this.hTLV = null, this.isModified = !0, this.hV = i + e;
}, this.setByBinaryString = function (t) {
var e = 8 - (t = t.replace(/0+$/, "")).length % 8;
8 == e && (e = 0);
for (var i = 0; i <= e; i++) {
t += "0";
}
var r = "";
for (i = 0; i < t.length - 1; i += 8) {
var n = t.substr(i, 8),
s = parseInt(n, 2).toString(16);
1 == s.length && (s = "0" + s), r += s;
}
this.hTLV = null, this.isModified = !0, this.hV = "0" + e + r;
}, this.setByBooleanArray = function (t) {
for (var e = "", i = 0; i < t.length; i++) {
1 == t[i] ? e += "1" : e += "0";
}
this.setByBinaryString(e);
}, this.newFalseArray = function (t) {
for (var e = new Array(t), i = 0; i < t; i++) {
e[i] = !1;
}
return e;
}, this.getFreshValueHex = function () {
return this.hV;
}, void 0 !== t && ("string" == typeof t && t.toLowerCase().match(/^[0-9a-f]+$/) ? this.setHexValueIncludingUnusedBits(t) : void 0 !== t.hex ? this.setHexValueIncludingUnusedBits(t.hex) : void 0 !== t.bin ? this.setByBinaryString(t.bin) : void 0 !== t.array && this.setByBooleanArray(t.array));
}, W.lang.extend(tt.asn1.DERBitString, tt.asn1.ASN1Object), tt.asn1.DEROctetString = function (t) {
if (void 0 !== t && void 0 !== t.obj) {
var e = tt.asn1.ASN1Util.newObject(t.obj);
t.hex = e.getEncodedHex();
}
tt.asn1.DEROctetString.superclass.constructor.call(this, t), this.hT = "04";
}, W.lang.extend(tt.asn1.DEROctetString, tt.asn1.DERAbstractString), tt.asn1.DERNull = function () {
tt.asn1.DERNull.superclass.constructor.call(this), this.hT = "05", this.hTLV = "0500";
}, W.lang.extend(tt.asn1.DERNull, tt.asn1.ASN1Object), tt.asn1.DERObjectIdentifier = function (t) {
var e = function e(t) {
var e = t.toString(16);
return 1 == e.length && (e = "0" + e), e;
},
i = function i(t) {
var i = "",
r = new B(t, 10).toString(2),
n = 7 - r.length % 7;
7 == n && (n = 0);
for (var s = "", o = 0; o < n; o++) {
s += "0";
}
for (r = s + r, o = 0; o < r.length - 1; o += 7) {
var h = r.substr(o, 7);
o != r.length - 7 && (h = "1" + h), i += e(parseInt(h, 2));
}
return i;
};
tt.asn1.DERObjectIdentifier.superclass.constructor.call(this), this.hT = "06", this.setValueHex = function (t) {
this.hTLV = null, this.isModified = !0, this.s = null, this.hV = t;
}, this.setValueOidString = function (t) {
if (!t.match(/^[0-9.]+$/)) throw "malformed oid string: " + t;
var r = "",
n = t.split("."),
s = 40 * parseInt(n[0]) + parseInt(n[1]);
r += e(s), n.splice(0, 2);
for (var o = 0; o < n.length; o++) {
r += i(n[o]);
}
this.hTLV = null, this.isModified = !0, this.s = null, this.hV = r;
}, this.setValueName = function (t) {
var e = tt.asn1.x509.OID.name2oid(t);
if ("" === e) throw "DERObjectIdentifier oidName undefined: " + t;
this.setValueOidString(e);
}, this.getFreshValueHex = function () {
return this.hV;
}, void 0 !== t && ("string" == typeof t ? t.match(/^[0-2].[0-9.]+$/) ? this.setValueOidString(t) : this.setValueName(t) : void 0 !== t.oid ? this.setValueOidString(t.oid) : void 0 !== t.hex ? this.setValueHex(t.hex) : void 0 !== t.name && this.setValueName(t.name));
}, W.lang.extend(tt.asn1.DERObjectIdentifier, tt.asn1.ASN1Object), tt.asn1.DEREnumerated = function (t) {
tt.asn1.DEREnumerated.superclass.constructor.call(this), this.hT = "0a", this.setByBigInteger = function (t) {
this.hTLV = null, this.isModified = !0, this.hV = tt.asn1.ASN1Util.bigIntToMinTwosComplementsHex(t);
}, this.setByInteger = function (t) {
var e = new B(String(t), 10);
this.setByBigInteger(e);
}, this.setValueHex = function (t) {
this.hV = t;
}, this.getFreshValueHex = function () {
return this.hV;
}, void 0 !== t && (void 0 !== t["int"] ? this.setByInteger(t["int"]) : "number" == typeof t ? this.setByInteger(t) : void 0 !== t.hex && this.setValueHex(t.hex));
}, W.lang.extend(tt.asn1.DEREnumerated, tt.asn1.ASN1Object), tt.asn1.DERUTF8String = function (t) {
tt.asn1.DERUTF8String.superclass.constructor.call(this, t), this.hT = "0c";
}, W.lang.extend(tt.asn1.DERUTF8String, tt.asn1.DERAbstractString), tt.asn1.DERNumericString = function (t) {
tt.asn1.DERNumericString.superclass.constructor.call(this, t), this.hT = "12";
}, W.lang.extend(tt.asn1.DERNumericString, tt.asn1.DERAbstractString), tt.asn1.DERPrintableString = function (t) {
tt.asn1.DERPrintableString.superclass.constructor.call(this, t), this.hT = "13";
}, W.lang.extend(tt.asn1.DERPrintableString, tt.asn1.DERAbstractString), tt.asn1.DERTeletexString = function (t) {
tt.asn1.DERTeletexString.superclass.constructor.call(this, t), this.hT = "14";
}, W.lang.extend(tt.asn1.DERTeletexString, tt.asn1.DERAbstractString), tt.asn1.DERIA5String = function (t) {
tt.asn1.DERIA5String.superclass.constructor.call(this, t), this.hT = "16";
}, W.lang.extend(tt.asn1.DERIA5String, tt.asn1.DERAbstractString), tt.asn1.DERUTCTime = function (t) {
tt.asn1.DERUTCTime.superclass.constructor.call(this, t), this.hT = "17", this.setByDate = function (t) {
this.hTLV = null, this.isModified = !0, this.date = t, this.s = this.formatDate(this.date, "utc"), this.hV = stohex(this.s);
}, this.getFreshValueHex = function () {
return void 0 === this.date && void 0 === this.s && (this.date = new Date(), this.s = this.formatDate(this.date, "utc"), this.hV = stohex(this.s)), this.hV;
}, void 0 !== t && (void 0 !== t.str ? this.setString(t.str) : "string" == typeof t && t.match(/^[0-9]{12}Z$/) ? this.setString(t) : void 0 !== t.hex ? this.setStringHex(t.hex) : void 0 !== t.date && this.setByDate(t.date));
}, W.lang.extend(tt.asn1.DERUTCTime, tt.asn1.DERAbstractTime), tt.asn1.DERGeneralizedTime = function (t) {
tt.asn1.DERGeneralizedTime.superclass.constructor.call(this, t), this.hT = "18", this.withMillis = !1, this.setByDate = function (t) {
this.hTLV = null, this.isModified = !0, this.date = t, this.s = this.formatDate(this.date, "gen", this.withMillis), this.hV = stohex(this.s);
}, this.getFreshValueHex = function () {
return void 0 === this.date && void 0 === this.s && (this.date = new Date(), this.s = this.formatDate(this.date, "gen", this.withMillis), this.hV = stohex(this.s)), this.hV;
}, void 0 !== t && (void 0 !== t.str ? this.setString(t.str) : "string" == typeof t && t.match(/^[0-9]{14}Z$/) ? this.setString(t) : void 0 !== t.hex ? this.setStringHex(t.hex) : void 0 !== t.date && this.setByDate(t.date), !0 === t.millis && (this.withMillis = !0));
}, W.lang.extend(tt.asn1.DERGeneralizedTime, tt.asn1.DERAbstractTime), tt.asn1.DERSequence = function (t) {
tt.asn1.DERSequence.superclass.constructor.call(this, t), this.hT = "30", this.getFreshValueHex = function () {
for (var t = "", e = 0; e < this.asn1Array.length; e++) {
t += this.asn1Array[e].getEncodedHex();
}
return this.hV = t, this.hV;
};
}, W.lang.extend(tt.asn1.DERSequence, tt.asn1.DERAbstractStructured), tt.asn1.DERSet = function (t) {
tt.asn1.DERSet.superclass.constructor.call(this, t), this.hT = "31", this.sortFlag = !0, this.getFreshValueHex = function () {
for (var t = new Array(), e = 0; e < this.asn1Array.length; e++) {
var i = this.asn1Array[e];
t.push(i.getEncodedHex());
}
return 1 == this.sortFlag && t.sort(), this.hV = t.join(""), this.hV;
}, void 0 !== t && void 0 !== t.sortflag && 0 == t.sortflag && (this.sortFlag = !1);
}, W.lang.extend(tt.asn1.DERSet, tt.asn1.DERAbstractStructured), tt.asn1.DERTaggedObject = function (t) {
tt.asn1.DERTaggedObject.superclass.constructor.call(this), this.hT = "a0", this.hV = "", this.isExplicit = !0, this.asn1Object = null, this.setASN1Object = function (t, e, i) {
this.hT = e, this.isExplicit = t, this.asn1Object = i, this.isExplicit ? (this.hV = this.asn1Object.getEncodedHex(), this.hTLV = null, this.isModified = !0) : (this.hV = null, this.hTLV = i.getEncodedHex(), this.hTLV = this.hTLV.replace(/^../, e), this.isModified = !1);
}, this.getFreshValueHex = function () {
return this.hV;
}, void 0 !== t && (void 0 !== t.tag && (this.hT = t.tag), void 0 !== t.explicit && (this.isExplicit = t.explicit), void 0 !== t.obj && (this.asn1Object = t.obj, this.setASN1Object(this.isExplicit, this.hT, this.asn1Object)));
}, W.lang.extend(tt.asn1.DERTaggedObject, tt.asn1.ASN1Object);
var _et,
it = (_et = function et(t, e) {
return (_et = Object.setPrototypeOf || {
__proto__: []
} instanceof Array && function (t, e) {
t.__proto__ = e;
} || function (t, e) {
for (var i in e) {
Object.prototype.hasOwnProperty.call(e, i) && (t[i] = e[i]);
}
})(t, e);
}, function (t, e) {
if ("function" != typeof e && null !== e) throw new TypeError("Class extends value " + String(e) + " is not a constructor or null");
function i() {
this.constructor = t;
}
_et(t, e), t.prototype = null === e ? Object.create(e) : (i.prototype = e.prototype, new i());
}),
rt = function (t) {
function e(i) {
var r = t.call(this) || this;
return i && ("string" == typeof i ? r.parseKey(i) : (e.hasPrivateKeyProperty(i) || e.hasPublicKeyProperty(i)) && r.parsePropertiesFrom(i)), r;
}
return it(e, t), e.prototype.parseKey = function (t) {
try {
var e = 0,
i = 0,
r = /^\s*(?:[0-9A-Fa-f][0-9A-Fa-f]\s*)+$/.test(t) ? function (t) {
var e;
if (void 0 === c) {
var i = "0123456789ABCDEF",
r = " \f\n\r\t \u2028\u2029";
for (c = {}, e = 0; e < 16; ++e) {
c[i.charAt(e)] = e;
}
for (i = i.toLowerCase(), e = 10; e < 16; ++e) {
c[i.charAt(e)] = e;
}
for (e = 0; e < r.length; ++e) {
c[r.charAt(e)] = -1;
}
}
var n = [],
s = 0,
o = 0;
for (e = 0; e < t.length; ++e) {
var h = t.charAt(e);
if ("=" == h) break;
if (-1 != (h = c[h])) {
if (void 0 === h) throw new Error("Illegal character at offset " + e);
s |= h, ++o >= 2 ? (n[n.length] = s, s = 0, o = 0) : s <<= 4;
}
}
if (o) throw new Error("Hex encoding incomplete: 4 bits missing");
return n;
}(t) : d.unarmor(t),
n = w.decode(r);
if (3 === n.sub.length && (n = n.sub[2].sub[0]), 9 === n.sub.length) {
e = n.sub[1].getHexStringValue(), this.n = P(e, 16), i = n.sub[2].getHexStringValue(), this.e = parseInt(i, 16);
var s = n.sub[3].getHexStringValue();
this.d = P(s, 16);
var o = n.sub[4].getHexStringValue();
this.p = P(o, 16);
var h = n.sub[5].getHexStringValue();
this.q = P(h, 16);
var a = n.sub[6].getHexStringValue();
this.dmp1 = P(a, 16);
var u = n.sub[7].getHexStringValue();
this.dmq1 = P(u, 16);
var f = n.sub[8].getHexStringValue();
this.coeff = P(f, 16);
} else {
if (2 !== n.sub.length) return !1;
var l = n.sub[1].sub[0];
e = l.sub[0].getHexStringValue(), this.n = P(e, 16), i = l.sub[1].getHexStringValue(), this.e = parseInt(i, 16);
}
return !0;
} catch (t) {
return !1;
}
}, e.prototype.getPrivateBaseKey = function () {
var t = {
array: [new tt.asn1.DERInteger({
"int": 0
}), new tt.asn1.DERInteger({
bigint: this.n
}), new tt.asn1.DERInteger({
"int": this.e
}), new tt.asn1.DERInteger({
bigint: this.d
}), new tt.asn1.DERInteger({
bigint: this.p
}), new tt.asn1.DERInteger({
bigint: this.q
}), new tt.asn1.DERInteger({
bigint: this.dmp1
}), new tt.asn1.DERInteger({
bigint: this.dmq1
}), new tt.asn1.DERInteger({
bigint: this.coeff
})]
};
return new tt.asn1.DERSequence(t).getEncodedHex();
}, e.prototype.getPrivateBaseKeyB64 = function () {
return l(this.getPrivateBaseKey());
}, e.prototype.getPublicBaseKey = function () {
var t = new tt.asn1.DERSequence({
array: [new tt.asn1.DERObjectIdentifier({
oid: "1.2.840.113549.1.1.1"
}), new tt.asn1.DERNull()]
}),
e = new tt.asn1.DERSequence({
array: [new tt.asn1.DERInteger({
bigint: this.n
}), new tt.asn1.DERInteger({
"int": this.e
})]
}),
i = new tt.asn1.DERBitString({
hex: "00" + e.getEncodedHex()
});
return new tt.asn1.DERSequence({
array: [t, i]
}).getEncodedHex();
}, e.prototype.getPublicBaseKeyB64 = function () {
return l(this.getPublicBaseKey());
}, e.wordwrap = function (t, e) {
if (!t) return t;
var i = "(.{1," + (e = e || 64) + "})( +|$\n?)|(.{1," + e + "})";
return t.match(RegExp(i, "g")).join("\n");
}, e.prototype.getPrivateKey = function () {
var t = "-----BEGIN RSA PRIVATE KEY-----\n";
return (t += e.wordwrap(this.getPrivateBaseKeyB64()) + "\n") + "-----END RSA PRIVATE KEY-----";
}, e.prototype.getPublicKey = function () {
var t = "-----BEGIN PUBLIC KEY-----\n";
return (t += e.wordwrap(this.getPublicBaseKeyB64()) + "\n") + "-----END PUBLIC KEY-----";
}, e.hasPublicKeyProperty = function (t) {
return (t = t || {}).hasOwnProperty("n") && t.hasOwnProperty("e");
}, e.hasPrivateKeyProperty = function (t) {
return (t = t || {}).hasOwnProperty("n") && t.hasOwnProperty("e") && t.hasOwnProperty("d") && t.hasOwnProperty("p") && t.hasOwnProperty("q") && t.hasOwnProperty("dmp1") && t.hasOwnProperty("dmq1") && t.hasOwnProperty("coeff");
}, e.prototype.parsePropertiesFrom = function (t) {
this.n = t.n, this.e = t.e, t.hasOwnProperty("d") && (this.d = t.d, this.p = t.p, this.q = t.q, this.dmp1 = t.dmp1, this.dmq1 = t.dmq1, this.coeff = t.coeff);
}, e;
}(X);
var nt = function () {
function t(t) {
t = t || {}, this.default_key_size = t.default_key_size ? parseInt(t.default_key_size, 10) : 1024, this.default_public_exponent = t.default_public_exponent || "010001", this.log = t.log || !1, this.key = null;
}
return t.prototype.setKey = function (t) {
this.log && this.key && console.warn("A key was already set, overriding existing."), this.key = new rt(t);
}, t.prototype.setPrivateKey = function (t) {
this.setKey(t);
}, t.prototype.setPublicKey = function (t) {
this.setKey(t);
}, t.prototype.decrypt = function (t) {
try {
return this.getKey().decrypt(p(t));
} catch (t) {
return !1;
}
}, t.prototype.encrypt = function (t) {
try {
return l(this.getKey().encrypt(t));
} catch (t) {
return !1;
}
}, t.prototype.sign = function (t, e, i) {
try {
return l(this.getKey().sign(t, e, i));
} catch (t) {
return !1;
}
}, t.prototype.verify = function (t, e, i) {
try {
return this.getKey().verify(t, p(e), i);
} catch (t) {
return !1;
}
}, t.prototype.getKey = function (t) {
if (!this.key) {
if (this.key = new rt(), t && "[object Function]" === {}.toString.call(t)) return void this.key.generateAsync(this.default_key_size, this.default_public_exponent, t);
this.key.generate(this.default_key_size, this.default_public_exponent);
}
return this.key;
}, t.prototype.getPrivateKey = function () {
return this.getKey().getPrivateKey();
}, t.prototype.getPrivateKeyB64 = function () {
return this.getKey().getPrivateBaseKeyB64();
}, t.prototype.getPublicKey = function () {
return this.getKey().getPublicKey();
}, t.prototype.getPublicKeyB64 = function () {
return this.getKey().getPublicBaseKeyB64();
}, t.version = "3.1.0", t;
}();
}],
e = {
d: function d(t, i) {
for (var r in i) {
e.o(i, r) && !e.o(t, r) && Object.defineProperty(t, r, {
enumerable: !0,
get: i[r]
});
}
},
o: function o(t, e) {
return Object.prototype.hasOwnProperty.call(t, e);
}
},
i = {};
return t[1](0, i, e), i["default"];
}();
});
var test = function (password) {
  var n = new JSEncrypt();
  n.setPublicKey("-----BEGIN PUBLIC KEY-----\n MIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQDl/aCgRl9f/4ON9MewoVnV58OL\n OU2ALBi2FKc5yIsfSpivKxe7A6FitJjHva3WpM7gvVOinMehp6if2UNIkbaN+plW\n f5IwqEVxsNZpeixc4GsbY9dXEk3WtRjwGSyDLySzEESH/kpJVoxO7ijRYqU+2oSR\n wTBNePOk1H+LRQokgQIDAQAB\n -----END PUBLIC KEY-----");
  return n.encrypt(password);
};
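// A minimal sketch of the decryption side, assuming the matching RSA
// private key PEM is available (privateKeyPem is a hypothetical variable,
// not defined in this file). setPrivateKey and decrypt are the JSEncrypt
// methods defined in the bundle above; decrypt returns false on failure.
var testDecrypt = function (ciphertextB64, privateKeyPem) {
  var d = new JSEncrypt();
  d.setPrivateKey(privateKeyPem);
  return d.decrypt(ciphertextB64);
};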
|
}, t.prototype.andNot = function (t) {
var e = N();
return this.bitwiseTo(t, h, e), e;
}, t.prototype.not = function () {
|
FormManagerContext.tsx
|
import React from 'react';
import BaseManager from '../managers/BaseManager';
const FormManagerContext = React.createContext<null | ((name: string) => BaseManager<any>)>(null);
|
export default FormManagerContext;
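// A usage sketch (an assumption, not part of the original module): a hook
// that resolves a manager by field name, returning null when rendered
// outside a Provider. The hook name and its semantics are illustrative only.
export function useFormManager(name: string): BaseManager<any> | null {
  const getManager = React.useContext(FormManagerContext);
  return getManager ? getManager(name) : null;
}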
|
|
Blind Auction.py
|
from replit import clear
from art import logo
print(logo)
bids = {}
bidding_finished = False
def
|
(bidding_record):
    highest_bid = 0
    winner = ""
    for bidder in bidding_record:
        bid_amount = bidding_record[bidder]
        if bid_amount > highest_bid:
            highest_bid = bid_amount
            winner = bidder
    print(f"The winner is {winner} with a bid of ${highest_bid}")

while not bidding_finished:
    name = input("What is your name?: ")
    price = int(input("What is your bid?: $"))
    bids[name] = price
    should_continue = input("Are there any other bidders? Type 'yes' or 'no'.\n")
    if should_continue == "no":
        bidding_finished = True
        find_highest_bidder(bids)
    elif should_continue == "yes":
        clear()
|
find_highest_bidder
|
block.go
|
package bzip2
import (
"errors"
"math"
"github.com/larzconwell/bzip2/internal/bits"
"github.com/larzconwell/bzip2/internal/bwt"
"github.com/larzconwell/bzip2/internal/crc32"
"github.com/larzconwell/bzip2/internal/huffman"
"github.com/larzconwell/bzip2/internal/mtf"
"github.com/larzconwell/bzip2/internal/rle"
"github.com/larzconwell/bzip2/internal/rle2"
"github.com/larzconwell/bzip2/internal/symbols"
)
const (
// blockMagic signifies the beginning of a new block.
blockMagic = 0x314159265359
)
var (
// errBlockSizeReached occurs when the end of
// a block has been reached.
errBlockSizeReached = errors.New("bzip2: Block size reached")
)
// block handles the compression of data up to a set size.
type block struct {
runs *rle.RunList
size int
crc uint32
}
// newBlock creates a compression block for data up to the given size.
func
|
(size int) *block {
return &block{runs: rle.NewRunList(), size: size}
}
// Len returns the number of bytes written to the block.
func (b block) Len() int {
return b.runs.EncodedLen()
}
// Write writes p to the block. If writing p exceeds the block's size,
// only the bytes that fit will be written and errBlockSizeReached
// is returned.
func (b *block) Write(p []byte) (int, error) {
encodedlen := b.runs.Update(p)
if encodedlen > b.size {
trimmed := b.runs.Trim(encodedlen - b.size)
encodedlen = b.size
p = p[:len(p)-trimmed]
}
var err error
if encodedlen == b.size {
err = errBlockSizeReached
}
b.crc = crc32.Update(b.crc, p)
return len(p), err
}
// WriteBlock compresses the buffered content and writes
// a block to the bit writer given.
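// The steps mirror the bzip2 format: the initial run-length encoding is
// finalized, then the Burrows-Wheeler transform, move-to-front coding, a
// second run-length pass (RLE2), and finally Huffman coding with a tree
// selection applied every 50 symbols.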
func (b *block) WriteBlock(bw *bits.Writer) error {
rleData := b.runs.Encode()
syms, reducedSyms := symbols.Get(rleData)
// BWT step.
bwtData := make([]byte, len(rleData))
bwtidx := bwt.Transform(bwtData, rleData)
// MTF step.
mtfData := bwtData
mtf.Transform(reducedSyms, mtfData, bwtData)
// RLE2 step.
rle2Data := rle2.Encode(reducedSyms, mtfData)
freqs := rle2.GetFrequencies(reducedSyms, rle2Data)
// Set up the Huffman trees required to encode rle2Data.
trees, selections := huffman.GenerateTrees(freqs, rle2Data)
// Get the MTF-encoded Huffman tree selections.
treeSelectionSymbols := make(symbols.ReducedSet, len(trees))
for i := range trees {
treeSelectionSymbols[i] = byte(i)
}
treeSelectionBytes := make([]byte, len(selections))
for i, selection := range selections {
treeSelectionBytes[i] = byte(selection)
}
mtf.Transform(treeSelectionSymbols, treeSelectionBytes, treeSelectionBytes)
// Write the block header.
bw.WriteBits(48, blockMagic)
bw.WriteBits(32, uint64(b.crc))
bw.WriteBits(1, 0)
// Write the contents that build the decoding steps.
bw.WriteBits(24, uint64(bwtidx))
b.writeSymbolBitmaps(bw, syms)
bw.WriteBits(3, uint64(len(trees)))
bw.WriteBits(15, uint64(len(selections)))
b.writeTreeSelections(bw, treeSelectionBytes)
b.writeTreeCodes(bw, trees)
// Write the encoded contents using the generated Huffman trees,
// switching them out every 50 symbols.
encoded := 0
idx := 0
tree := trees[selections[idx]]
for _, b := range rle2Data {
if encoded == huffman.TreeSelectionLimit {
encoded = 0
idx++
tree = trees[selections[idx]]
}
code := tree.Codes[b]
bw.WriteBits(uint(code.Len), code.Bits)
encoded++
}
return bw.Err()
}
// writeSymbolBitmaps writes the bitmaps for the used symbols.
func (b *block) writeSymbolBitmaps(bw *bits.Writer, syms symbols.Set) {
rangesUsed := 0
ranges := make([]int, 16)
for i, r := range ranges {
// Toggle the bits for the 16 symbols in the range.
for j := 0; j < 16; j++ {
r = (r << 1) | syms[16*i+j]
}
ranges[i] = r
// Toggle the bit for the range in the bitmap.
present := 0
if r > 0 {
present = 1
}
rangesUsed = (rangesUsed << 1) | present
}
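// Illustrative example: if only bytes 97 ('a') and 98 ('b') occur, only
// the bit for range 6 (symbols 96-111) is set in rangesUsed, and that
// range's 16-bit map has the bits for offsets 1 and 2 set (counting from
// the most significant bit).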
bw.WriteBits(16, uint64(rangesUsed))
for _, r := range ranges {
if r > 0 {
bw.WriteBits(16, uint64(r))
}
}
}
// writeTreeSelections writes the Huffman tree selections in unary encoding.
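// For example, selection values 0, 2, and 1 are emitted as the bit
// strings "0", "110", and "10" respectively.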
func (b *block) writeTreeSelections(bw *bits.Writer, selections []byte) {
for _, selection := range selections {
for i := byte(0); i < selection; i++ {
bw.WriteBits(1, 1)
}
bw.WriteBits(1, 0)
}
}
// writeTreeCodes writes the delta-encoded code lengths for
// the Huffman trees' codes.
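// Each tree starts with its smallest length written in 5 bits; every code
// then emits one "10" (increment) or "11" (decrement) per unit of change
// from the running length, terminated by a single 0 bit.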
func (b *block) writeTreeCodes(bw *bits.Writer, trees []*huffman.Tree) {
for _, tree := range trees {
// Get the smallest code-length in the Huffman tree.
codelen := 0
for i, code := range tree.Codes {
if i == 0 || code.Len < codelen {
codelen = code.Len
}
}
bw.WriteBits(5, uint64(codelen))
// Write the code-lengths as modifications to the current length.
for _, code := range tree.Codes {
delta := int(math.Abs(float64(codelen - code.Len)))
// 2 is increment, 3 is decrement.
op := uint64(2)
if codelen > code.Len {
op = 3
}
codelen = code.Len
for i := 0; i < delta; i++ {
bw.WriteBits(2, op)
}
bw.WriteBits(1, 0)
}
}
}
|
newBlock
|