setup.py
from setuptools import setup, find_packages
from pyjamaparty.strutils.string_builder import StringBuilder
description = 'Set of casual python utilities'
long_description = StringBuilder('{}, written standing on shoulders of giants.'.format(description))
long_description += ' Tools include a string builder, singleton decorator'
requirements = []
setup(
name='pyjamaparty',
version='0.2',
description=description,
license="MIT",
long_description=str(long_description),
author='Karthik Rajasekaran',
author_email='[email protected]',
url="http://github.com/krajasek/pyjamaparty",
install_requires=requirements,
    packages=find_packages(exclude=('pyjamaparty.tests',)),
    python_requires='>=2.7'
)
message-command.ts
/* tslint:disable */
/* eslint-disable */
/**
* Jellyfin API
* No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator)
*
* The version of the OpenAPI document: 10.8.0
*
*
* NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
* https://openapi-generator.tech
* Do not edit the class manually.
*/
/**
*
* @export
* @interface MessageCommand
*/
export interface MessageCommand {
/**
     *
     * @type {string}
     * @memberof MessageCommand
*/
'Header'?: string | null;
/**
*
* @type {string}
* @memberof MessageCommand
*/
'Text': string;
/**
*
* @type {number}
* @memberof MessageCommand
*/
'TimeoutMs'?: number | null;
}
collector.rs
//! Cleans up cached pages according to when they were last accessed
//! and how much space the pages are currently using on the underlying
//! filesystem.
use std::time;
use actix::prelude::*;
use futures::prelude::*;
use futures::Future as _Future;
use log::*;
use tokio::timer::Interval;
use crate::ps::agent::cache::{self, Error, Page, Result};
use crate::ps::agent::config::CacheConfig as Config;
use crate::ps::agent::database::{Database, PageRecord};
use crate::ps::agent::messages::Response;
use crate::ps::agent::types::{ServiceFuture, ServiceId, WithProps, Worker};
use crate::ps::agent::{self, config, messages, server, Future};
use crate::ps::util::actor as a;
use crate::ps::util::futures::*;
/// A collector that cleans up cache pages on the underlying filesystem.
#[derive(Clone, Default, Debug)]
pub struct CachePageCollector;
#[derive(Clone, Debug)]
pub struct Props {
pub db: Database,
pub config: Config,
}
impl Actor for CachePageCollector {
type Context = Context<Self>;
fn started(&mut self, _ctx: &mut Self::Context) {
info!("started {:?} actor", self.id());
}
}
impl WithProps for CachePageCollector {
type Props = Props;
}
impl Supervised for CachePageCollector {}
impl SystemService for CachePageCollector {
fn service_started(&mut self, _ctx: &mut Self::Context) {
info!("started {:?} system service", self.id());
}
}
impl CachePageCollector {
/// Removes the page from the underlying filesystem.
pub fn remove_page(&self, record: &PageRecord) -> Result<()> {
let id = self.id();
self.borrow_props(|props: Option<&Props>| {
debug!(
"Removing page {} - last used on {}",
record.id,
record.str_time()
);
let props: &Props = props.unwrap_or_else(|| panic!("{:?}: missing props", id));
let db = &props.db;
let config = &props.config;
            // Remove the page from the database first, then from the file system. This ordering
            // is important: if it were reversed, the file system page could be removed and the
            // db delete could then fail, which would produce bad data responses. Doing it
            // in this order, the worst case scenario is that the underlying page will still
            // take up space on the file system, but wouldn't be accounted for in the collector.
            // This case is fixed once that page is cached again.
db.delete_page(&record)?;
let (package, channel, _, index) = cache::from_page_key(&record.id);
let page = Page::new(config, &package, &channel, 0, 0, index);
page.delete()
})
}
/// Removes cache pages according to the soft aged records
/// implementation.
    pub fn soft_recycle(&self) -> Result<i64> {
let id = self.id();
self.borrow_props(|props: Option<&Props>| {
let props: &Props = props.unwrap_or_else(|| panic!("{:?}: missing props", id));
let db = &props.db;
let config = &props.config;
let mut current_size = db.get_total_size()?;
info!(
"Running soft recycle - current_size: {} soft_cache_size: {}",
current_size,
config.soft_cache_size()
);
let recycled =
cache::soft_cleanup(self, db, config.soft_cache_size(), &mut current_size)?;
if recycled > 0 {
info!("Soft recycling recaptured {} page(s)", recycled);
}
Ok(current_size)
})
}
/// Removes cache pages according to the hard aged records
/// implementation.
pub fn hard_recycle(&self) -> Result<i64> {
let id = self.id();
self.borrow_props(|props: Option<&Props>| {
let props: &Props = props.unwrap_or_else(|| panic!("{:?}: missing props", id));
let db = &props.db;
let config = &props.config;
let mut current_size = db.get_total_size()?;
info!(
"Running hard recycle - current_size: {} hard_cache_size: {}",
current_size,
            config.hard_cache_size()
);
let recycled =
cache::hard_cleanup(self, db, config.hard_cache_size(), &mut current_size)?;
if current_size as u64 > config.hard_cache_size() {
let msg = format!(
"current_size: {} hard_cache_size: {}",
current_size,
config.hard_cache_size()
);
Err(Error::no_space(msg))
} else {
if recycled > 0 {
info!("Hard recycling recaptured {} page(s)", recycled);
}
Ok(current_size)
}
})
}
}
// It is also possible to return a Future here (see `ServiceFuture`):
impl Handler<messages::WorkerStartup> for CachePageCollector {
type Result = ();
fn handle(&mut self, _msg: messages::WorkerStartup, _ctx: &mut Self::Context) -> Self::Result {
let id = self.id();
Arbiter::spawn(ServiceFuture::wrap(self.run()).map_err(move |e| {
e.render_with_context(id);
a::send_unconditionally::<server::StatusServer, _>(Response::error(e));
}))
}
}
impl Worker for CachePageCollector {
fn id(&self) -> ServiceId {
ServiceId("CacheCollector")
}
}
impl CachePageCollector {
fn run(&self) -> Future<()> {
        // This is needed due to the 'static constraint placed on the returned Future.
        // Cloning `CachePageCollector` is cheap because copies are just refcounted.
let this = self.clone();
// run one collector step every N minutes
let interval =
time::Duration::from_secs(config::constants::CACHE_COLLECTOR_RUN_INTERVAL_SECS);
let first_run = time::Instant::now() + time::Duration::from_secs(30);
let timer = Interval::new(first_run, interval);
info!(
"Configuring CacheCollector on a {} minute timer",
config::constants::CACHE_COLLECTOR_RUN_INTERVAL_SECS / 60
);
// runs five soft recycles, followed by one hard recycle. This pattern
// is followed indefinitely.
let f = timer
.map_err(Into::<agent::Error>::into)
.fold(0, move |step, _| -> agent::Future<i32> {
if step < 5 {
this.soft_recycle().map(|_| step + 1).or_else(|e| {
warn!("Soft recycle failure {:?}", e);
Ok(step + 1)
})
} else {
this.hard_recycle().map(|_| 0).or_else(|e| {
error!("Hard recycle failure {:?}", e);
Ok(0)
})
}
.into_future()
.into_trait()
})
.map(|_| ())
.into_trait();
to_future_trait(f)
}
}
#[cfg(test)]
#[macro_use]
mod test {
use std::path;
use ::time::{now_utc, Duration};
use lazy_static::lazy_static;
use tempfile::tempdir;
use pennsieve_macros::path;
use super::*;
use crate::ps::agent::cache::PageCreator;
use crate::ps::util;
lazy_static! {
static ref TEMP_DIR: path::PathBuf = tempdir().unwrap().into_path();
}
#[test]
fn soft_recycle_with_deletes() {
let config = Config::new(
&*TEMP_DIR, // base_path
150, // page_size
100, // soft_cache_size
0, // hard_cache_size
);
assert!(cache::create_page_template(&config).is_ok());
let page_creator = PageCreator::new();
let db = util::database::temp().unwrap();
let page = Page {
path: path!(&*TEMP_DIR, "p1", "c_collector_1", "150", "2"; extension => "bin"), // "${TEMPDIR}/p1/c_collector_1/150/2.bin"
start: 0,
end: 0,
size: 5,
id: 2,
};
page_creator
.copy_page_template(&page.path, &config)
.unwrap();
let record1 = PageRecord {
id: String::from("p1.c_collector_1.150.2"),
nan_filled: false,
complete: true,
size: 150,
last_used: now_utc().to_timespec() - Duration::weeks(20),
};
db.upsert_page(&record1).unwrap();
let record2 = PageRecord {
id: String::from("record"),
nan_filled: false,
complete: true,
size: 50,
last_used: now_utc().to_timespec() - Duration::weeks(10),
};
db.upsert_page(&record2).unwrap();
CachePageCollector::with_props(Props { config, db });
assert_eq!(CachePageCollector.soft_recycle().unwrap(), 50);
}
#[test]
fn soft_recycle_no_deletes() {
let config = Config::new(
&*TEMP_DIR, // base_path
0, // page_size
500, // soft_cache_size
0, // hard_cache_size
);
assert!(cache::create_page_template(&config).is_ok());
let db = util::database::temp().unwrap();
let record1 = PageRecord {
id: String::from("record-1"),
nan_filled: false,
complete: true,
size: 150,
last_used: now_utc().to_timespec() - Duration::weeks(20),
};
db.upsert_page(&record1).unwrap();
let record2 = PageRecord {
id: String::from("record-2"),
nan_filled: false,
complete: true,
size: 50,
last_used: now_utc().to_timespec() - Duration::weeks(10),
};
db.upsert_page(&record2).unwrap();
CachePageCollector::with_props(Props { config, db });
assert_eq!(CachePageCollector.soft_recycle().unwrap(), 200);
}
#[test]
fn hard_recycle_with_deletes() {
let config = Config::new(
&*TEMP_DIR, // base_path
150, // page_size
0, // soft_cache_size
100, // hard_cache_size
);
assert!(cache::create_page_template(&config).is_ok());
let page_creator = PageCreator::new();
let page = Page {
path: path!(&*TEMP_DIR, "p1", "c_collector_2", "150", "2"; extension => "bin"), // "${TEMPDIR}/p1/c_collector_2/150/2.bin"
start: 0,
end: 0,
size: 150,
id: 2,
};
page_creator
.copy_page_template(&page.path, &config)
.unwrap();
let db = util::database::temp().unwrap();
let record1 = PageRecord {
id: String::from("p1.c_collector_2.150.2"),
nan_filled: false,
complete: true,
size: 150,
last_used: now_utc().to_timespec() - Duration::days(20),
};
db.upsert_page(&record1).unwrap();
let record2 = PageRecord {
id: String::from("record"),
nan_filled: false,
complete: true,
size: 50,
last_used: now_utc().to_timespec() - Duration::hours(18),
};
db.upsert_page(&record2).unwrap();
CachePageCollector::with_props(Props { config, db });
assert_eq!(CachePageCollector.hard_recycle().unwrap(), 50);
}
#[test]
fn hard_recycle_no_deletes() {
let config = Config::new(
&*TEMP_DIR, // base_path
0, // page_size
0, // soft_cache_size
500, // hard_cache_size
);
assert!(cache::create_page_template(&config).is_ok());
let db = util::database::temp().unwrap();
let record1 = PageRecord {
id: String::from("record-1"),
nan_filled: false,
complete: true,
size: 150,
last_used: now_utc().to_timespec() - Duration::weeks(20),
};
db.upsert_page(&record1).unwrap();
let record2 = PageRecord {
id: String::from("record-2"),
nan_filled: false,
complete: true,
size: 50,
last_used: now_utc().to_timespec() - Duration::weeks(10),
};
db.upsert_page(&record2).unwrap();
CachePageCollector::with_props(Props { config, db });
assert_eq!(CachePageCollector.hard_recycle().unwrap(), 200);
}
#[test]
fn hard_recycle_space_err() {
let config = Config::new(
&*TEMP_DIR, // base_path
0, // page_size
0, // soft_cache_size
10, // hard_cache_size
);
assert!(cache::create_page_template(&config).is_ok());
let db = util::database::temp().unwrap();
let record1 = PageRecord {
id: String::from("record-remove"),
nan_filled: false,
complete: true,
size: 150,
last_used: now_utc().to_timespec() - Duration::hours(10),
};
db.upsert_page(&record1).unwrap();
let record2 = PageRecord {
id: String::from("record"),
nan_filled: false,
complete: true,
size: 50,
last_used: now_utc().to_timespec() - Duration::hours(6),
};
db.upsert_page(&record2).unwrap();
CachePageCollector::with_props(Props { config, db });
assert!(CachePageCollector.hard_recycle().is_err());
}
}
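A standalone sketch of the cadence encoded by the fold in `run` above: five soft recycles, then one hard recycle, repeating indefinitely. The function name and driver loop are illustrative, with the actix/tokio machinery stripped away.

```rust
// Sketch of the recycle cadence: the counter advances on soft passes and
// resets after the hard pass, mirroring the `step < 5` branch in `run`.
fn next(step: i32) -> (&'static str, i32) {
    if step < 5 {
        ("soft", step + 1)
    } else {
        ("hard", 0)
    }
}

fn main() {
    let mut step = 0;
    let kinds: Vec<&str> = (0..12)
        .map(|_| {
            let (kind, next_step) = next(step);
            step = next_step;
            kind
        })
        .collect();
    // Each group of six timer ticks is five soft recycles and one hard recycle.
    assert_eq!(&kinds[..6], ["soft", "soft", "soft", "soft", "soft", "hard"]);
}
```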
soleka.py
# coding=utf8
from flask import Flask, render_template
from flask_restful.utils import cors
from flask_cors import CORS, cross_origin
import config
import models
from resources_v1.predictions import predictions_api_v1
from templates.templates import home
app = Flask(__name__)
CORS(app)
app.register_blueprint(predictions_api_v1, url_prefix='/api/v1')
@app.route('/')
def index():
return render_template("main.html")
if __name__ == '__main__':
models.initilize()
#app.run(host=config.HOST)
    app.run(debug=config.DEBUG, host=config.HOST)
gnet.go
// Copyright 2019 Andy Pan. All rights reserved.
// Copyright 2018 Joshua J Baker. All rights reserved.
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
package gnet
import (
"log"
"net"
"os"
"runtime"
"strings"
"time"
"github.com/panjf2000/gnet/internal/netpoll"
)
// Action is an action that occurs after the completion of an event.
type Action int
const (
// None indicates that no action should occur following an event.
None Action = iota
// Close closes the connection.
Close
// Shutdown shutdowns the server.
Shutdown
// Skip indicates that the connection ought to break the "loopReact" and resume.
Skip
)
// Server represents a server context which provides information about the
// running server and has control functions for managing state.
type Server struct {
// Multicore indicates whether the server will be effectively created with multi-cores, if so,
// then you must take care of synchronizing the shared data between all event callbacks, otherwise,
    // it will run the server with a single thread. The number of threads in the server will be automatically
// assigned to the value of runtime.NumCPU().
Multicore bool
    // Addr is the listening address that aligns with the addr string
    // passed to the Serve function.
Addr net.Addr
// NumLoops is the number of loops that the server is using.
NumLoops int
    // ReUsePort indicates whether SO_REUSEPORT is enabled.
ReUsePort bool
// TCPKeepAlive (SO_KEEPALIVE) socket option.
TCPKeepAlive time.Duration
}
// Conn is an interface of gnet connection.
type Conn interface {
// Context returns a user-defined context.
Context() (ctx interface{})
// SetContext sets a user-defined context.
SetContext(ctx interface{})
// LocalAddr is the connection's local socket address.
LocalAddr() (addr net.Addr)
// RemoteAddr is the connection's remote peer address.
RemoteAddr() (addr net.Addr)
// Wake triggers a React event for this connection.
//Wake()
// ReadFromUDP reads data for UDP socket.
ReadFromUDP() (buf []byte)
// ReadFrame returns either a frame from TCP stream based on codec or nil when there isn't a complete frame yet.
ReadFrame() (buf []byte)
    // Read reads all data from the inbound ring-buffer without moving the "read" pointer, which means
    // it does not actually evict the data from the ring-buffer; the data remains there until the
    // ResetBuffer method is invoked.
Read() (buf []byte)
// ResetBuffer resets the inbound ring-buffer, which means all data in the inbound ring-buffer has been evicted.
ResetBuffer()
// ShiftN shifts "read" pointer in buffer with the given length.
ShiftN(n int) (size int)
    // ReadN reads bytes with the given length from the inbound ring-buffer and event-loop-buffer, moving the
    // "read" pointer: it evicts the data from the buffer, and the eviction can't be revoked (put back into the
    // buffer). It reads data from the inbound ring-buffer and event-loop-buffer only when the length of the
    // available data is equal to the given "n"; otherwise, it will not read any data. So you should use this
    // function only if you know exactly the length of the subsequent TCP stream based on the protocol, like the
    // Content-Length attribute in an HTTP request, which indicates how much data you should read from the inbound ring-buffer.
ReadN(n int) (size int, buf []byte)
// BufferLength returns the length of available data in the inbound ring-buffer.
BufferLength() (size int)
// InboundBuffer returns the inbound ring-buffer.
//InboundBuffer() *ringbuffer.RingBuffer
    // SendTo writes data for UDP sockets; it allows you to send data back to the UDP socket in individual goroutines.
SendTo(buf []byte)
    // AsyncWrite writes data to the client/connection asynchronously; usually you would invoke it in individual
    // goroutines instead of the event-loop goroutines.
AsyncWrite(buf []byte)
// Wake triggers a React event for this connection.
Wake()
}
type (
// EventHandler represents the server events' callbacks for the Serve call.
    // Each event has an Action return value that is used to manage the state
// of the connection and server.
EventHandler interface {
// OnInitComplete fires when the server is ready for accepting connections.
// The server parameter has information and various utilities.
OnInitComplete(server Server) (action Action)
// OnOpened fires when a new connection has been opened.
// The info parameter has information about the connection such as
    // its local and remote address.
// Use the out return value to write data to the connection.
OnOpened(c Conn) (out []byte, action Action)
// OnClosed fires when a connection has been closed.
// The err parameter is the last known connection error.
OnClosed(c Conn, err error) (action Action)
// PreWrite fires just before any data is written to any client socket.
PreWrite()
// React fires when a connection sends the server data.
// Invoke c.Read() or c.ReadN(n) within the parameter c to read incoming data from client/connection.
// Use the out return value to write data to the client/connection.
React(c Conn) (out []byte, action Action)
// Tick fires immediately after the server starts and will fire again
// following the duration specified by the delay return value.
Tick() (delay time.Duration, action Action)
}
// EventServer is a built-in implementation of EventHandler which sets up each method with a default implementation,
// you can compose it with your own implementation of EventHandler when you don't want to implement all methods in EventHandler.
EventServer struct {
}
)
// OnInitComplete fires when the server is ready for accepting connections.
// The server parameter has information and various utilities.
func (es *EventServer) OnInitComplete(svr Server) (action Action) {
return
}
// OnOpened fires when a new connection has been opened.
// The info parameter has information about the connection such as
// its local and remote address.
// Use the out return value to write data to the connection.
func (es *EventServer) OnOpened(c Conn) (out []byte, action Action) {
return
}
// OnClosed fires when a connection has been closed.
// The err parameter is the last known connection error.
func (es *EventServer) OnClosed(c Conn, err error) (action Action) {
return
}
// PreWrite fires just before any data is written to any client socket.
func (es *EventServer) PreWrite() {
}
// React fires when a connection sends the server data.
// Invoke c.Read() or c.ReadN(n) within the parameter c to read incoming data from client/connection.
// Use the out return value to write data to the client/connection.
func (es *EventServer) React(c Conn) (out []byte, action Action) {
return
}
// Tick fires immediately after the server starts and will fire again
// following the duration specified by the delay return value.
func (es *EventServer) Tick() (delay time.Duration, action Action) {
return
}
// Serve starts handling events for the specified addresses.
//
// Addresses should use a scheme prefix and be formatted
// like `tcp://192.168.0.10:9851` or `unix://socket`.
// Valid network schemes:
// tcp - bind to both IPv4 and IPv6
// tcp4 - IPv4
// tcp6 - IPv6
// udp - bind to both IPv4 and IPv6
// udp4 - IPv4
// udp6 - IPv6
// unix - Unix Domain Socket
//
// The "tcp" network scheme is assumed when one is not specified.
func Serve(eventHandler EventHandler, addr string, opts ...Option) error {
var ln listener
defer ln.close()
options := initOptions(opts...)
ln.network, ln.addr = parseAddr(addr)
if ln.network == "unix" {
sniffError(os.RemoveAll(ln.addr))
}
var err error
if ln.network == "udp" {
if options.ReusePort && runtime.GOOS != "windows" {
ln.pconn, err = netpoll.ReusePortListenPacket(ln.network, ln.addr)
} else {
ln.pconn, err = net.ListenPacket(ln.network, ln.addr)
}
} else {
if options.ReusePort && runtime.GOOS != "windows" {
ln.ln, err = netpoll.ReusePortListen(ln.network, ln.addr)
} else {
ln.ln, err = net.Listen(ln.network, ln.addr)
}
}
if err != nil {
return err
}
if ln.pconn != nil {
ln.lnaddr = ln.pconn.LocalAddr()
} else {
ln.lnaddr = ln.ln.Addr()
}
if err := ln.system(); err != nil {
return err
}
return serve(eventHandler, &ln, options)
}
func parseAddr(addr string) (network, address string) {
network = "tcp"
address = addr
if strings.Contains(address, "://") {
parts := strings.Split(address, "://")
network = parts[0]
address = parts[1]
}
return
}
func sniffError(err error) {
if err != nil {
log.Println(err)
}
}
input.rs
extern crate unicode_width;
use super::super::app::{ActiveBlock, App, RouteId};
use crate::event::Key;
use crate::network::IoEvent;
use std::convert::TryInto;
use unicode_width::{UnicodeWidthChar, UnicodeWidthStr};
// Handle event when the search input block is active
pub fn handler(key: Key, app: &mut App) {
match key {
Key::Ctrl('k') => {
app.input.drain(app.input_idx..app.input.len());
}
Key::Ctrl('u') => {
app.input.drain(..app.input_idx);
app.input_idx = 0;
app.input_cursor_position = 0;
}
Key::Ctrl('l') => {
app.input = vec![];
app.input_idx = 0;
app.input_cursor_position = 0;
}
Key::Ctrl('w') => {
if app.input_cursor_position == 0 {
return;
}
let word_end = match app.input[..app.input_idx].iter().rposition(|&x| x != ' ') {
Some(index) => index + 1,
None => 0,
};
let word_start = match app.input[..word_end].iter().rposition(|&x| x == ' ') {
Some(index) => index + 1,
None => 0,
};
let deleted: String = app.input[word_start..app.input_idx].iter().collect();
let deleted_len: u16 = UnicodeWidthStr::width(deleted.as_str()).try_into().unwrap();
app.input.drain(word_start..app.input_idx);
app.input_idx = word_start;
app.input_cursor_position -= deleted_len;
}
Key::End | Key::Ctrl('e') => {
app.input_idx = app.input.len();
let input_string: String = app.input.iter().collect();
app.input_cursor_position = UnicodeWidthStr::width(input_string.as_str())
.try_into()
.unwrap();
}
Key::Home | Key::Ctrl('a') => {
app.input_idx = 0;
app.input_cursor_position = 0;
}
Key::Left | Key::Ctrl('b') => {
if !app.input.is_empty() && app.input_idx > 0 {
let last_c = app.input[app.input_idx - 1];
app.input_idx -= 1;
app.input_cursor_position -= compute_character_width(last_c);
}
}
Key::Right | Key::Ctrl('f') => {
if app.input_idx < app.input.len() {
let next_c = app.input[app.input_idx];
app.input_idx += 1;
app.input_cursor_position += compute_character_width(next_c);
}
}
Key::Esc => {
app.set_current_route_state(Some(ActiveBlock::Empty), Some(ActiveBlock::Library));
}
Key::Enter => {
let input_str: String = app.input.iter().collect();
process_input(app, input_str);
}
Key::Char(c) => {
app.input.insert(app.input_idx, c);
app.input_idx += 1;
app.input_cursor_position += compute_character_width(c);
}
Key::Backspace | Key::Ctrl('h') => {
if !app.input.is_empty() && app.input_idx > 0 {
let last_c = app.input.remove(app.input_idx - 1);
app.input_idx -= 1;
app.input_cursor_position -= compute_character_width(last_c);
}
}
Key::Delete | Key::Ctrl('d') => {
if !app.input.is_empty() && app.input_idx < app.input.len() {
app.input.remove(app.input_idx);
}
}
_ => {}
}
}
fn process_input(app: &mut App, input: String) {
// Don't do anything if there is no input
if input.is_empty() {
return;
}
// On searching for a track, clear the playlist selection
app.selected_playlist_index = Some(0);
if attempt_process_uri(app, &input, "https://open.spotify.com/", "/")
|| attempt_process_uri(app, &input, "spotify:", ":")
{
return;
}
// Default fallback behavior: treat the input as a raw search phrase.
app.dispatch(IoEvent::GetSearchResults(input, app.get_user_country()));
app.push_navigation_stack(RouteId::Search, ActiveBlock::SearchResultBlock);
}
fn spotify_resource_id(base: &str, uri: &str, sep: &str, resource_type: &str) -> (String, bool) {
let uri_prefix = format!("{}{}{}", base, resource_type, sep);
let id_string_with_query_params = uri.trim_start_matches(&uri_prefix);
let query_idx = id_string_with_query_params
.find('?')
.unwrap_or_else(|| id_string_with_query_params.len());
let id_string = id_string_with_query_params[0..query_idx].to_string();
// If the lengths aren't equal, we must have found a match.
let matched = id_string_with_query_params.len() != uri.len() && id_string.len() != uri.len();
(id_string, matched)
}
// Returns true if the input was successfully processed as a Spotify URI.
fn attempt_process_uri(app: &mut App, input: &str, base: &str, sep: &str) -> bool {
let (album_id, matched) = spotify_resource_id(base, input, sep, "album");
if matched {
app.dispatch(IoEvent::GetAlbum(album_id));
return true;
}
let (artist_id, matched) = spotify_resource_id(base, input, sep, "artist");
if matched {
app.get_artist(artist_id, "".to_string());
app.push_navigation_stack(RouteId::Artist, ActiveBlock::ArtistBlock);
return true;
}
let (track_id, matched) = spotify_resource_id(base, input, sep, "track");
if matched {
app.dispatch(IoEvent::GetAlbumForTrack(track_id));
return true;
}
let (playlist_id, matched) = spotify_resource_id(base, input, sep, "playlist");
if matched {
app.dispatch(IoEvent::GetPlaylistTracks(playlist_id, 0));
return true;
}
let (show_id, matched) = spotify_resource_id(base, input, sep, "show");
if matched {
app.dispatch(IoEvent::GetShowEpisodes(show_id));
return true;
}
false
}
fn compute_character_width(character: char) -> u16 {
UnicodeWidthChar::width(character)
.unwrap()
.try_into()
.unwrap()
}
#[cfg(test)]
mod tests {
use super::*;
fn str_to_vec_char(s: &str) -> Vec<char> {
String::from(s).chars().collect()
}
#[test]
fn test_compute_character_width_with_multiple_characters() {
assert_eq!(1, compute_character_width('a'));
assert_eq!(1, compute_character_width('ß'));
assert_eq!(1, compute_character_width('ç'));
}
#[test]
fn test_input_handler_clear_input_on_ctrl_l() {
let mut app = App::default();
app.input = str_to_vec_char("My text");
handler(Key::Ctrl('l'), &mut app);
assert_eq!(app.input, str_to_vec_char(""));
}
#[test]
fn test_input_handler_ctrl_u() {
let mut app = App::default();
app.input = str_to_vec_char("My text");
handler(Key::Ctrl('u'), &mut app);
assert_eq!(app.input, str_to_vec_char("My text"));
app.input_cursor_position = 3;
app.input_idx = 3;
handler(Key::Ctrl('u'), &mut app);
assert_eq!(app.input, str_to_vec_char("text"));
}
#[test]
fn test_input_handler_ctrl_k() {
let mut app = App::default();
app.input = str_to_vec_char("My text");
handler(Key::Ctrl('k'), &mut app);
assert_eq!(app.input, str_to_vec_char(""));
app.input = str_to_vec_char("My text");
app.input_cursor_position = 2;
app.input_idx = 2;
handler(Key::Ctrl('k'), &mut app);
assert_eq!(app.input, str_to_vec_char("My"));
handler(Key::Ctrl('k'), &mut app);
assert_eq!(app.input, str_to_vec_char("My"));
}
#[test]
fn test_input_handler_ctrl_w() {
let mut app = App::default();
app.input = str_to_vec_char("My text");
handler(Key::Ctrl('w'), &mut app);
assert_eq!(app.input, str_to_vec_char("My text"));
app.input_cursor_position = 3;
app.input_idx = 3;
handler(Key::Ctrl('w'), &mut app);
assert_eq!(app.input, str_to_vec_char("text"));
assert_eq!(app.input_cursor_position, 0);
assert_eq!(app.input_idx, 0);
app.input = str_to_vec_char(" ");
app.input_cursor_position = 3;
app.input_idx = 3;
handler(Key::Ctrl('w'), &mut app);
assert_eq!(app.input, str_to_vec_char(" "));
assert_eq!(app.input_cursor_position, 0);
assert_eq!(app.input_idx, 0);
app.input_cursor_position = 1;
app.input_idx = 1;
handler(Key::Ctrl('w'), &mut app);
assert_eq!(app.input, str_to_vec_char(""));
assert_eq!(app.input_cursor_position, 0);
assert_eq!(app.input_idx, 0);
app.input = str_to_vec_char("Hello there ");
app.input_cursor_position = 13;
app.input_idx = 13;
handler(Key::Ctrl('w'), &mut app);
assert_eq!(app.input, str_to_vec_char("Hello "));
assert_eq!(app.input_cursor_position, 6);
assert_eq!(app.input_idx, 6);
}
#[test]
fn test_input_handler_esc_back_to_playlist() {
let mut app = App::default();
app.set_current_route_state(Some(ActiveBlock::MyPlaylists), None);
handler(Key::Esc, &mut app);
let current_route = app.get_current_route();
assert_eq!(current_route.active_block, ActiveBlock::Empty);
}
#[test]
fn test_input_handler_on_enter_text() {
let mut app = App::default();
app.input = str_to_vec_char("My tex");
app.input_cursor_position = app.input.len().try_into().unwrap();
        app.input_idx = app.input.len();
        handler(Key::Char('t'), &mut app);
assert_eq!(app.input, str_to_vec_char("My text"));
}
#[test]
fn test_input_handler_backspace() {
let mut app = App::default();
app.input = str_to_vec_char("My text");
app.input_cursor_position = app.input.len().try_into().unwrap();
app.input_idx = app.input.len();
handler(Key::Backspace, &mut app);
assert_eq!(app.input, str_to_vec_char("My tex"));
// Test that backspace deletes from the cursor position
app.input_idx = 2;
app.input_cursor_position = 2;
handler(Key::Backspace, &mut app);
assert_eq!(app.input, str_to_vec_char("M tex"));
app.input_idx = 1;
app.input_cursor_position = 1;
handler(Key::Ctrl('h'), &mut app);
assert_eq!(app.input, str_to_vec_char(" tex"));
}
#[test]
fn test_input_handler_delete() {
let mut app = App::default();
app.input = str_to_vec_char("My text");
app.input_idx = 3;
app.input_cursor_position = 3;
handler(Key::Delete, &mut app);
assert_eq!(app.input, str_to_vec_char("My ext"));
app.input = str_to_vec_char("ラスト");
app.input_idx = 1;
app.input_cursor_position = 1;
handler(Key::Delete, &mut app);
assert_eq!(app.input, str_to_vec_char("ラト"));
app.input = str_to_vec_char("Rust");
app.input_idx = 2;
app.input_cursor_position = 2;
handler(Key::Ctrl('d'), &mut app);
assert_eq!(app.input, str_to_vec_char("Rut"));
}
#[test]
fn test_input_handler_left_event() {
let mut app = App::default();
app.input = str_to_vec_char("My text");
let input_len = app.input.len().try_into().unwrap();
app.input_idx = app.input.len();
app.input_cursor_position = input_len;
handler(Key::Left, &mut app);
assert_eq!(app.input_cursor_position, input_len - 1);
handler(Key::Left, &mut app);
assert_eq!(app.input_cursor_position, input_len - 2);
handler(Key::Left, &mut app);
assert_eq!(app.input_cursor_position, input_len - 3);
handler(Key::Ctrl('b'), &mut app);
assert_eq!(app.input_cursor_position, input_len - 4);
handler(Key::Ctrl('b'), &mut app);
assert_eq!(app.input_cursor_position, input_len - 5);
        // Pretend to smash the left event to test that we have no out-of-bounds crash
for _ in 0..20 {
handler(Key::Left, &mut app);
}
assert_eq!(app.input_cursor_position, 0);
}
#[test]
fn test_input_handler_on_enter_text_non_english_char() {
let mut app = App::default();
app.input = str_to_vec_char("ыа");
app.input_cursor_position = app.input.len().try_into().unwrap();
app.input_idx = app.input.len();
handler(Key::Char('ы'), &mut app);
assert_eq!(app.input, str_to_vec_char("ыаы"));
}
#[test]
fn test_input_handler_on_enter_text_wide_char() {
let mut app = App::default();
app.input = str_to_vec_char("你");
app.input_cursor_position = 2; // 你 is 2 char wide
app.input_idx = 1; // 1 char
handler(Key::Char('好'), &mut app);
assert_eq!(app.input, str_to_vec_char("你好"));
assert_eq!(app.input_idx, 2);
assert_eq!(app.input_cursor_position, 4);
}
mod test_uri_parsing {
use super::*;
const URI_BASE: &str = "spotify:";
const URL_BASE: &str = "https://open.spotify.com/";
fn check_uri_parse(expected_id: &str, parsed: (String, bool)) {
assert_eq!(parsed.1, true);
assert_eq!(parsed.0, expected_id);
}
fn run_test_for_id_and_resource_type(id: &str, resource_type: &str) {
check_uri_parse(
id,
spotify_resource_id(
URI_BASE,
&format!("spotify:{}:{}", resource_type, id),
":",
resource_type,
),
);
check_uri_parse(
id,
spotify_resource_id(
URL_BASE,
&format!("https://open.spotify.com/{}/{}", resource_type, id),
"/",
resource_type,
),
)
}
#[test]
fn artist() {
let expected_artist_id = "2ye2Wgw4gimLv2eAKyk1NB";
run_test_for_id_and_resource_type(expected_artist_id, "artist");
}
#[test]
fn album() {
let expected_album_id = "5gzLOflH95LkKYE6XSXE9k";
run_test_for_id_and_resource_type(expected_album_id, "album");
}
#[test]
fn playlist() {
let expected_playlist_id = "1cJ6lPBYj2fscs0kqBHsVV";
run_test_for_id_and_resource_type(expected_playlist_id, "playlist");
}
#[test]
fn show() {
let expected_show_id = "3aNsrV6lkzmcU1w8u8kA7N";
run_test_for_id_and_resource_type(expected_show_id, "show");
}
#[test]
fn track() {
let expected_track_id = "10igKaIKsSB6ZnWxPxPvKO";
run_test_for_id_and_resource_type(expected_track_id, "track");
}
#[test]
fn invalid_format_doesnt_match() {
let swapped = "show:spotify:3aNsrV6lkzmcU1w8u8kA7N";
let totally_wrong = "hehe-haha-3aNsrV6lkzmcU1w8u8kA7N";
let random = "random string";
let (_, matched) = spotify_resource_id(URI_BASE, swapped, ":", "track");
assert_eq!(matched, false);
let (_, matched) = spotify_resource_id(URI_BASE, totally_wrong, ":", "track");
assert_eq!(matched, false);
let (_, matched) = spotify_resource_id(URL_BASE, totally_wrong, "/", "track");
assert_eq!(matched, false);
let (_, matched) = spotify_resource_id(URL_BASE, random, "/", "track");
assert_eq!(matched, false);
}
#[test]
fn parse_with_query_parameters() {
// If this test ever fails due to some change to the parsing logic, it is likely a sign we
// should just integrate the url crate instead of trying to do things ourselves.
let playlist_url_with_query =
"https://open.spotify.com/playlist/1cJ6lPBYj2fscs0kqBHsVV?si=OdwuJsbsSeuUAOadehng3A";
let playlist_url = "https://open.spotify.com/playlist/1cJ6lPBYj2fscs0kqBHsVV";
let expected_id = "1cJ6lPBYj2fscs0kqBHsVV";
let (actual_id, matched) = spotify_resource_id(URL_BASE, playlist_url, "/", "playlist");
assert_eq!(matched, true);
assert_eq!(actual_id, expected_id);
let (actual_id, matched) =
spotify_resource_id(URL_BASE, playlist_url_with_query, "/", "playlist");
assert_eq!(matched, true);
assert_eq!(actual_id, expected_id);
}
#[test]
fn mismatched_resource_types_do_not_match() {
let playlist_url =
"https://open.spotify.com/playlist/1cJ6lPBYj2fscs0kqBHsVV?si=OdwuJsbsSeuUAOadehng3A";
let (_, matched) = spotify_resource_id(URL_BASE, playlist_url, "/", "album");
assert_eq!(matched, false);
}
}
}
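A standalone sketch of the length check that `spotify_resource_id` relies on: `str::trim_start_matches` returns its input unchanged when the prefix is absent, so a changed length is what signals a match. The track ID is borrowed from the tests above.

```rust
fn main() {
    let uri = "spotify:track:10igKaIKsSB6ZnWxPxPvKO";

    // Prefix present: the prefix is stripped, so the lengths differ.
    let id = uri.trim_start_matches("spotify:track:");
    assert!(id.len() != uri.len());
    assert_eq!(id, "10igKaIKsSB6ZnWxPxPvKO");

    // Prefix absent: the input comes back unchanged, so the lengths match.
    let unchanged = uri.trim_start_matches("spotify:album:");
    assert!(unchanged.len() == uri.len());
}
```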
transform.rs
use cgmath::Vector2;
use wasm_rgame::Canvas;
pub enum TransformVector {
Relative(Vector2<f32>),
Absolute(Vector2<f32>),
}
#[derive(Clone)]
pub struct Transform {
pub pos: Vector2<f32>,
pub size: Vector2<f32>,
pub pivot: Vector2<f32>,
}
impl Transform {
pub fn new(pos: TransformVector, size: TransformVector, pivot: Vector2<f32>) -> Transform {
Transform {
pos: pos.into_absolute(),
size: size.into_absolute(),
pivot,
}
}
pub fn contains(&self, point: Vector2<f32>) -> bool {
let bottom_left = self.bottom_left();
bottom_left.x <= point.x && point.x <= bottom_left.x + self.size.x &&
bottom_left.y <= point.y && point.y <= bottom_left.y + self.size.y
}
pub fn bottom_left(&self) -> Vector2<f32> {
Vector2 {
x: self.pos.x - (self.pivot.x * self.size.x),
y: self.pos.y - (self.pivot.y * self.size.y),
}
}
pub fn center(&self) -> Vector2<f32> {
self.bottom_left() + (self.size / 2.0)
}
}
impl TransformVector {
fn into_absolute(self) -> Vector2<f32> {
match self {
TransformVector::Absolute(vec) => vec,
TransformVector::Relative(vec) => {
let canvas = Canvas::instance();
Vector2 {
x: vec.x * (canvas.width() as f32),
y: vec.y * (canvas.height() as f32),
}
}
}
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
    fn bottom_left_works() {
        let transform = Transform {
            pos: Vector2 { x: 2.0, y: 1.0, },
size: Vector2 { x: 5.5, y: 6.5, },
pivot: Vector2 { x: 0.5, y: 0.5, },
};
assert_eq!(transform.bottom_left(), Vector2 { x: -0.75, y: -2.25, });
let transform2 = Transform {
pos: Vector2 { x: 3.0, y: 5.0, },
size: Vector2 { x: 5.5, y: 6.5, },
pivot: Vector2 { x: 0.0, y: 0.0, },
};
assert_eq!(transform2.bottom_left(), Vector2 { x: 3.0, y: 5.0, });
}
#[test]
fn contains_works() {
let transform = Transform {
pos: Vector2 { x: 2.0, y: 1.0, },
size: Vector2 { x: 5.5, y: 6.5, },
pivot: Vector2 { x: 0.5, y: 0.5, },
};
assert_eq!(transform.bottom_left(), Vector2 { x: -0.75, y: -2.25, });
// top_right -> x: 4.75, y: 4.25
assert_eq!(transform.contains(Vector2 { x: -0.76, y: -2.25, }), false);
assert_eq!(transform.contains(Vector2 { x: -0.75, y: -2.25, }), true);
assert_eq!(transform.contains(Vector2 { x: 0.0, y: 0.0, }), true);
assert_eq!(transform.contains(Vector2 { x: 2.0, y: 1.0, }), true);
assert_eq!(transform.contains(Vector2 { x: 4.75, y: 0.0, }), true);
assert_eq!(transform.contains(Vector2 { x: 5.0, y: 0.0, }), false);
}
}
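The pivot in `Transform` determines which point of the rectangle `pos` names: `bottom_left = pos - pivot * size`, component-wise. A dependency-free sketch of that arithmetic, using plain tuples instead of `cgmath::Vector2`:

```rust
// bottom_left = pos - pivot * size, component-wise. A pivot of (0.5, 0.5)
// centers the rectangle on `pos`; (0.0, 0.0) anchors its bottom-left corner.
fn bottom_left(pos: (f32, f32), size: (f32, f32), pivot: (f32, f32)) -> (f32, f32) {
    (pos.0 - pivot.0 * size.0, pos.1 - pivot.1 * size.1)
}

fn main() {
    // Matches the first case in `bottom_left_works` above.
    assert_eq!(bottom_left((2.0, 1.0), (5.5, 6.5), (0.5, 0.5)), (-0.75, -2.25));
    // With a pivot of (1.0, 1.0), `pos` names the rectangle's top-right corner.
    assert_eq!(bottom_left((2.0, 1.0), (5.5, 6.5), (1.0, 1.0)), (-3.5, -5.5));
}
```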
ad_group_criterion_customizer_error.py
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package="google.ads.googleads.v9.errors",
marshal="google.ads.googleads.v9",
manifest={"AdGroupCriterionCustomizerErrorEnum",},
)
class AdGroupCriterionCustomizerErrorEnum(proto.Message):
r"""Container for enum describing possible ad group criterion
customizer errors.
"""
class AdGroupCriterionCustomizerError(proto.Enum):
r"""Enum describing possible ad group criterion customizer
errors.
"""
UNSPECIFIED = 0
UNKNOWN = 1
CRITERION_IS_NOT_KEYWORD = 2
__all__ = tuple(sorted(__protobuf__.manifest))
index.stories.tsx
import React from 'react'
import { Story } from '@storybook/react/types-6-0'
import { Shadow, Props } from '.'
export default {
component: Shadow,
title: 'Atoms/Shadow',
}
const Template: Story<Props> = (args) => (
<Shadow {...args}>
<div
style={{
height: 100,
width: 100,
borderRadius: 15,
margin: 32,
background: 'white',
display: 'block',
}}
/>
</Shadow>
)
export const Small = Template.bind({})
Small.args = {
  size: 'small',
}
export const Normal = Template.bind({})
Normal.args = {
size: 'normal',
}
export const Medium = Template.bind({})
Medium.args = {
size: 'medium',
}
export const Large = Template.bind({})
Large.args = {
size: 'large',
}
export const Soft = Template.bind({})
Soft.args = {
size: 'soft',
}
members.rs
// Copyright (c) 2016 Daniel Grunwald
//
// Permission is hereby granted, free of charge, to any person obtaining a copy of this
// software and associated documentation files (the "Software"), to deal in the Software
// without restriction, including without limitation the rights to use, copy, modify, merge,
// publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons
// to whom the Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all copies or
// substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
// INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
// PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE
// FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
// OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
use std::marker;
use python::{Python, PythonObject};
use conversion::ToPyObject;
use objects::PyObject;
use err::{self, PyResult};
use ffi;
/// Represents something that can be added as a member to a Python class/type.
///
/// T: type of rust class used for instances of the Python class/type.
pub trait TypeMember<T> where T: PythonObject {
/// Convert the type member into a python object
/// that can be stored in the type dict.
///
/// Because the member may expect `self` values to be of type `T`,
/// `ty` must be T::type_object() or a derived class.
/// (otherwise the behavior is undefined)
unsafe fn into_descriptor(self, py: Python, ty: *mut ffi::PyTypeObject) -> PyResult<PyObject>;
}
impl <T, S> TypeMember<T> for S where T: PythonObject, S: ToPyObject {
#[inline]
unsafe fn into_descriptor(self, py: Python, _ty: *mut ffi::PyTypeObject) -> PyResult<PyObject> {
Ok(self.into_py_object(py).into_object())
}
}
#[macro_export]
#[doc(hidden)]
macro_rules! py_class_init_members {
($class:ident, $py:ident, $type_object: ident, { }) => {{}};
($class:ident, $py:ident, $type_object: ident, { $( $name:ident = $init:expr; )+ }) => {{
let dict = $crate::PyDict::new($py);
$( {
// keep $init out of unsafe block; it might contain user code
let init = $init;
let descriptor = unsafe {
$crate::py_class::members::TypeMember::<$class>::into_descriptor(init, $py, &mut $type_object)
}?;
let name = stringify!($name);
let name = if name.starts_with("r#") {
&name[2..]
} else {
name
};
dict.set_item($py, name, descriptor)?;
})*
unsafe {
assert!($type_object.tp_dict.is_null());
$type_object.tp_dict = $crate::PythonObject::into_object(dict).steal_ptr();
}
}};
}
#[macro_export(local_inner_macros)]
#[doc(hidden)]
macro_rules! py_class_instance_method {
($py:ident, $class:ident :: $f:ident [ $( { $pname:ident : $ptype:ty = $detail:tt } )* ]) => {{
py_class_instance_method!($py, $class::$f, { "" } [ $( { $pname : $ptype = $detail } )* ])
}};
($py:ident, $class:ident :: $f:ident, { $doc:expr } [ $( { $pname:ident : $ptype:ty = $detail:tt } )* ]) => {{
unsafe extern "C" fn wrap_instance_method(
            slf: *mut $crate::_detail::ffi::PyObject,
            args: *mut $crate::_detail::ffi::PyObject,
            kwargs: *mut $crate::_detail::ffi::PyObject)
            -> *mut $crate::_detail::ffi::PyObject
{
const LOCATION: &'static str = _cpython__py_class__members__concat!(_cpython__py_class__members__stringify!($class), ".", _cpython__py_class__members__stringify!($f), "()");
$crate::_detail::handle_callback(
LOCATION, $crate::_detail::PyObjectCallbackConverter,
|py| {
py_argparse_raw!(py, Some(LOCATION), args, kwargs,
[ $( { $pname : $ptype = $detail } )* ]
{
let slf = $crate::PyObject::from_borrowed_ptr(py, slf).unchecked_cast_into::<$class>();
let ret = slf.$f(py $(, $pname )* );
$crate::PyDrop::release_ref(slf, py);
ret
})
})
}
unsafe {
let method_def = py_method_def!(_cpython__py_class__members__stringify!($f), 0, wrap_instance_method, $doc);
$crate::py_class::members::create_instance_method_descriptor::<$class>(method_def)
}
}}
}
pub struct InstanceMethodDescriptor<T>(*mut ffi::PyMethodDef, marker::PhantomData<fn(&T)>);
#[inline]
pub unsafe fn create_instance_method_descriptor<T>(method_def: *mut ffi::PyMethodDef)
-> InstanceMethodDescriptor<T>
{
InstanceMethodDescriptor(method_def, marker::PhantomData)
}
impl <T> TypeMember<T> for InstanceMethodDescriptor<T> where T: PythonObject {
#[inline]
unsafe fn into_descriptor(self, py: Python, ty: *mut ffi::PyTypeObject) -> PyResult<PyObject> {
err::result_from_owned_ptr(py, ffi::PyDescr_NewMethod(ty, self.0))
}
}
#[macro_export]
#[doc(hidden)]
macro_rules! py_class_class_method {
($py:ident, $class:ident :: $f:ident [ $( { $pname:ident : $ptype:ty = $detail:tt } )* ]) => {{
py_class_class_method!($py, $class::$f, { "" } [ $( { $pname : $ptype = $detail } )* ])
}};
($py:ident, $class:ident :: $f:ident, { $doc:expr } [ $( { $pname:ident : $ptype:ty = $detail:tt } )* ]) => {{
unsafe extern "C" fn wrap_class_method(
cls: *mut $crate::_detail::ffi::PyObject,
args: *mut $crate::_detail::ffi::PyObject,
kwargs: *mut $crate::_detail::ffi::PyObject)
-> *mut $crate::_detail::ffi::PyObject
{
const LOCATION: &'static str = _cpython__py_class__members__concat!(_cpython__py_class__members__stringify!($class), ".", _cpython__py_class__members__stringify!($f), "()");
$crate::_detail::handle_callback(
LOCATION, $crate::_detail::PyObjectCallbackConverter,
|py| {
py_argparse_raw!(py, Some(LOCATION), args, kwargs,
[ $( { $pname : $ptype = $detail } )* ]
{
let cls = $crate::PyObject::from_borrowed_ptr(py, cls).unchecked_cast_into::<$crate::PyType>();
let ret = $class::$f(&cls, py $(, $pname )* );
$crate::PyDrop::release_ref(cls, py);
ret
})
})
}
unsafe {
let method_def = py_method_def!(_cpython__py_class__members__stringify!($f),
$crate::_detail::ffi::METH_CLASS,
wrap_class_method,
$doc);
$crate::py_class::members::create_class_method_descriptor(method_def)
}
}}
}
pub struct ClassMethodDescriptor(*mut ffi::PyMethodDef);
#[inline]
pub unsafe fn create_class_method_descriptor(method_def: *mut ffi::PyMethodDef)
-> ClassMethodDescriptor
{
ClassMethodDescriptor(method_def)
}
impl <T> TypeMember<T> for ClassMethodDescriptor where T: PythonObject {
#[inline]
unsafe fn into_descriptor(self, py: Python, ty: *mut ffi::PyTypeObject) -> PyResult<PyObject> {
err::result_from_owned_ptr(py, ffi::PyDescr_NewClassMethod(ty, self.0))
}
}
#[macro_export]
#[doc(hidden)]
macro_rules! py_class_static_method {
($py:ident, $class:ident :: $f:ident [ $( { $pname:ident : $ptype:ty = $detail:tt } )* ]) => {{
py_class_static_method!($py, $class::$f, { "" } [ $( { $pname : $ptype = $detail } )* ])
}};
($py:ident, $class:ident :: $f:ident, { $doc:expr } [ $( { $pname:ident : $ptype:ty = $detail:tt } )* ]) => {{
unsafe extern "C" fn wrap_static_method(
_slf: *mut $crate::_detail::ffi::PyObject,
args: *mut $crate::_detail::ffi::PyObject,
kwargs: *mut $crate::_detail::ffi::PyObject)
-> *mut $crate::_detail::ffi::PyObject
{
const LOCATION: &'static str = _cpython__py_class__members__concat!(_cpython__py_class__members__stringify!($class), ".", _cpython__py_class__members__stringify!($f), "()");
$crate::_detail::handle_callback(
LOCATION, $crate::_detail::PyObjectCallbackConverter,
|py| {
py_argparse_raw!(py, Some(LOCATION), args, kwargs,
[ $( { $pname : $ptype = $detail } )* ]
{
$class::$f(py $(, $pname )* )
})
})
}
unsafe {
let method_def = py_method_def!(_cpython__py_class__members__stringify!($f),
$crate::_detail::ffi::METH_STATIC,
wrap_static_method,
$doc);
$crate::_detail::py_fn_impl($py, method_def)
}
}}
}
// Rust 2018 support
#[macro_export]
#[doc(hidden)]
macro_rules! _cpython__py_class__members__stringify {
($($inner:tt)*) => {
stringify! { $($inner)* }
}
}
#[macro_export]
#[doc(hidden)]
macro_rules! _cpython__py_class__members__concat {
($($inner:tt)*) => {
concat! { $($inner)* }
}
}
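These member macros are normally reached through the crate's public `py_class!` macro rather than invoked directly. A minimal sketch of that usage, assuming rust-cpython's documented `py_class!` syntax; the `Counter` class and its methods are illustrative. Note that the instance methods also become inherent Rust methods, which is why `wrap_instance_method` above can call `slf.$f(py, ...)`.

```rust
#[macro_use]
extern crate cpython;
use cpython::{PyResult, Python};

py_class!(class Counter |py| {
    data value: i32;

    def __new__(_cls, value: i32) -> PyResult<Counter> {
        Counter::create_instance(py, value)
    }

    // Instance methods expand through `py_class_instance_method!` above.
    def get(&self) -> PyResult<i32> {
        Ok(*self.value(py))
    }

    // Static methods expand through `py_class_static_method!` above.
    @staticmethod def zero() -> PyResult<i32> {
        Ok(0)
    }
});

fn main() {
    let gil = Python::acquire_gil();
    let py = gil.python();
    let counter = Counter::create_instance(py, 41).unwrap();
    assert_eq!(counter.get(py).unwrap(), 41);
}
```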
operations.rs
#![doc = "generated by AutoRust 0.1.0"]
#![allow(unused_mut)]
#![allow(unused_variables)]
#![allow(unused_imports)]
use super::{models, API_VERSION};
#[derive(Clone)]
pub struct Client {
endpoint: String,
credential: std::sync::Arc<dyn azure_core::TokenCredential>,
scopes: Vec<String>,
pipeline: azure_core::pipeline::Pipeline,
}
#[derive(Clone)]
pub struct ClientBuilder {
credential: std::sync::Arc<dyn azure_core::TokenCredential>,
endpoint: Option<String>,
scopes: Option<Vec<String>>,
}
pub const DEFAULT_ENDPOINT: &str = azure_core::resource_manager_endpoint::AZURE_PUBLIC_CLOUD;
impl ClientBuilder {
pub fn new(credential: std::sync::Arc<dyn azure_core::TokenCredential>) -> Self {
Self {
credential,
endpoint: None,
scopes: None,
}
}
pub fn endpoint(mut self, endpoint: impl Into<String>) -> Self {
self.endpoint = Some(endpoint.into());
self
}
pub fn scopes(mut self, scopes: &[&str]) -> Self {
self.scopes = Some(scopes.iter().map(|scope| (*scope).to_owned()).collect());
self
}
pub fn build(self) -> Client {
let endpoint = self.endpoint.unwrap_or_else(|| DEFAULT_ENDPOINT.to_owned());
let scopes = self.scopes.unwrap_or_else(|| vec![format!("{}/", endpoint)]);
Client::new(endpoint, self.credential, scopes)
}
}
impl Client {
pub(crate) fn endpoint(&self) -> &str {
self.endpoint.as_str()
}
pub(crate) fn token_credential(&self) -> &dyn azure_core::TokenCredential {
self.credential.as_ref()
}
pub(crate) fn scopes(&self) -> Vec<&str> {
self.scopes.iter().map(String::as_str).collect()
}
pub(crate) async fn send(&self, request: impl Into<azure_core::Request>) -> Result<azure_core::Response, azure_core::Error> {
let mut context = azure_core::Context::default();
let mut request = request.into();
self.pipeline.send(&mut context, &mut request).await
}
pub fn new(endpoint: impl Into<String>, credential: std::sync::Arc<dyn azure_core::TokenCredential>, scopes: Vec<String>) -> Self {
let endpoint = endpoint.into();
let pipeline = azure_core::pipeline::Pipeline::new(
option_env!("CARGO_PKG_NAME"),
option_env!("CARGO_PKG_VERSION"),
azure_core::ClientOptions::default(),
Vec::new(),
Vec::new(),
);
Self {
endpoint,
credential,
scopes,
pipeline,
}
}
pub fn operations(&self) -> operations::Client {
operations::Client(self.clone())
}
pub fn support_plan_types(&self) -> support_plan_types::Client {
support_plan_types::Client(self.clone())
}
}
#[non_exhaustive]
#[derive(Debug, thiserror :: Error)]
#[allow(non_camel_case_types)]
pub enum Error {
#[error(transparent)]
Operations_List(#[from] operations::list::Error),
#[error(transparent)]
SupportPlanTypes_Get(#[from] support_plan_types::get::Error),
#[error(transparent)]
SupportPlanTypes_CreateOrUpdate(#[from] support_plan_types::create_or_update::Error),
#[error(transparent)]
SupportPlanTypes_Delete(#[from] support_plan_types::delete::Error),
#[error(transparent)]
SupportPlanTypes_ListInfo(#[from] support_plan_types::list_info::Error),
}
pub mod operations {
use super::{models, API_VERSION};
pub struct Client(pub(crate) super::Client);
impl Client {
pub fn list(&self) -> list::Builder {
list::Builder { client: self.0.clone() }
}
}
pub mod list {
use super::{models, API_VERSION};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorDefinition,
},
#[error("Failed to parse request URL: {0}")]
ParseUrl(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequest(http::Error),
#[error("Failed to serialize request body: {0}")]
Serialize(serde_json::Error),
#[error("Failed to get access token: {0}")]
GetToken(azure_core::Error),
#[error("Failed to execute request: {0}")]
SendRequest(azure_core::Error),
#[error("Failed to get response bytes: {0}")]
ResponseBytes(azure_core::StreamError),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
Deserialize(serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::OperationListValue, Error>> {
Box::pin(async move {
let url_str = &format!("{}/providers/Microsoft.Addons/operations", self.client.endpoint(),);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
                        .await
                        .map_err(Error::GetToken)?;
                    req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
                    url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
                    let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
                    req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::OperationListValue =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::ErrorDefinition =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
}
pub mod support_plan_types {
use super::{models, API_VERSION};
pub struct Client(pub(crate) super::Client);
impl Client {
pub fn get(
&self,
subscription_id: impl Into<String>,
provider_name: impl Into<String>,
plan_type_name: impl Into<String>,
) -> get::Builder {
get::Builder {
client: self.0.clone(),
subscription_id: subscription_id.into(),
provider_name: provider_name.into(),
plan_type_name: plan_type_name.into(),
}
}
pub fn create_or_update(
&self,
subscription_id: impl Into<String>,
provider_name: impl Into<String>,
plan_type_name: impl Into<String>,
) -> create_or_update::Builder {
create_or_update::Builder {
client: self.0.clone(),
subscription_id: subscription_id.into(),
provider_name: provider_name.into(),
plan_type_name: plan_type_name.into(),
}
}
pub fn delete(
&self,
subscription_id: impl Into<String>,
provider_name: impl Into<String>,
plan_type_name: impl Into<String>,
) -> delete::Builder {
delete::Builder {
client: self.0.clone(),
subscription_id: subscription_id.into(),
provider_name: provider_name.into(),
plan_type_name: plan_type_name.into(),
}
}
pub fn list_info(&self, subscription_id: impl Into<String>) -> list_info::Builder {
list_info::Builder {
client: self.0.clone(),
subscription_id: subscription_id.into(),
}
}
}
pub mod get {
use super::{models, API_VERSION};
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Error response #response_type")]
NotFound404 {},
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorDefinition,
},
#[error("Failed to parse request URL: {0}")]
ParseUrl(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequest(http::Error),
#[error("Failed to serialize request body: {0}")]
Serialize(serde_json::Error),
#[error("Failed to get access token: {0}")]
GetToken(azure_core::Error),
#[error("Failed to execute request: {0}")]
SendRequest(azure_core::Error),
#[error("Failed to get response bytes: {0}")]
ResponseBytes(azure_core::StreamError),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
Deserialize(serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) subscription_id: String,
pub(crate) provider_name: String,
pub(crate) plan_type_name: String,
}
impl Builder {
pub fn into_future(
self,
) -> futures::future::BoxFuture<'static, std::result::Result<models::CanonicalSupportPlanResponseEnvelope, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/providers/Microsoft.Addons/supportProviders/{}/supportPlanTypes/{}",
self.client.endpoint(),
&self.subscription_id,
&self.provider_name,
&self.plan_type_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::GET);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
let (rsp_status, rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CanonicalSupportPlanResponseEnvelope =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
http::StatusCode::NOT_FOUND => Err(Error::NotFound404 {}),
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::ErrorDefinition =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
pub mod create_or_update {
use super::{models, API_VERSION};
#[derive(Debug)]
pub enum Response {
Ok200(models::CanonicalSupportPlanResponseEnvelope),
Created201(models::CanonicalSupportPlanResponseEnvelope),
}
#[derive(Debug, thiserror :: Error)]
pub enum Error {
#[error("Error response #response_type")]
NotFound404 {},
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorDefinition,
},
#[error("Failed to parse request URL: {0}")]
ParseUrl(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequest(http::Error),
#[error("Failed to serialize request body: {0}")]
Serialize(serde_json::Error),
#[error("Failed to get access token: {0}")]
GetToken(azure_core::Error),
#[error("Failed to execute request: {0}")]
SendRequest(azure_core::Error),
#[error("Failed to get response bytes: {0}")]
ResponseBytes(azure_core::StreamError),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
Deserialize(serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) subscription_id: String,
pub(crate) provider_name: String,
pub(crate) plan_type_name: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/providers/Microsoft.Addons/supportProviders/{}/supportPlanTypes/{}",
self.client.endpoint(),
&self.subscription_id,
&self.provider_name,
&self.plan_type_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::PUT);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
                    let (rsp_status, _rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CanonicalSupportPlanResponseEnvelope =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(Response::Ok200(rsp_value))
}
http::StatusCode::CREATED => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CanonicalSupportPlanResponseEnvelope =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(Response::Created201(rsp_value))
}
http::StatusCode::NOT_FOUND => Err(Error::NotFound404 {}),
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::ErrorDefinition =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
pub mod delete {
use super::{models, API_VERSION};
#[derive(Debug)]
pub enum Response {
Accepted202(models::CanonicalSupportPlanResponseEnvelope),
NoContent204,
}
        #[derive(Debug, thiserror::Error)]
pub enum Error {
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorDefinition,
},
#[error("Failed to parse request URL: {0}")]
ParseUrl(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequest(http::Error),
#[error("Failed to serialize request body: {0}")]
Serialize(serde_json::Error),
#[error("Failed to get access token: {0}")]
GetToken(azure_core::Error),
#[error("Failed to execute request: {0}")]
SendRequest(azure_core::Error),
#[error("Failed to get response bytes: {0}")]
ResponseBytes(azure_core::StreamError),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
Deserialize(serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) subscription_id: String,
pub(crate) provider_name: String,
pub(crate) plan_type_name: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<Response, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/providers/Microsoft.Addons/supportProviders/{}/supportPlanTypes/{}",
self.client.endpoint(),
&self.subscription_id,
&self.provider_name,
&self.plan_type_name
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::DELETE);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
                    let (rsp_status, _rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::ACCEPTED => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CanonicalSupportPlanResponseEnvelope =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(Response::Accepted202(rsp_value))
}
http::StatusCode::NO_CONTENT => Ok(Response::NoContent204),
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::ErrorDefinition =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
pub mod list_info {
use super::{models, API_VERSION};
        #[derive(Debug, thiserror::Error)]
        pub enum Error {
            #[error("HTTP status code 404 Not Found")]
            NotFound404 {},
#[error("HTTP status code {}", status_code)]
DefaultResponse {
status_code: http::StatusCode,
value: models::ErrorDefinition,
},
#[error("Failed to parse request URL: {0}")]
ParseUrl(url::ParseError),
#[error("Failed to build request: {0}")]
BuildRequest(http::Error),
#[error("Failed to serialize request body: {0}")]
Serialize(serde_json::Error),
#[error("Failed to get access token: {0}")]
GetToken(azure_core::Error),
#[error("Failed to execute request: {0}")]
SendRequest(azure_core::Error),
#[error("Failed to get response bytes: {0}")]
ResponseBytes(azure_core::StreamError),
#[error("Failed to deserialize response: {0}, body: {1:?}")]
Deserialize(serde_json::Error, bytes::Bytes),
}
#[derive(Clone)]
pub struct Builder {
pub(crate) client: super::super::Client,
pub(crate) subscription_id: String,
}
impl Builder {
pub fn into_future(self) -> futures::future::BoxFuture<'static, std::result::Result<models::CanonicalSupportPlanInfo, Error>> {
Box::pin(async move {
let url_str = &format!(
"{}/subscriptions/{}/providers/Microsoft.Addons/supportProviders/canonical/listSupportPlanInfo",
self.client.endpoint(),
&self.subscription_id
);
let mut url = url::Url::parse(url_str).map_err(Error::ParseUrl)?;
let mut req_builder = http::request::Builder::new();
req_builder = req_builder.method(http::Method::POST);
let credential = self.client.token_credential();
let token_response = credential
.get_token(&self.client.scopes().join(" "))
.await
.map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY);
req_builder = req_builder.header(http::header::CONTENT_LENGTH, 0);
req_builder = req_builder.uri(url.as_str());
let req = req_builder.body(req_body).map_err(Error::BuildRequest)?;
let rsp = self.client.send(req).await.map_err(Error::SendRequest)?;
                    let (rsp_status, _rsp_headers, rsp_stream) = rsp.deconstruct();
match rsp_status {
http::StatusCode::OK => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::CanonicalSupportPlanInfo =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Ok(rsp_value)
}
http::StatusCode::NOT_FOUND => Err(Error::NotFound404 {}),
status_code => {
let rsp_body = azure_core::collect_pinned_stream(rsp_stream).await.map_err(Error::ResponseBytes)?;
let rsp_value: models::ErrorDefinition =
serde_json::from_slice(&rsp_body).map_err(|source| Error::Deserialize(source, rsp_body.clone()))?;
Err(Error::DefaultResponse {
status_code,
value: rsp_value,
})
}
}
})
}
}
}
} | .map_err(Error::GetToken)?;
req_builder = req_builder.header(http::header::AUTHORIZATION, format!("Bearer {}", token_response.token.secret()));
url.query_pairs_mut().append_pair("api-version", super::API_VERSION);
let req_body = bytes::Bytes::from_static(azure_core::EMPTY_BODY); |
configurationsListByServerSample.js | /*
* Copyright (c) Microsoft Corporation.
* Licensed under the MIT License.
*
* Code generated by Microsoft (R) AutoRest Code Generator.
* Changes may cause incorrect behavior and will be lost if the code is regenerated.
*/
// Copyright (c) Microsoft Corporation.
// Licensed under the MIT License.
const { PostgreSQLManagementFlexibleServerClient } = require("@azure/arm-postgresql-flexible");
const { DefaultAzureCredential } = require("@azure/identity");
/**
* This sample demonstrates how to List all the configurations in a given server.
*
* @summary List all the configurations in a given server.
* x-ms-original-file: specification/postgresql/resource-manager/Microsoft.DBforPostgreSQL/stable/2021-06-01/examples/ConfigurationListByServer.json
*/
async function | () {
const subscriptionId = "ffffffff-ffff-ffff-ffff-ffffffffffff";
const resourceGroupName = "testrg";
const serverName = "testserver";
const credential = new DefaultAzureCredential();
const client = new PostgreSQLManagementFlexibleServerClient(credential, subscriptionId);
const resArray = new Array();
for await (let item of client.configurations.listByServer(resourceGroupName, serverName)) {
resArray.push(item);
}
console.log(resArray);
}
configurationList().catch(console.error);
| configurationList |
hashsum.js | // Based on https://github.com/remko/gulp-hashsum/blob/master/index.js
'use strict';
const crypto = require('crypto');
const gutil = require('gulp-util');
const _ = require('lodash');
const mkdirp = require('mkdirp');
const slash = require('slash');
const through = require('through');
const fs = require('fs');
const path = require('path');
module.exports = function hashsum(options) {
options = _.defaults(options || {}, {
filename: 'static-hashes.properties',
dest: process.cwd(),
hash: 'md5',
force: false,
delimiter: '=',
maxLength: 12,
salt: 'a',
});
const hashesFilePath = path.resolve(options.dest, options.filename);
let hashes = {};
const base = options.base || path.dirname(hashesFilePath);
function | (file) {
if (file.isNull()) {
return;
}
if (file.isStream()) {
this.emit('error', new gutil.PluginError('gulp-hashsum', 'Streams not supported'));
return;
}
const filePath = path.resolve(options.base || options.dest, file.path);
let hash =
crypto.createHash(options.hash)
.update(file.contents, 'binary')
.update(options.salt)
.digest('hex');
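    // Note: parseInt over a full hex digest exceeds Number.MAX_SAFE_INTEGER,
    // so low-order digits are lost in the decimal conversion below; that is
    // acceptable here because the value is only used as a short cache-busting
    // token, not as a verifiable checksum.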
hash = parseInt(hash, 16);
if (hash < 0) hash = -hash;
hash = hash.toPrecision(21).replace(/[^\d]/g, '');
if (options.maxLength && hash.length > options.maxLength) {
hash = hash.substring(0, options.maxLength);
}
hashes[slash(path.relative(base, filePath))] = hash;
this.push(file);
}
function writeSums() {
const lines = _.keys(hashes).sort().map(function (key) {
return key + options.delimiter + hashes[key] + '\n';
});
const contents = lines.join('');
        const data = Buffer.from(contents);
if (options.force || !fs.existsSync(hashesFilePath) || Buffer.compare(fs.readFileSync(hashesFilePath), data) !== 0) {
mkdirp.sync(path.dirname(hashesFilePath));
fs.writeFileSync(hashesFilePath, data);
}
this.emit('end');
}
return through(processFile, writeSums);
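    // Usage sketch (assumes the gulp 3.x-era APIs implied by the gulp-util and
    // through dependencies above; the task name and glob are hypothetical):
    //
    //   const gulp = require('gulp');
    //   const hashsum = require('./hashsum');
    //   gulp.task('static-hashes', function () {
    //     return gulp.src('static/**/*')
    //       .pipe(hashsum({ dest: 'build', hash: 'md5', maxLength: 12 }));
    //   });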
} | processFile |
analyze.py | import torch
import numpy as np
import pickle
from itertools import combinations, permutations
from sklearn.decomposition import PCA
from sklearn.manifold import MDS, TSNE
from scipy.stats import pearsonr, ttest_ind
import statsmodels.api as sm
from dataset import get_loaders, WineGrid
def analyze_episodic(model, test_data, args):
# Collect attention weights for each sample in test set
model.eval()
m, x_ = test_data[0] # only 1 episode in test data
m = m.to(args.device) # m: [1, n_train, sample_dim]
x = x_[:,:,:-1].to(args.device) # x: [1, n_test, sample_dim]
y = x_[:,:,-1].type(torch.long).to(args.device)
y = y.squeeze() # y: [1, n_test]
with torch.no_grad():
y_hat, attention = model(x, m)
attention = attention[0] # first (only) memory layer
attention = np.squeeze(attention)
# attention: [n_train, n_test]
# Check the retrieval weights of relevant vs. irrelevant training samples
grid = test_data.grid
train = grid.train # train *samples* in test *episode*
test = grid.test # test *samples* in test *episode*
n_train = len(train)
n_test = len(test)
rel_ids = grid.hub_sample_ids # relevant memory ids (train samples)
attn_ranks = np.zeros_like(attention)
for i in range(n_test):
argsorted_attn = np.argsort(attention[i])
ranks = np.zeros([n_train])
ranks[argsorted_attn] = np.arange(n_train)
attn_ranks[i] = ranks
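    # attn_ranks[i, j]: rank of memory j's attention weight for test item i
    # (0 = smallest weight, n_train - 1 = largest)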
relevant = []
irrelevant = []
for i in range(n_test):
for j in range(n_train):
if j in rel_ids[i]:
relevant.append(attn_ranks[i,j])
else:
irrelevant.append(attn_ranks[i,j])
rank_data = {"relevant": relevant, "irrelevant": irrelevant}
# Check how often a legitimate "path" was retrieved in the top 5%
k = 8 # top k memories with highest weights (k = 8 means 5 percent)
used_hub = []
for i in range(n_test):
highest_attn = np.argsort(attention[i])[-k:]
test_f1, test_f2, test_ctx, test_y = test[i]
# Get relevant hubs for current test sample
hubs = []
for rel_id in rel_ids[i]:
train_sample = train[rel_id]
train_f1, train_f2 = train_sample[0], train_sample[1]
if train_f1 in [test_f1, test_f2]:
hubs.append(train_f2)
if train_f2 in [test_f1, test_f2]:
hubs.append(train_f1)
hubs = list(set(hubs))
hubs_dict = {h:[] for h in hubs}
assert len(hubs) == 2, "shouldn't be more than 2 hubs?"
# Check if one of the hubs appears with f1 and f2
attended_train = [train[idx] for idx in highest_attn]
for sample in attended_train:
train_f1, train_f2, train_ctx, train_y = sample
if train_ctx != test_ctx:
continue # must be samples testing the same axis to be relevant
            if hubs[0] == train_f1:
                hubs_dict[hubs[0]].append(train_f2)
            if hubs[1] == train_f1:
                hubs_dict[hubs[1]].append(train_f2)
            if hubs[0] == train_f2:
                hubs_dict[hubs[0]].append(train_f1)
            if hubs[1] == train_f2:
                hubs_dict[hubs[1]].append(train_f1)
if test_f1 in hubs_dict[hubs[0]] and test_f2 in hubs_dict[hubs[0]]:
used_hub.append(True)
elif test_f1 in hubs_dict[hubs[1]] and test_f2 in hubs_dict[hubs[1]]:
used_hub.append(True)
else:
used_hub.append(False)
p_used_hub = np.mean(used_hub)
print("Proportion that episodic system retrieved a hub path:", p_used_hub)
results = {"rank_data":rank_data, "p_used_hub": p_used_hub}
return results
def analyze_cortical(model, test_data, analyze_loader, args):
# Useful dictionaries from test dataset
n_states = test_data.n_states
loc2idx = test_data.loc2idx
idx2loc = {idx:loc for loc, idx in loc2idx.items()}
idxs = [idx for idx in range(n_states)]
# locs = [idx2loc[idx] for idx in idxs]
idx2tensor = test_data.idx2tensor
model.eval()
# Get embeddings from model for each face
face_embedding = model.face_embedding
face_embedding.to(args.device)
embeddings = []
# Get hiddens from the recurrent model for each face
# if the model was stepwisemlp
if args.cortical_model=='stepwisemlp':
hiddens = [[] for i in range(2)]
hiddens_cong = [[] for i in range(2)]
hiddens_incong = [[] for i in range(2)]
hiddens_ctxs = [[[] for j in range(args.N_contexts)] for i in range(2)]
else:
hiddens = [] # hidden reps. for both contexts
hiddens_incong = []
hiddens_cong = []
hiddens_ctxs = [[] for i in range(args.N_contexts)]
idxs1 = []
idxs2 = []
idxs1_ctxs = [[] for i in range(args.N_contexts)]
idxs2_ctxs = [[] for i in range(args.N_contexts)]
samples = []
samples_ctxs = [[] for i in range(args.N_contexts)]
samples_cong = []
samples_incong = []
with torch.no_grad():
for idx in range(n_states):
face_tensor = idx2tensor[idx].unsqueeze(0).to(args.device)
embedding = face_embedding(face_tensor) # [1, state_dim]
embedding = embedding.cpu().numpy()
embeddings.append(embedding)
embeddings = np.concatenate(embeddings, axis=0) # [n_states, state_dim]
for batch in analyze_loader:
if args.cortical_task == 'face_task':
f1, f2, ctx, out, idx1, idx2 = batch
elif args.cortical_task == 'wine_task':
f1, f2, ctx, out1, out2, idx1, idx2 = batch
idx1 = idx1[0]
idx2 = idx2[0]
samples.append(batch)
(x1, y1), (x2, y2) = idx2loc[idx1], idx2loc[idx2]
f1 = f1.to(args.device)
f2 = f2.to(args.device)
ctx = ctx.to(args.device)
# create congruent and incongruent groups
grid_angle = np.arctan2((y2-y1),(x2-x1))
phi = np.sin(2*grid_angle)
if np.abs(phi)<1e-5:
                # for congruent trials,
                # zero out angles that are very close to zero
                # so the sign doesn't turn them into 1 or -1
cong = 0
else:
cong = np.sign(phi) # 1: congruent, -1:incongruent, 0:none
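            # worked example (illustration only): along the main diagonal,
            # e.g. (x1,y1)=(0,0) -> (x2,y2)=(1,1), grid_angle = pi/4 and
            # phi = sin(pi/2) = 1 -> congruent; along the anti-diagonal,
            # e.g. (0,1) -> (1,0), grid_angle = -pi/4 and phi = -1 ->
            # incongruent; axis-aligned pairs give phi = 0 and join neither group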
# get the hidden reps.
y_hat, out = model(f1, f2, ctx)
# y_hat: [1, 2]
# rnn_out: [seq_length, 1, hidden_dim]: [3, 1, 128]
# mlp_out: [1, hidden_dim]: [1, 128]
if args.order_ctx == 'first':
f1_ind = 1
f2_ind = 2
elif args.order_ctx == 'last':
f1_ind = 0
f2_ind = 1
if args.cortical_model=='stepwisemlp':
out1, out2 = out
out1 = out1.cpu().numpy()
out2 = out2.cpu().numpy()
hiddens[0].append(out1)
hiddens[1].append(out2)
hiddens_ctxs[0][ctx].append(out1)
hiddens_ctxs[1][ctx].append(out2)
else:
out = out.cpu().numpy()
hiddens.append(out)
hiddens_ctxs[ctx].append(out)
ctx = ctx[0].cpu().numpy()
idxs1.append(idx1)
idxs2.append(idx2)
idxs1_ctxs[ctx].append(idx1)
idxs2_ctxs[ctx].append(idx2)
samples_ctxs[ctx].append(batch)
if ((cong==1) and ((ctx==0) or (ctx==1))):
if args.cortical_model=='stepwisemlp':
hiddens_cong[0].append(out1)
hiddens_cong[1].append(out2)
else:
hiddens_cong.append(out)
samples_cong.append(batch)
elif ((cong==-1) and ((ctx==0) or (ctx==1))):
if args.cortical_model=='stepwisemlp':
hiddens_incong[0].append(out1)
hiddens_incong[1].append(out2)
else:
hiddens_incong.append(out)
samples_incong.append(batch)
hiddens = np.asarray(hiddens).squeeze()
# for n_ctx=2, data_len = 16*12*2=384 (n_states:16, n_states-ties:12, permutation:2)
# rnn hiddens: [data_len, seq_length, hidden_dim] : [384, 3, 128]
# mlp hiddens: [data_len, hidden_dim]: [384, 128]
# stepwisemlp hiddens: [num_hidds, data_len, hidden_dim]: [2, 384, 128]
# with diagonals - wine task = data_len = (n_ctx-n_diag)*192+n_diag*212
# [n_ctx:2, data_len:384], [n_ctx:4, data_len:768], [n_ctx:8, data_len: 1616]
hiddens_incong = np.asarray(hiddens_incong).squeeze()
hiddens_cong = np.asarray(hiddens_cong).squeeze()
# rnn hiddens_cong/incong: [144, 3, 128]
# mlp hiddens_cong/incong: [144, 128]
# stepwise mlp hiddens_cong/incong: [2, 144, 128]
# hiddens_ctx: even tho it is 384, but it is ordered based on the contexts
if args.cortical_model=='stepwisemlp':
hiddens_ctx = np.concatenate(np.asarray(hiddens_ctxs).squeeze(), axis=1)
# hiddens_ctxs: [n_hidds=2, n_ctx, 192, 1, 128]
# hiddens_ctx: [n_hidds=2, 384, 128]
hiddens_inc_c = np.concatenate((hiddens_incong, hiddens_cong), axis=1)
# hiddens_inc_c: [n_hidds, 384-ties, 128]: [2, 288, 128]
else:
hiddens_ctx = np.concatenate(hiddens_ctxs, axis = 0).squeeze()
# mlp hiddens_ctxs: [n_ctx, 192, 1, 128]
# rnn hiddens_ctxs: [n_ctx, n_trials=192, 3, 1, 128]
# rnn hiddens_ctx: [384, 3, 128]
# mlp hiddens_ctx: [384, 128]
hiddens_inc_c = np.concatenate((hiddens_incong, hiddens_cong), axis=0)
# rnn hiddens_inc_c: [384-ties, seq_length, 128]: [288, 3, 128]
# mlp hiddens_inc_c: [384-ties, 128]: [288, 128]
if ((args.cortical_model=='rnn') or (args.cortical_model=='rnncell')):
hiddens_ctx = hiddens_ctx[:, -1, :] # [384, 128]
hiddens_inc_c = hiddens_inc_c[:, -1, :] #[288, 128]
samples_inc_c = np.concatenate((samples_incong, samples_cong), axis=0)
if args.cortical_model=='stepwisemlp':
avg_hidden = np.zeros([2, n_states, hiddens.shape[-1]])
avg_hidden_ctxs = np.zeros([2, args.N_contexts, n_states, hiddens.shape[-1]])
else:
avg_hidden = np.zeros([n_states, hiddens.shape[-1]])
avg_hidden_ctxs = np.zeros([args.N_contexts, n_states, hiddens.shape[-1]])
if ((args.cortical_model=='rnn') or (args.cortical_model=='rnncell')):
hiddens_ctxs = np.asarray(hiddens_ctxs).squeeze() # [n_ctx, n_tirals=192, seq_len=3, hidd_dim=128]
# Take average for each face based on its location
for f in range(n_states):
temp1 = [np.expand_dims(hiddens[i,f1_ind,:], axis=0)
for i, idx1 in enumerate(idxs1) if idx1==f]
temp2 = [np.expand_dims(hiddens[i,f2_ind,:], axis=0)
for i, idx2 in enumerate(idxs2) if idx2==f]
if len(temp1 + temp2)>1:
avg_hidden[f] = np.concatenate(temp1 + temp2, axis=0).mean(axis=0)
for ctx in range(args.N_contexts):
temp1_ctxs = [hiddens_ctxs[ctx,i,f1_ind,:]
for i, idx1 in enumerate(idxs1_ctxs[ctx]) if idx1==f]
temp2_ctxs = [hiddens_ctxs[ctx,i,f2_ind,:]
for i, idx2 in enumerate(idxs2_ctxs[ctx]) if idx2==f]
if len(temp1_ctxs + temp2_ctxs)>1:
m = np.zeros([2,hiddens_ctxs.shape[-1]])
m[0] = np.mean(np.asarray(temp1_ctxs), axis=0)
m[1] = np.mean(np.asarray(temp2_ctxs), axis=0)
avg_hidden_ctxs[ctx, f, :] = np.mean(m, axis=0)
# avg_hidden_ctxs[ctx, f, :] = np.concatenate(temp1_ctxs + temp2_ctxs, axis=0).mean(axis=0)
# avg_hidden_ctxs: [n_ctx, n_states, hidden_dim]: [2, 16, 128]
avg_hidden_ctx = np.concatenate(avg_hidden_ctxs, axis=0)
elif args.cortical_model in ['mlp', 'mlp_cc']:
for f in range(n_states):
temp = [hiddens[i,:]
for i, (idx1, idx2) in enumerate(zip(idxs1, idxs2))
if ((idx1==f) | (idx2==f))]
if len(temp)>1:
avg_hidden[f] = np.mean(temp, axis=0)
for ctx in range(args.N_contexts):
temp_ctxs = [hiddens_ctxs[ctx][i]
for i, (idx1, idx2) in enumerate(zip(idxs1_ctxs[ctx], idxs2_ctxs[ctx]))
if ((idx1==f) | (idx2==f))]
if len(temp_ctxs)>1:
avg_hidden_ctxs[ctx, f, :] = np.mean(temp_ctxs, axis=0)
# avg_hidden_ctxs: [n_contexts, n_states, hidden_dim]: [2, 16, 128]
avg_hidden_ctx = np.concatenate(avg_hidden_ctxs, axis=0)
elif args.cortical_model=='stepwisemlp':
# todo: how to do the averaging? over both hidden reps?
        # hiddens_ctxs and hiddens_inc_c for the pca results should have two dimensions
hiddens_ctxs = np.asarray(hiddens_ctxs).squeeze()
for f in range(n_states):
temp1 = [hiddens[0,i,:]
for i, idx1 in enumerate(idxs1) if idx1==f]
temp2 = [hiddens[1,i,:]
for i, idx2 in enumerate(idxs2) if idx2==f]
if len(temp1)>1:
avg_hidden[0,f,:] = np.mean(temp1, axis=0)
if len(temp2)>1:
avg_hidden[1,f,:] = np.mean(temp2, axis=0)
# avg_hidden: [n_hidd, n_states, hidd_dim]: [2,16,128]
for ctx in range(args.N_contexts):
temp1_ctxs = [hiddens_ctxs[0,ctx,i,:]
for i, idx1 in enumerate(idxs1_ctxs[ctx]) if idx1==f]
temp2_ctxs = [hiddens_ctxs[1,ctx,i,:]
for i, idx2 in enumerate(idxs2_ctxs[ctx]) if idx2==f]
if len(temp1_ctxs)>1:
avg_hidden_ctxs[0,ctx,f,:] = np.mean(temp1_ctxs, axis=0)
if len(temp2_ctxs)>1:
avg_hidden_ctxs[1,ctx,f,:] = np.mean(temp2_ctxs, axis=0)
# avg_hidden_ctxs: [n_hidd, n_contexts, n_states, hidden_dim]: [2, 2, 16, 128]
avg_hidden_ctx = np.concatenate(avg_hidden_ctxs, axis=1)
samples_res = {'samples': samples,
'samples_ctxs': samples_ctxs,
'samples_inc_c': samples_inc_c}
results = {'samples_res':samples_res,
'idxs1': idxs1, 'idxs2': idxs2,
'embeddings': embeddings, # [16, 32]
'hiddens_ctx':hiddens_ctx, # mlp/rnn: [384,128] or in stepwisedmlp: [2,384,128]
'hiddens_ctxs':hiddens_ctxs, # mlp: [n_ctx, 192, 1, 128], rnn: [n_ctx, 192, 3, 128]
'avg_hidden':avg_hidden, # [16, 128] or [n_hidd=2, 16, 128]
'avg_hidden_ctx':avg_hidden_ctx, # mlp/rnn: [32, 128] or stepwisedmlp: [n_hidd=2, 32, 128]
               # the reason to keep these is that the concatenation differs per model, and we handle that here
'avg_hidden_ctxs':avg_hidden_ctxs, # [mlp/rnn: n_ctx, 16, 128] or stepwisedmlp: [n_hidd=2, n_ctx, 16, 128]
'hiddens_inc_c': hiddens_inc_c} # mlp/rnn: [288, 128] or stepwisedmlp: [n_hidd=2, 288, 128]
return results
def analyze_accs(args, test_data, cortical_result, dist_results):
    results = {'train_acc': cortical_result['train_acc'],
'test_acc': cortical_result['test_acc'],
'cong_train_acc': cortical_result['cong_train_acc'],
'incong_train_acc': cortical_result['incong_train_acc'],
'cong_test_acc': cortical_result['cong_test_acc'],
'incong_test_acc': cortical_result['incong_test_acc']}
    return results
# cortical_analyze_acc = cortical_result['analyze_acc']
# cortical_analyze_correct = cortical_result['analyze_correct']
def analyze_credit_assignment(args, test_data, cortical_result, dist_results):
    results = {'grad_ctx': cortical_result['grad_ctx'],
'grad_f1': cortical_result['grad_f1'],
'grad_f2': cortical_result['grad_f2'],
'grad_ctx_cong': cortical_result['grad_ctx_cong'],
'grad_f1_cong': cortical_result['grad_f1_cong'],
'grad_f2_cong': cortical_result['grad_f2_cong'],
'grad_ctx_incong': cortical_result['grad_ctx_incong'],
'grad_f1_incong': cortical_result['grad_f1_incong'],
'grad_f2_incong': cortical_result['grad_f2_incong']
}
    return results
def proportions(args, test_data, cortical_result, dist_results):
hiddens_ctxs = cortical_result['hiddens_ctxs'] # list of len [n_ctx]
hiddens_ctxs = [np.concatenate(h, axis=0) for h in hiddens_ctxs] # list of len [n_ctx] each has either [192,128] or [224,128]
# when n_ctx=8, we have diff number of ties, therefore,
# in the first 4 contexts we have [192, 128], and in
# the second 4 contexts (diagonals) we have [224, 128]
# that is why we go over each of the hiddens in hiddens_ctxs
# and then concat them to create [n_trials, hidden_dim] for each
ps = []
p_pies = []
for h in hiddens_ctxs: # h: [n_trials, hidden_dim]
p_pies.append(np.any(h>0, axis=0)) # list of len [n_ctx], each shape [128,]
ps.append(np.mean(h>0, axis=0)) # [n_ctx, 128]
ps = np.asarray(ps)
# ps: [n_ctx, 128]
    # fraction of trials in which each unit was active, for each context
s = np.sum(ps, axis=0, keepdims=True)
# s: [1, hidden_dim], overall activity of each hidden unit,
# if that unit was active at all, over all trials (regardless of the context)
n = ps / s
# n: [n_ctx, hidden_dim]
# normalized - how much each unit is active for each ctx over trials
# normalized by the overall activity of that unit for all ctx and trials
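    # illustration (hypothetical numbers): if a unit is active on 0.6 of the
    # trials in context 0 and 0.2 in context 1, then s = 0.8 for that unit and
    # its column of n is [0.75, 0.25], i.e. 75% of its activity is in context 0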
# f = n > threshold
# there are some NaNs
prop_results = {'hiddens_ctxs': hiddens_ctxs,
                    'p_pies': p_pies, # whether each hidden unit was ever active in any trial, per context
                    'ps': ps, # fraction of trials in which each hidden unit was active, per context
'n': n}
return prop_results
def calc_dist_ctx(args, test_data, cortical_result, dist_results):
    N_contexts = 2  # ToDo: for now this only works for the x and y contexts, because of the angles; overridden by args below
# Useful dictionaries from test dataset
n_states = test_data.n_states
loc2idx = test_data.loc2idx
idx2loc = {idx:loc for loc, idx in loc2idx.items()}
idxs = [idx for idx in range(n_states)]
N_contexts = args.N_contexts
N_responses = args.N_responses
avg_hidden_ctxs = cortical_result['avg_hidden_ctxs'] # [2, 16, 128]
# Correlation
grid_dists = []
hidd_dists_ctxs = [[] for i in range(N_contexts)]
grid_1ds_ctxs = [[] for i in range(N_contexts)]
grid_angles = []
samples = []
for idx1, idx2 in combinations(idxs, 2):
(x1, y1), (x2, y2) = idx2loc[idx1], idx2loc[idx2]
samples.append((idx1, idx2))
grid_dist = np.sqrt((x1-x2)**2 + (y1-y2)**2)
grid_dists.append(grid_dist)
for ctx in range(N_contexts):
# Euclidean distance between hidden reps. in context ctx
if args.cortical_model=='stepwisemlp':
hidd_dist = np.zeros([2])
hidd1, hidd2 = avg_hidden_ctxs[0,ctx,idx1,:], avg_hidden_ctxs[0,ctx,idx2,:]
hidd_dist[0] = np.linalg.norm(hidd1 - hidd2)
hidd1, hidd2 = avg_hidden_ctxs[1,ctx,idx1,:], avg_hidden_ctxs[1,ctx,idx2,:]
hidd_dist[1] = np.linalg.norm(hidd1 - hidd2)
else:
hidd1, hidd2 = avg_hidden_ctxs[ctx][idx1], avg_hidden_ctxs[ctx][idx2]
hidd_dist = np.linalg.norm(hidd1 - hidd2)
hidd_dists_ctxs[ctx].append(hidd_dist)
# 1D rank - Manhattan distance
loc1 = [x1, y1]
loc2 = [x2, y2]
winegrid = WineGrid(N_responses, N_contexts)
r1, r2 = winegrid.ctx_to_r(ctx, loc1, loc2)
grid_1ds_ctxs[ctx].append(np.abs(r1-r2))
# create on and off diagonal groups
grid_angle = np.arctan2((y2-y1),(x2-x1))
grid_angles.append(grid_angle)
grid_dists = np.array(grid_dists) # [(n_states*(nstates-1))/2]: [120]
grid_angles = np.array(grid_angles) # [120]
samples = np.array(samples)
hidd_dists_ctxs = np.array(hidd_dists_ctxs) # [n_ctx, sampels, n_hidds]: in mlp: [2,120], in stepwisemlp: [2,120,2]
phi = np.sin(2*grid_angles)
binary_phi = np.sign(phi)
for i, p in enumerate(phi):
if np.abs(p)<1e-5:
binary_phi[i] = 0
angle_results = {'grid_angles': grid_angles,
'phi': phi,
'binary_phi': binary_phi}
dist_results = {'samples': samples,
'hidd_dists_ctxs': hidd_dists_ctxs,
'grid_1ds_ctxs': grid_1ds_ctxs,
'grid_dists': grid_dists,
'angle_results': angle_results}
return dist_results
def calc_dist(args, test_data, cortical_result, dist_results=None):
# Useful dictionaries from test dataset
n_states = test_data.n_states
loc2idx = test_data.loc2idx
idx2loc = {idx:loc for loc, idx in loc2idx.items()}
idxs = [idx for idx in range(n_states)]
# Correlation
grid_dists = []
cong_grid_dists = []
incong_grid_dists = []
embed_dists = []
hidd_dists = []
cong_hidd_dists = []
incong_hidd_dists = []
cong_embed_dists = []
incong_embed_dists = []
grid_angles = []
cong_grid_angles = []
incong_grid_angles = []
samples = []
embeddings = cortical_result['embeddings']
avg_hidden = cortical_result['avg_hidden'] # [16, 128]
for idx1, idx2 in combinations(idxs, 2):
(x1, y1), (x2, y2) = idx2loc[idx1], idx2loc[idx2]
samples.append((idx1, idx2))
grid_dist = np.sqrt((x1-x2)**2 + (y1-y2)**2)
grid_dists.append(grid_dist)
# Euclidean distance between embeddings
emb1, emb2 = embeddings[idx1], embeddings[idx2]
embed_dist = np.linalg.norm(emb1 - emb2)
embed_dists.append(embed_dist)
# Euclidean distance between hidden reps.
if args.cortical_model=='stepwisemlp':
hidd_dist = np.zeros([2])
hidd1, hidd2 = avg_hidden[0,idx1], avg_hidden[0,idx2]
hidd_dist[0] = np.linalg.norm(hidd1 - hidd2)
hidd1, hidd2 = avg_hidden[1,idx1], avg_hidden[1,idx2]
hidd_dist[1] = np.linalg.norm(hidd1 - hidd2)
else:
hidd1, hidd2 = avg_hidden[idx1], avg_hidden[idx2]
hidd_dist = np.linalg.norm(hidd1 - hidd2)
hidd_dists.append(hidd_dist)
# create on and off diagonal groups
grid_angle = np.arctan2((y2-y1),(x2-x1))
grid_angles.append(grid_angle)
phi = np.sin(2*grid_angle)
if np.abs(phi)<1e-5:
            # for congruent trials,
            # zero out angles that are very close to zero
            # so the sign doesn't turn them into 1 or -1
cong = 0
else:
cong = np.sign(phi) # 1: congruent, -1:incongruent, 0:none
if cong==1:
cong_hidd_dists.append(hidd_dist)
cong_grid_dists.append(grid_dist)
cong_embed_dists.append(embed_dist)
cong_grid_angles.append(grid_angle)
if cong==-1:
incong_hidd_dists.append(hidd_dist)
incong_grid_dists.append(grid_dist)
incong_embed_dists.append(embed_dist)
incong_grid_angles.append(grid_angle)
grid_dists = np.array(grid_dists) # [(n_states*(nstates-1))/2]: [120]
embed_dists = np.array(embed_dists)
hidd_dists = np.array(hidd_dists)
cong_grid_dists = np.array(cong_grid_dists) # [36]
incong_grid_dists = np.array(incong_grid_dists) # [36]
cong_hidd_dists = np.array(cong_hidd_dists)
incong_hidd_dists = np.array(incong_hidd_dists)
cong_embed_dists = np.array(cong_embed_dists)
incong_embed_dists = np.array(incong_embed_dists)
grid_angles = np.array(grid_angles) # [120]
cong_grid_angles = np.array(cong_grid_angles) # [36]
incong_grid_angles = np.array(incong_grid_angles) # [36]
samples = np.array(samples)
phi = np.sin(2*grid_angles)
binary_phi = np.sign(phi)
for i, p in enumerate(phi):
if np.abs(p)<1e-5:
binary_phi[i] = 0
cong_dist_results = {'cong_grid_dists': cong_grid_dists,
'cong_hidd_dists': cong_hidd_dists,
'cong_embed_dists': cong_embed_dists}
incong_dist_results = {'incong_grid_dists': incong_grid_dists,
'incong_hidd_dists': incong_hidd_dists,
'incong_embed_dists': incong_embed_dists}
angle_results = {'grid_angles': grid_angles,
'cong_grid_angles': cong_grid_angles,
'incong_grid_angles': incong_grid_angles,
'phi': phi,
'binary_phi': binary_phi}
dist_results = {'samples': samples,
'grid_dists': grid_dists,
'embed_dists': embed_dists,
'hidd_dists':hidd_dists,
'cong_dist_results': cong_dist_results,
'incong_dist_results': incong_dist_results,
'angle_results': angle_results}
return dist_results
def analyze_dim_red(args, test_data, cortical_result, dist_results, n_components=2):
method = args.dimred_method
n_states = test_data.n_states
loc2idx = test_data.loc2idx
idx2loc = {idx:loc for loc, idx in loc2idx.items()}
idxs = [idx for idx in range(n_states)]
locs = [idx2loc[idx] for idx in idxs]
embeddings = cortical_result['embeddings'] # [16, 32]
hiddens_ctx = cortical_result['hiddens_ctx'] # [384, 128] or in stepwisemlp: [2,384,128]
avg_hidden = cortical_result['avg_hidden'] # [16, 128] or in stepwisemlp: [2,16,128]
avg_hidden_ctx = cortical_result['avg_hidden_ctx'] # [32, 128] or in stepwisemlp: [2,32,128]
hiddens_inc_c = cortical_result['hiddens_inc_c'] # [288, 128] or in stepwisemlp: [2,288,128]
# hiddens_ctx = np.asarray(hiddens_ctxs)
# hiddens_ctxs = np.concatenate(hiddens_ctxs, axis=0).squeeze() # [384, 128] or [384, 3, 128]
# if ((args.cortical_model == 'rnn') or (args.cortical_model == 'rnncell')):
# hiddens_ctx = hiddens_ctx[:,-1, :]
# avg_hidden_ctxs = np.concatenate(avg_hidden_ctxs, axis=0) # [32, 128]
results = {}
# PCA
if method == 'pca':
pca = PCA(n_components=n_components)
pca_2d_embed = pca.fit_transform(embeddings)
if args.cortical_model=='stepwisemlp':
pca_2d_hidd = np.zeros([hiddens_ctx.shape[0], hiddens_ctx.shape[1], n_components])
pca_2d_avg_hidd = np.zeros([avg_hidden.shape[0], avg_hidden.shape[1], n_components])
pca_2d_ctx_hidd = np.zeros([avg_hidden_ctx.shape[0], avg_hidden_ctx.shape[1], n_components])
pca_2d_incong_cong = np.zeros([hiddens_inc_c.shape[0], hiddens_inc_c.shape[1], n_components])
for h in range(hiddens_ctx.shape[0]):
pca_2d_hidd[h,:,:] = pca.fit_transform(hiddens_ctx[h,:,:]) # this is all the hiddens, no averaging for each face
pca_2d_avg_hidd[h,:,:] = pca.fit_transform(avg_hidden[h,:,:])
pca_2d_ctx_hidd[h,:,:] = pca.fit_transform(avg_hidden_ctx[h,:,:])
pca_2d_incong_cong[h,:,:] = pca.fit_transform(hiddens_inc_c[h,:,:])
else:
pca_2d_hidd = pca.fit_transform(hiddens_ctx) # this is all the hiddens, no averaging for each face
            pca_2d_avg_hidd = pca.fit_transform(avg_hidden) # might not need to save this at all
pca_2d_ctx_hidd = pca.fit_transform(avg_hidden_ctx)
pca_2d_incong_cong = pca.fit_transform(hiddens_inc_c)
results = {'embed_2d': pca_2d_embed,
'hidd_2d': pca_2d_hidd,
'avg_hidd_2d': pca_2d_avg_hidd,
'ctx_hidd_2d': pca_2d_ctx_hidd,
'incong_cong_2d': pca_2d_incong_cong,
'grid_locations': locs,
'samples_res': cortical_result['samples_res']}
elif method == 'mds':
# MDS
mds = MDS(n_components=n_components)
mds_2d_embed = mds.fit_transform(embeddings)
mds_2d_hidd = mds.fit_transform(hiddens_ctx) # this is all the hiddens, no averaging for each face
        mds_2d_avg_hidd = mds.fit_transform(avg_hidden) # might not need to save this at all
mds_2d_ctx_hidd = mds.fit_transform(avg_hidden_ctx)
mds_2d_incong_cong = mds.fit_transform(hiddens_inc_c)
results = {'embed_2d': mds_2d_embed,
'hidd_2d': mds_2d_hidd,
'avg_hidd_2d': mds_2d_avg_hidd,
'ctx_hidd_2d': mds_2d_ctx_hidd,
'incong_cong_2d': mds_2d_incong_cong}
elif method == 'tsne':
# tSNE
tsne = TSNE(n_components=n_components)
tsne_2d_embed = tsne.fit_transform(embeddings)
tsne_2d_hidd = tsne.fit_transform(hiddens_ctx) # this is all the hiddens, no averaging for each face
        tsne_2d_avg_hidd = tsne.fit_transform(avg_hidden) # might not need to save this at all
tsne_2d_ctx_hidd = tsne.fit_transform(avg_hidden_ctx)
tsne_2d_incong_cong = tsne.fit_transform(hiddens_inc_c)
results = {'embed_2d': tsne_2d_embed,
'hidd_2d': tsne_2d_hidd,
'avg_hidd_2d': tsne_2d_avg_hidd,
'ctx_hidd_2d': tsne_2d_ctx_hidd,
'incong_cong_2d': tsne_2d_incong_cong}
return results
def hist_data(args, test_data, cortical_result, dist_results):
# embeddings
cong_embed_dists = dist_results['cong_dist_results']['cong_embed_dists']
incong_embed_dists = dist_results['incong_dist_results']['incong_embed_dists']
# hiddens
cong_hidd_dists = dist_results['cong_dist_results']['cong_hidd_dists']
incong_hidd_dists = dist_results['incong_dist_results']['incong_hidd_dists']
dist_c_inc_results = {'cong_embed_dist': cong_embed_dists,
'incong_embed_dist': incong_embed_dists,
'cong_hidd_dist': cong_hidd_dists,
'incong_hidd_dist': incong_hidd_dists}
return dist_c_inc_results
def calc_ratio(args, test_data, cortical_result, dist_results):
# embeddings
cong_embed_dists = dist_results['cong_dist_results']['cong_embed_dists']
incong_embed_dists = dist_results['incong_dist_results']['incong_embed_dists']
avg_cong_embed = np.mean(cong_embed_dists)
avg_incong_embed = np.mean(incong_embed_dists)
ratio_embed = (avg_cong_embed/avg_incong_embed)
# hiddens
cong_hidd_dists = dist_results['cong_dist_results']['cong_hidd_dists']
incong_hidd_dists = dist_results['incong_dist_results']['incong_hidd_dists']
avg_cong_hidd = np.mean(cong_hidd_dists, axis=0)
avg_incong_hidd = np.mean(incong_hidd_dists, axis=0)
# ratio_hidd = (avg_cong_hidd/avg_incong_hidd)
ratio_hidd = (avg_incong_hidd/avg_cong_hidd)
ratio_results = {'ratio_embed': ratio_embed, 'ratio_hidd': ratio_hidd,\
'avg_cong_hidd': avg_cong_hidd, 'avg_incong_hidd': avg_incong_hidd}
return ratio_results
def extract_hidd_dist(dist_results):
# hiddens
cong_hidd_dists = dist_results['cong_dist_results']['cong_hidd_dists']
incong_hidd_dists = dist_results['incong_dist_results']['incong_hidd_dists']
dist_result_hidd = {'cong_hidd_dists': cong_hidd_dists, 'incong_hidd_dists': incong_hidd_dists}
return dist_result_hidd
def analyze_ttest(args, test_data, cortical_result, dist_results):
cong_res = dist_results['cong_dist_results']
incong_res = dist_results['incong_dist_results']
incong_hidd_dists = incong_res['incong_hidd_dists']
cong_hidd_dists = cong_res['cong_hidd_dists']
if args.cortical_model == 'stepwisemlp':
t_hidd, t_p_val_hidd = np.zeros([2]), np.zeros([2])
for h in range(2):
t_hidd[h], t_p_val_hidd[h] = ttest_ind(cong_hidd_dists[:,h], incong_hidd_dists[:,h])
else:
t_hidd, t_p_val_hidd = ttest_ind(cong_res['cong_hidd_dists'],
incong_res['incong_hidd_dists'])
t_embed, t_p_val_embed = ttest_ind(cong_res['cong_embed_dists'],
incong_res['incong_embed_dists'])
t_grid, t_p_val_grid = ttest_ind(cong_res['cong_grid_dists'],
incong_res['incong_grid_dists'])
ttest_results = {'t_stat_hidd':t_hidd, 't_p_val_hidd': t_p_val_hidd,
't_stat_embed':t_embed, 't_p_val_embed': t_p_val_embed,
't_grid':t_grid, 't_p_val_grid': t_p_val_grid}
return ttest_results
def analyze_corr(args, test_data, cortical_result, dist_results):
grid_dists = dist_results['grid_dists']
embed_dists = dist_results['embed_dists']
hidd_dists = dist_results['hidd_dists']
cong_res = dist_results['cong_dist_results']
incong_res = dist_results['incong_dist_results']
r_embed, p_val_embed = pearsonr(grid_dists, embed_dists)
if args.cortical_model == 'stepwisemlp':
r_hidd, p_val_hidd = np.zeros([2]), np.zeros([2])
r_cong_hidd, p_val_cong_hidd, r_incong_hidd, p_val_incong_hidd = \
np.zeros([2]), np.zeros([2]), np.zeros([2]), np.zeros([2])
cong_hidd_dists, incong_hidd_dists = cong_res['cong_hidd_dists'], \
incong_res['incong_hidd_dists']
for h in range(2):
r_hidd[h], p_val_hidd[h] = pearsonr(grid_dists, hidd_dists[:,h])
r_cong_hidd[h], p_val_cong_hidd[h] = pearsonr(cong_res['cong_grid_dists'],
cong_hidd_dists[:,h])
r_incong_hidd[h], p_val_incong_hidd[h] = pearsonr(incong_res['incong_grid_dists'],
incong_hidd_dists[:,h])
else:
r_hidd, p_val_hidd = pearsonr(grid_dists, hidd_dists)
r_cong_hidd, p_val_cong_hidd = pearsonr(cong_res['cong_grid_dists'],
cong_res['cong_hidd_dists'])
r_incong_hidd, p_val_incong_hidd = pearsonr(incong_res['incong_grid_dists'],
incong_res['incong_hidd_dists'])
r_cong_embed, p_val_cong_embed = pearsonr(cong_res['cong_grid_dists'],
cong_res['cong_embed_dists'])
r_incong_embed, p_val_incong_embed = pearsonr(incong_res['incong_grid_dists'],
incong_res['incong_embed_dists'])
corr_results = {'r_embed': r_embed, 'p_val_embed': p_val_embed,
'r_cong_embed': r_cong_embed,
'p_val_cong_embed': p_val_cong_embed,
'r_incong_embed': r_incong_embed,
'p_val_incong_embed': p_val_incong_embed, | 'p_val_incong_hidd': p_val_incong_hidd}
return corr_results
def analyze_regression(args, test_data, cortical_result, dist_results):
hidd_dists = dist_results['hidd_dists']
grid_dists = dist_results['grid_dists']
phi = dist_results['angle_results']['phi']
binary_phi = dist_results['angle_results']['binary_phi']
# prepare data for the regression analysis
x_cat = np.concatenate((grid_dists.reshape((-1,1)), binary_phi.reshape((-1,1))),axis=1)
x_con = np.concatenate((grid_dists.reshape((-1,1)), phi.reshape((-1,1))),axis=1)
# categorical regression analysis
x_cat = sm.add_constant(x_cat)
if args.cortical_model == 'stepwisemlp':
p_val, t_val, param, bse = ([[] for i in range(2)] for i in range(4))
y_hat_E = np.zeros(hidd_dists.shape)
y = np.zeros(hidd_dists.shape)
for h in range(2):
y[:,h] = hidd_dists[:,h]
y_hat_E[:,h], p_val[h], t_val[h], param[h], bse[h] = run_regression(x_cat,y[:,h],grid_dists)
else:
y = hidd_dists
y_hat_E, p_val, t_val, param, bse = run_regression(x_cat,y,grid_dists)
cat_reg = {'p_val': p_val,
't_val': t_val,
'param': param,
'y_hat_E': y_hat_E,
'y': y,
'bse': bse}
# continuous regression analysis
x_con = sm.add_constant(x_con)
if args.cortical_model == 'stepwisemlp':
p_val, t_val, param, bse = ([[] for i in range(2)] for i in range(4))
y_hat_E = np.zeros(hidd_dists.shape)
y = np.zeros(hidd_dists.shape)
for h in range(2):
y[:,h] = hidd_dists[:,h]
y_hat_E[:,h], p_val[h], t_val[h], param[h], bse[h] = run_regression(x_con,y[:,h],grid_dists)
else:
y = hidd_dists
y_hat_E, p_val, t_val, param, bse = run_regression(x_con,y,grid_dists)
con_reg = {'p_val': p_val,
't_val': t_val,
'param': param,
'y_hat_E': y_hat_E,
'y': y,
'bse': bse}
reg_results = {'cat_reg': cat_reg,
'con_reg': con_reg}
return reg_results
def run_regression(x,y,grid_dist):
stats_model = sm.OLS(y,x).fit()
y_hat_E = stats_model.params[0] + (stats_model.params[1]*grid_dist)
p_val, t_val, param, bse = stats_model.pvalues, stats_model.tvalues, \
stats_model.params, stats_model.bse
return y_hat_E, p_val, t_val, param, bse
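# note on run_regression: y_hat_E is the fitted intercept plus the Euclidean
# distance term only (params[0] + params[1] * grid_dist), i.e. the model's
# prediction with the remaining regressors held at zero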
def analyze_regression_1D(args, test_data, cortical_result, dist_results):
# make sure dist_results is dist_ctx_results
hidd_dists_ctxs = dist_results['hidd_dists_ctxs']
hidd_dists_ctx0 = hidd_dists_ctxs[0]
hidd_dists_ctx1 = hidd_dists_ctxs[1]
grid_1ds_ctxs = dist_results['grid_1ds_ctxs']
grid_1ds_ctx0 = grid_1ds_ctxs[0]
grid_1ds_ctx1 = grid_1ds_ctxs[1]
grid_dists = dist_results['grid_dists']
phi = dist_results['angle_results']['phi']
binary_phi = dist_results['angle_results']['binary_phi']
hidd_dists_ctx = np.concatenate((hidd_dists_ctx0, hidd_dists_ctx1), axis=0)
grid_1ds_ctx = np.concatenate((grid_1ds_ctx0, grid_1ds_ctx1), axis=0)
grid_dists_ctx = np.concatenate((grid_dists, grid_dists), axis=0)
binary_phi_ctx = np.concatenate((binary_phi, binary_phi), axis=0)
phi_ctx = np.concatenate((phi, phi), axis=0)
# prepare data for the regression analysis
x_cat = np.concatenate((grid_dists_ctx.reshape((-1,1)), grid_1ds_ctx.reshape((-1,1)),
binary_phi_ctx.reshape((-1,1))),axis=1) # [240, 3]
x_con = np.concatenate((grid_dists_ctx.reshape((-1,1)), grid_1ds_ctx.reshape((-1,1)),
phi_ctx.reshape((-1,1))),axis=1)
# categorical regression analysis
x_cat = sm.add_constant(x_cat)
if args.cortical_model == 'stepwisemlp':
p_val, t_val, param, y_hat_E, y, bse = ([[] for i in range(2)] for i in range(6))
y_hat_E = np.zeros(hidd_dists_ctx.shape)
y = np.zeros(hidd_dists_ctx.shape)
for h in range(2):
y[:,h] = hidd_dists_ctx[:,h]
y_hat_E[:,h], p_val[h], t_val[h], param[h], bse[h] = run_regression(x_cat,y[:,h],grid_dists_ctx)
else:
y = hidd_dists_ctx
y_hat_E, p_val, t_val, param, bse = run_regression(x_cat,y,grid_dists_ctx)
cat_reg = {'p_val': p_val,
't_val': t_val,
'param': param,
'y_hat_E': y_hat_E,
'y': y,
'bse': bse}
# continuous regression analysis
x_con = sm.add_constant(x_con)
if args.cortical_model == 'stepwisemlp':
p_val, t_val, param, bse = ([[] for i in range(2)] for i in range(4))
y_hat_E = np.zeros(hidd_dists_ctx.shape)
y = np.zeros(hidd_dists_ctx.shape)
for h in range(2):
y[:,h] = hidd_dists_ctx[:,h]
y_hat_E[:,h], p_val[h], t_val[h], param[h], bse[h] = run_regression(x_con,y[:,h],grid_dists_ctx)
else:
y = hidd_dists_ctx
y_hat_E, p_val, t_val, param, bse = run_regression(x_con,y,grid_dists_ctx)
con_reg = {'p_val': p_val,
't_val': t_val,
'param': param,
'y_hat_E': y_hat_E,
'y': y,
'bse': bse}
reg_results = {'cat_reg': cat_reg,
'con_reg': con_reg}
return reg_results
def analyze_regression_exc(args, test_data, cortical_result, dist_results):
# Useful dictionaries from test dataset
n_states = test_data.n_states
hidd_dists = dist_results['hidd_dists'] #[n_combinations]: [120]
grid_dists = dist_results['grid_dists']
binary_phi = dist_results['angle_results']['binary_phi'] # [120]
samples = dist_results['samples'] # [120, 2]
states=[]
if args.cortical_model=='stepwisemlp':
p_vals, t_vals, params, bses = ([[] for i in range(2)] for i in range(4))
else:
p_vals, t_vals, params, bses = ([] for i in range(4))
for state in range(n_states):
s_idxs = [i for i, sample in enumerate(samples) if state not in sample] # [105]
# prepare data for the regression analysis
x_cat = np.concatenate((grid_dists[s_idxs].reshape((-1,1)), binary_phi[s_idxs].reshape((-1,1))),axis=1)
# regression analysis
x_cat = sm.add_constant(x_cat)
if args.cortical_model == 'stepwisemlp':
for h in range(2):
y = hidd_dists[s_idxs,h]
_ , p_val, t_val, param, bse = run_regression(x_cat,y,grid_dists)
p_vals[h].append(p_val)
t_vals[h].append(t_val)
params[h].append(param)
bses[h].append(bse)
else:
y = hidd_dists[s_idxs]
_, p_val, t_val, param, bse = run_regression(x_cat,y,grid_dists)
p_vals.append(p_val)
t_vals.append(t_val)
params.append(param)
bses.append(bse)
states.append(state)
# regression analysis - after removing (0,0) and (3,3)
s_idxs = [i for i, sample in enumerate(samples) if ((0 not in sample) & (15 not in sample))] # [91]
x_cat = np.concatenate((grid_dists[s_idxs].reshape((-1,1)), binary_phi[s_idxs].reshape((-1,1))),axis=1)
x_cat = sm.add_constant(x_cat)
if args.cortical_model == 'stepwisemlp':
for h in range(2):
y = hidd_dists[s_idxs,h]
_, p_val, t_val, param, bse = run_regression(x_cat,y,grid_dists)
p_vals[h].append(p_val)
t_vals[h].append(t_val)
params[h].append(param)
bses[h].append(bse)
else:
y = hidd_dists[s_idxs]
_, p_val, t_val, param, bse = run_regression(x_cat,y,grid_dists)
p_vals.append(p_val)
t_vals.append(t_val)
params.append(param)
bses.append(bse)
states.append(16)
    # regression analysis - after removing (0,0) and (3,3), (3,0) and (0,3)
s_idxs = [i for i, sample in enumerate(samples) if ((0 not in sample) & (15 not in sample) &
(3 not in sample) & (12 not in sample))] #[66]
x_cat = np.concatenate((grid_dists[s_idxs].reshape((-1,1)), binary_phi[s_idxs].reshape((-1,1))),axis=1)
x_cat = sm.add_constant(x_cat)
if args.cortical_model == 'stepwisemlp':
for h in range(2):
y = hidd_dists[s_idxs,h]
_, p_val, t_val, param, bse = run_regression(x_cat,y,grid_dists)
p_vals[h].append(p_val)
t_vals[h].append(t_val)
params[h].append(param)
bses[h].append(bse)
else:
y = hidd_dists[s_idxs]
_, p_val, t_val, param, bse = run_regression(x_cat,y,grid_dists)
p_vals.append(p_val)
t_vals.append(t_val)
params.append(param)
bses.append(bse)
states.append(17)
states = np.array(states)
p_vals = np.array(p_vals)
t_vals = np.array(t_vals)
params = np.array(params)
bses = np.array(bses)
exc_reg_results = {'excluded_states': states,
'p_vals': p_vals,
't_vals': t_vals,
'params': params,
'bses': bses}
return exc_reg_results
def analyze_test_seq(args, test_data, cortical_result, dist_results):
import sys
sys.path.append("..")
data = get_loaders(batch_size=32, meta=False,
use_images=True, image_dir='./images/',
n_episodes=None,
N_responses=args.N_responses, N_contexts=args.N_contexts,
                       cortical_task=args.cortical_task, # ToDo: check why this was previously hard-coded to 'face_task'
balanced = args.balanced)
train_data, train_loader, test_data, test_loader, analyze_data, analyze_loader = data
idx2loc = {idx:loc for loc, idx in test_data.loc2idx.items()}
# ctx_order = 'first'
# ctx_order_str = 'ctxF'
analyze_correct = cortical_result['analyze_correct'] # [n_trials, time_steps]: [384, 3]
analyze_correct = np.asarray(analyze_correct).squeeze()
    hidd_t_idx = 1 # time step at which accuracy is read: t = 1 means at face1
    # and t = 2 means at face2
    # with the context presented first (context is at t = 0), it should be t = 1
# create groups based on the row or columns
# e.g, for context0 (xaxis), first column is group 1, sec col is group 2, and so on.
# 4 groups for each axis/context; total 8 groups
    # ToDo: why is it always loc1?
ctx0_g0=[]
ctx0_g1=[]
ctx0_g2=[]
ctx0_g3=[]
ctx1_g0=[]
ctx1_g1=[]
ctx1_g2=[]
ctx1_g3=[]
for i, batch in enumerate(analyze_loader):
if args.cortical_task == 'face_task':
f1, f2, ctx, y, idx1, idx2 = batch # face1, face2, context, y, index1, index2
elif args.cortical_task == 'wine_task':
f1, f2, ctx, y1, y2, idx1, idx2 = batch # face1, face2, context, y1, y2, index1, index2
msg = 'analyze_test_seq is only implemented for one response, two contexts'
assert args.N_responses == 'one' and args.N_contexts == 2, msg
        if args.cortical_task == 'wine_task' and args.N_responses == 'one':
            y = y1  # face_task batches already unpack y directly above
# f1, f2, ax, y, idx1, idx2 = batch
acc = analyze_correct[i][hidd_t_idx]
ctx = ctx.cpu().numpy().squeeze()
idx1 = idx1[0]
idx2 = idx2[0]
loc1 = idx2loc[idx1]
loc2 = idx2loc[idx2]
if ctx==0:
if loc1[ctx]==0: ctx0_g0.append(acc) # (len(all_perms)/2) / 4 = [48]
elif loc1[ctx]==1: ctx0_g1.append(acc)
elif loc1[ctx]==2: ctx0_g2.append(acc)
elif loc1[ctx]==3: ctx0_g3.append(acc)
elif ctx==1:
if loc1[ctx]==0: ctx1_g0.append(acc)
elif loc1[ctx]==1: ctx1_g1.append(acc)
elif loc1[ctx]==2: ctx1_g2.append(acc)
elif loc1[ctx]==3: ctx1_g3.append(acc)
ctx0_accs = [np.mean(ctx0_g0), np.mean(ctx0_g1),
np.mean(ctx0_g2), np.mean(ctx0_g3) ]
ctx1_accs = [np.mean(ctx1_g0), np.mean(ctx1_g1),
np.mean(ctx1_g2), np.mean(ctx1_g3) ]
    # print('Accuracy at t=%s (face%s) context 0:' %(hidd_t_idx,hidd_t_idx), ctx0_accs)
    # print('Accuracy at t=%s (face%s) context 1:' %(hidd_t_idx,hidd_t_idx), ctx1_accs)
return ctx0_accs, ctx1_accs | 'r_hidd': r_hidd, 'p_val_hidd': p_val_hidd,
'r_cong_hidd': r_cong_hidd,
'p_val_cong_hidd': p_val_cong_hidd,
'r_incong_hidd': r_incong_hidd, |
Parser.py | # !/usr/bin/env python
# -*- coding:utf-8 -*-
from bs4 import BeautifulSoup
class | (object):
def _pre_test(self, url, soup):
bad_links = {}
if soup.title == "手机搜狐" and url != 'http://m.sohu.com':
bad_links[url] = 404
# todo:坏链处理。。。
return False, bad_links
return True, None
    # todo: strip js
def _get_new_urls(self, soup):
new_urls = []
for link in soup.find_all('a'):
            # todo: remove this try
            # try:
            link_href = unicode(link.get('href')).encode('utf-8')
            # except:
            #     print link.get('href')
            # todo: handle external links properly
            if link_href.startswith('/'):
                new_url = 'http://m.sohu.com' + link_href
            elif link_href.startswith('http://m.sohu.com'):
                new_url = link_href
            else:
                # external link: skip it rather than re-appending the previous url
                print u'external link:', link_href
                continue
            new_urls.append(new_url)
return new_urls
def parse(self, page_url, html_content):
if html_content is None:
return
soup = BeautifulSoup(html_content, 'html.parser', from_encoding='utf-8')
state, bad_links = self._pre_test(page_url, soup)
if state:
new_urls = self._get_new_urls(soup)
return True, new_urls
else:
return False, bad_links
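# usage sketch (hypothetical driver; html_content would come from a fetcher):
#   parser = Parser()
#   ok, payload = parser.parse('http://m.sohu.com', html_content)
#   if ok:
#       new_urls = payload   # internal m.sohu.com links found on the page
#   else:
#       bad_links = payload  # {url: 404} entries for dead pages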
| Parser |
object_reference.rs | // Generated from definition io.k8s.api.core.v1.ObjectReference
/// ObjectReference contains enough information to let you inspect or modify the referred object.
#[derive(Clone, Debug, Default, PartialEq)]
pub struct ObjectReference {
/// API version of the referent.
pub api_version: Option<String>,
/// If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers\[2\]. For example, if the object reference is to a container within a pod, this would take on a value like: "spec.containers{name}" (where "name" refers to the name of the container that triggered the event) or if no container name is specified "spec.containers\[2\]" (container with index 2 in this pod). This syntax is chosen only to have some well-defined way of referencing a part of an object.
pub field_path: Option<String>,
/// Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
pub kind: Option<String>,
/// Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
pub name: Option<String>,
/// Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/
pub namespace: Option<String>,
/// Specific resourceVersion to which this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency
pub resource_version: Option<String>,
/// UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids
pub uid: Option<String>,
}
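// Example (illustration only; the names are hypothetical): referencing a
// container inside a pod via the `field_path` syntax documented above.
//
// let _container_ref = ObjectReference {
//     api_version: Some("v1".to_owned()),
//     kind: Some("Pod".to_owned()),
//     name: Some("my-pod".to_owned()),
//     namespace: Some("default".to_owned()),
//     field_path: Some("spec.containers{app}".to_owned()),
//     ..Default::default()
// };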
impl<'de> crate::serde::Deserialize<'de> for ObjectReference {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: crate::serde::Deserializer<'de> {
#[allow(non_camel_case_types)]
enum Field {
Key_api_version,
Key_field_path,
Key_kind,
Key_name,
Key_namespace,
Key_resource_version,
Key_uid,
Other,
}
impl<'de> crate::serde::Deserialize<'de> for Field {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: crate::serde::Deserializer<'de> {
struct Visitor;
impl<'de> crate::serde::de::Visitor<'de> for Visitor {
type Value = Field;
fn expecting(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.write_str("field identifier")
}
fn visit_str<E>(self, v: &str) -> Result<Self::Value, E> where E: crate::serde::de::Error {
Ok(match v {
"apiVersion" => Field::Key_api_version,
"fieldPath" => Field::Key_field_path,
"kind" => Field::Key_kind,
"name" => Field::Key_name,
"namespace" => Field::Key_namespace,
"resourceVersion" => Field::Key_resource_version,
"uid" => Field::Key_uid,
_ => Field::Other,
})
}
}
deserializer.deserialize_identifier(Visitor)
}
}
struct Visitor;
impl<'de> crate::serde::de::Visitor<'de> for Visitor {
type Value = ObjectReference;
fn expecting(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.write_str("ObjectReference")
}
fn visit_map<A>(self, mut map: A) -> Result<Self::Value, A::Error> where A: crate::serde::de::MapAccess<'de> {
let mut value_api_version: Option<String> = None;
let mut value_field_path: Option<String> = None;
let mut value_kind: Option<String> = None;
let mut value_name: Option<String> = None;
let mut value_namespace: Option<String> = None;
let mut value_resource_version: Option<String> = None;
let mut value_uid: Option<String> = None;
while let Some(key) = crate::serde::de::MapAccess::next_key::<Field>(&mut map)? {
match key {
Field::Key_api_version => value_api_version = crate::serde::de::MapAccess::next_value(&mut map)?,
Field::Key_field_path => value_field_path = crate::serde::de::MapAccess::next_value(&mut map)?,
Field::Key_kind => value_kind = crate::serde::de::MapAccess::next_value(&mut map)?,
Field::Key_name => value_name = crate::serde::de::MapAccess::next_value(&mut map)?,
Field::Key_namespace => value_namespace = crate::serde::de::MapAccess::next_value(&mut map)?,
Field::Key_resource_version => value_resource_version = crate::serde::de::MapAccess::next_value(&mut map)?,
Field::Key_uid => value_uid = crate::serde::de::MapAccess::next_value(&mut map)?,
Field::Other => { let _: crate::serde::de::IgnoredAny = crate::serde::de::MapAccess::next_value(&mut map)?; },
}
}
Ok(ObjectReference {
api_version: value_api_version,
field_path: value_field_path,
kind: value_kind,
name: value_name,
namespace: value_namespace,
resource_version: value_resource_version,
uid: value_uid,
})
}
}
deserializer.deserialize_struct(
"ObjectReference",
&[
"apiVersion",
"fieldPath",
"kind",
"name",
"namespace",
"resourceVersion",
"uid",
],
Visitor,
)
}
}
impl crate::serde::Serialize for ObjectReference {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where S: crate::serde::Serializer {
let mut state = serializer.serialize_struct(
"ObjectReference",
self.api_version.as_ref().map_or(0, |_| 1) +
self.field_path.as_ref().map_or(0, |_| 1) +
self.kind.as_ref().map_or(0, |_| 1) +
self.name.as_ref().map_or(0, |_| 1) +
self.namespace.as_ref().map_or(0, |_| 1) +
self.resource_version.as_ref().map_or(0, |_| 1) +
self.uid.as_ref().map_or(0, |_| 1),
)?;
if let Some(value) = &self.api_version {
crate::serde::ser::SerializeStruct::serialize_field(&mut state, "apiVersion", value)?;
}
if let Some(value) = &self.field_path {
crate::serde::ser::SerializeStruct::serialize_field(&mut state, "fieldPath", value)?;
}
if let Some(value) = &self.kind {
crate::serde::ser::SerializeStruct::serialize_field(&mut state, "kind", value)?;
}
if let Some(value) = &self.name {
crate::serde::ser::SerializeStruct::serialize_field(&mut state, "name", value)?;
}
if let Some(value) = &self.namespace {
crate::serde::ser::SerializeStruct::serialize_field(&mut state, "namespace", value)?;
}
if let Some(value) = &self.resource_version {
crate::serde::ser::SerializeStruct::serialize_field(&mut state, "resourceVersion", value)?;
}
if let Some(value) = &self.uid {
crate::serde::ser::SerializeStruct::serialize_field(&mut state, "uid", value)?;
}
crate::serde::ser::SerializeStruct::end(state)
}
}
#[cfg(feature = "schemars")]
impl crate::schemars::JsonSchema for ObjectReference {
fn schema_name() -> String {
"io.k8s.api.core.v1.ObjectReference".to_owned()
}
fn json_schema(__gen: &mut crate::schemars::gen::SchemaGenerator) -> crate::schemars::schema::Schema {
crate::schemars::schema::Schema::Object(crate::schemars::schema::SchemaObject {
metadata: Some(Box::new(crate::schemars::schema::Metadata {
description: Some("ObjectReference contains enough information to let you inspect or modify the referred object.".to_owned()),
..Default::default()
})),
instance_type: Some(crate::schemars::schema::SingleOrVec::Single(Box::new(crate::schemars::schema::InstanceType::Object))),
object: Some(Box::new(crate::schemars::schema::ObjectValidation {
properties: IntoIterator::into_iter([
(
"apiVersion".to_owned(),
crate::schemars::schema::Schema::Object(crate::schemars::schema::SchemaObject {
metadata: Some(Box::new(crate::schemars::schema::Metadata {
description: Some("API version of the referent.".to_owned()),
..Default::default()
})),
instance_type: Some(crate::schemars::schema::SingleOrVec::Single(Box::new(crate::schemars::schema::InstanceType::String))),
..Default::default()
}),
),
(
"fieldPath".to_owned(),
crate::schemars::schema::Schema::Object(crate::schemars::schema::SchemaObject {
metadata: Some(Box::new(crate::schemars::schema::Metadata {
description: Some("If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. For example, if the object reference is to a container within a pod, this would take on a value like: \"spec.containers{name}\" (where \"name\" refers to the name of the container that triggered the event) or if no container name is specified \"spec.containers[2]\" (container with index 2 in this pod). This syntax is chosen only to have some well-defined way of referencing a part of an object.".to_owned()),
..Default::default()
})),
instance_type: Some(crate::schemars::schema::SingleOrVec::Single(Box::new(crate::schemars::schema::InstanceType::String))),
..Default::default()
}),
),
(
"kind".to_owned(),
crate::schemars::schema::Schema::Object(crate::schemars::schema::SchemaObject {
metadata: Some(Box::new(crate::schemars::schema::Metadata {
description: Some("Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds".to_owned()),
..Default::default()
})),
instance_type: Some(crate::schemars::schema::SingleOrVec::Single(Box::new(crate::schemars::schema::InstanceType::String))),
..Default::default()
}),
),
(
"name".to_owned(),
crate::schemars::schema::Schema::Object(crate::schemars::schema::SchemaObject {
metadata: Some(Box::new(crate::schemars::schema::Metadata {
description: Some("Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names".to_owned()),
..Default::default()
})),
instance_type: Some(crate::schemars::schema::SingleOrVec::Single(Box::new(crate::schemars::schema::InstanceType::String))),
..Default::default()
}),
),
(
"namespace".to_owned(),
crate::schemars::schema::Schema::Object(crate::schemars::schema::SchemaObject {
metadata: Some(Box::new(crate::schemars::schema::Metadata {
description: Some("Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/".to_owned()),
..Default::default()
})),
instance_type: Some(crate::schemars::schema::SingleOrVec::Single(Box::new(crate::schemars::schema::InstanceType::String))),
..Default::default()
}),
),
(
"resourceVersion".to_owned(),
crate::schemars::schema::Schema::Object(crate::schemars::schema::SchemaObject {
metadata: Some(Box::new(crate::schemars::schema::Metadata {
description: Some("Specific resourceVersion to which this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency".to_owned()),
..Default::default()
})),
instance_type: Some(crate::schemars::schema::SingleOrVec::Single(Box::new(crate::schemars::schema::InstanceType::String))), | }),
),
(
"uid".to_owned(),
crate::schemars::schema::Schema::Object(crate::schemars::schema::SchemaObject {
metadata: Some(Box::new(crate::schemars::schema::Metadata {
description: Some("UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids".to_owned()),
..Default::default()
})),
instance_type: Some(crate::schemars::schema::SingleOrVec::Single(Box::new(crate::schemars::schema::InstanceType::String))),
..Default::default()
}),
),
]).collect(),
..Default::default()
})),
..Default::default()
})
}
} | ..Default::default() |
getOptions.ts | import * as _ from 'lodash';
import defaultOptions from 'defaultOptions';
import {ConvertOptions} from 'typeScript/interfaces/ConvertInterfaces';
/**
 * Get the conversion options.
 * @param {Object} options - Options selected by the user.
 * @return {Object} The conversion options.
*/
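// Illustrative usage (a sketch, not from the original source): calling
// getOptions({ currency: 'USD' }) returns the defaults with only the
// currency option replaced, because updateOptions below copies a value
// only when its type matches the default (or the key is 'currency').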
const getOptions = (options = {}): ConvertOptions => {
  // Default options
const resultOptions = _.cloneDeep(defaultOptions);
  // Override the defaults with the user-selected options when they are valid
const updateOptions = (currentOptions: any, newOptions: any) => {
Object.keys(currentOptions).forEach((key) => {
      // If this option needs to be changed
if (newOptions[key] !== undefined) {
        // If this option is an object
if (
typeof currentOptions[key] === 'object' &&
key !== 'currency'
) {
          // Descend into this object (recursion)
updateOptions(currentOptions[key], newOptions[key]);
} else {
          // If this option is not an object
          // If the data types are the same, or the key is currency[string/object] | if (
typeof currentOptions[key] === typeof newOptions[key] ||
(
key === 'currency' &&
(
typeof newOptions[key] === 'string' ||
typeof newOptions[key] === 'object'
)
)
) {
// Заменить новым значением
currentOptions[key] = newOptions[key];
}
}
}
});
};
updateOptions(resultOptions, options);
return resultOptions;
};
export default getOptions; | |
index.js | // import PropTypes from 'prop-types';
import { useGetAllGamesQuery } from 'hooks/api/games';
import GameNewButton from 'containers/game-new-button';
import Spinner from 'components/spinner'; |
function GameList() {
const { data, isLoading } = useGetAllGamesQuery();
if (isLoading) return <Spinner />;
return (
<>
<h2>Games</h2>
<GameNewButton className="corner-btn" />
{!data || data?.length === 0 ? (
<p>No games have been added for this developer.</p>
) : (
<p>Below is your list of games.</p>
)}
<div className="grid-container">
        {data?.map((item) => {
const { _id: id, _coverImage: coverImage } = item;
return <GameRow key={id} coverImage={coverImage} {...item} />;
})}
</div>
</>
);
}
GameList.propTypes = {};
GameList.defaultProps = {};
export default GameList; | import GameRow from './game-row'; |
test_volumes_list.py | # Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.compute import base
from tempest.common.utils import data_utils
from tempest.common import waiters
from tempest import config
from tempest import test
CONF = config.CONF
class VolumesTestJSON(base.BaseV2ComputeTest):
# NOTE: This test creates a number of 1G volumes. To run successfully,
# ensure that the backing file for the volume group that Nova uses
# has space for at least 3 1G volumes!
# If you are running a Devstack environment, ensure that the
# VOLUME_BACKING_FILE_SIZE is at least 4G in your localrc
@classmethod
def skip_checks(cls):
super(VolumesTestJSON, cls).skip_checks()
if not CONF.service_available.cinder:
skip_msg = ("%s skipped as Cinder is not available" % cls.__name__)
raise cls.skipException(skip_msg)
@classmethod
def setup_clients(cls):
super(VolumesTestJSON, cls).setup_clients()
cls.client = cls.volumes_extensions_client
@classmethod
def resource_setup(cls):
super(VolumesTestJSON, cls).resource_setup()
# Create 3 Volumes
cls.volume_list = []
cls.volume_id_list = []
for i in range(3):
v_name = data_utils.rand_name('volume')
metadata = {'Type': 'work'}
try:
volume = cls.client.create_volume(size=CONF.volume.volume_size,
display_name=v_name,
metadata=metadata)['volume']
waiters.wait_for_volume_status(cls.client,
volume['id'], 'available')
volume = cls.client.show_volume(volume['id'])['volume']
cls.volume_list.append(volume)
cls.volume_id_list.append(volume['id'])
except Exception:
if cls.volume_list:
# We could not create all the volumes, though we were able
# to create *some* of the volumes. This is typically
# because the backing file size of the volume group is
# too small. So, here, we clean up whatever we did manage
# to create and raise a SkipTest
for volume in cls.volume_list:
cls.delete_volume(volume['id'])
msg = ("Failed to create ALL necessary volumes to run "
"test. This typically means that the backing file "
"size of the nova-volumes group is too small to "
"create the 3 volumes needed by this test case")
raise cls.skipException(msg)
raise
@classmethod
def resource_cleanup(cls):
# Delete the created Volumes
for volume in cls.volume_list:
cls.delete_volume(volume['id'])
super(VolumesTestJSON, cls).resource_cleanup()
@test.idempotent_id('bc2dd1a0-15af-48e5-9990-f2e75a48325d')
def test_volume_list(self):
# Should return the list of Volumes
# Fetch all Volumes
fetched_list = self.client.list_volumes()['volumes']
# Now check if all the Volumes created in setup are in fetched list
missing_volumes = [
v for v in self.volume_list if v not in fetched_list
]
self.assertFalse(missing_volumes,
"Failed to find volume %s in fetched list" %
', '.join(m_vol['displayName']
for m_vol in missing_volumes))
@test.idempotent_id('bad0567a-5a4f-420b-851e-780b55bb867c')
def test_volume_list_with_details(self):
# Should return the list of Volumes with details
# Fetch all Volumes
fetched_list = self.client.list_volumes(detail=True)['volumes']
# Now check if all the Volumes created in setup are in fetched list
missing_volumes = [
v for v in self.volume_list if v not in fetched_list
]
self.assertFalse(missing_volumes,
"Failed to find volume %s in fetched list" %
', '.join(m_vol['displayName']
for m_vol in missing_volumes))
@test.idempotent_id('1048ed81-2baf-487a-b284-c0622b86e7b8')
def test_volume_list_param_limit(self): | self.assertEqual(len(fetched_vol_list), params['limit'],
"Failed to list volumes by limit set")
@test.idempotent_id('33985568-4965-49d5-9bcc-0aa007ca5b7a')
def test_volume_list_with_detail_param_limit(self):
# Return the list of volumes with details based on limit set.
params = {'limit': 2}
fetched_vol_list = self.client.list_volumes(detail=True,
**params)['volumes']
self.assertEqual(len(fetched_vol_list), params['limit'],
"Failed to list volume details by limit set")
@test.idempotent_id('51c22651-a074-4ea7-af0b-094f9331303e')
def test_volume_list_param_offset_and_limit(self):
# Return the list of volumes based on offset and limit set.
# get all volumes list
all_vol_list = self.client.list_volumes()['volumes']
params = {'offset': 1, 'limit': 1}
fetched_vol_list = self.client.list_volumes(**params)['volumes']
# Validating length of the fetched volumes
self.assertEqual(len(fetched_vol_list), params['limit'],
"Failed to list volumes by offset and limit")
# Validating offset of fetched volume
for index, volume in enumerate(fetched_vol_list):
self.assertEqual(volume['id'],
all_vol_list[index + params['offset']]['id'],
"Failed to list volumes by offset and limit")
@test.idempotent_id('06b6abc4-3f10-48e9-a7a1-3facc98f03e5')
def test_volume_list_with_detail_param_offset_and_limit(self):
# Return the list of volumes details based on offset and limit set.
# get all volumes list
all_vol_list = self.client.list_volumes(detail=True)['volumes']
params = {'offset': 1, 'limit': 1}
fetched_vol_list = self.client.list_volumes(detail=True,
**params)['volumes']
# Validating length of the fetched volumes
self.assertEqual(len(fetched_vol_list), params['limit'],
"Failed to list volume details by offset and limit")
# Validating offset of fetched volume
for index, volume in enumerate(fetched_vol_list):
self.assertEqual(volume['id'],
all_vol_list[index + params['offset']]['id'],
"Failed to list volume details by "
"offset and limit") | # Return the list of volumes based on limit set
params = {'limit': 2}
fetched_vol_list = self.client.list_volumes(**params)['volumes']
|
tests.rs | use crate::client::HttpClientBuilder;
use jsonrpsee_types::{
error::Error,
jsonrpc::{self, ErrorCode, JsonValue, Params},
traits::Client,
};
use jsonrpsee_test_utils::helpers::*;
use jsonrpsee_test_utils::types::Id;
#[tokio::test]
async fn method_call_works() {
let result = run_request_with_response(ok_response("hello".into(), Id::Num(0))).await.unwrap();
assert_eq!(JsonValue::String("hello".into()), result);
}
#[tokio::test]
async fn notification_works() {
let server_addr = http_server_with_hardcoded_response(String::new()).await;
let uri = format!("http://{}", server_addr);
let client = HttpClientBuilder::default().build(&uri).unwrap();
client
.notification("i_dont_care_about_the_response_because_the_server_should_not_respond", Params::None)
.await
.unwrap();
}
#[tokio::test]
async fn | () {
let err = run_request_with_response(ok_response("hello".into(), Id::Num(99))).await.unwrap_err();
assert!(matches!(err, Error::InvalidRequestId));
}
#[tokio::test]
async fn response_method_not_found() {
let err = run_request_with_response(method_not_found(Id::Num(0))).await.unwrap_err();
assert_jsonrpc_error_response(err, ErrorCode::MethodNotFound, METHOD_NOT_FOUND.into());
}
#[tokio::test]
async fn response_parse_error() {
let err = run_request_with_response(parse_error(Id::Num(0))).await.unwrap_err();
assert_jsonrpc_error_response(err, ErrorCode::ParseError, PARSE_ERROR.into());
}
#[tokio::test]
async fn invalid_request_works() {
let err = run_request_with_response(invalid_request(Id::Num(0_u64))).await.unwrap_err();
assert_jsonrpc_error_response(err, ErrorCode::InvalidRequest, INVALID_REQUEST.into());
}
#[tokio::test]
async fn invalid_params_works() {
let err = run_request_with_response(invalid_params(Id::Num(0_u64))).await.unwrap_err();
assert_jsonrpc_error_response(err, ErrorCode::InvalidParams, INVALID_PARAMS.into());
}
#[tokio::test]
async fn internal_error_works() {
let err = run_request_with_response(internal_error(Id::Num(0_u64))).await.unwrap_err();
assert_jsonrpc_error_response(err, ErrorCode::InternalError, INTERNAL_ERROR.into());
}
#[tokio::test]
async fn subscription_response_to_request() {
let req = r#"{"jsonrpc":"2.0","method":"subscribe_hello","params":{"subscription":"3px4FrtxSYQ1zBKW154NoVnrDhrq764yQNCXEgZyM6Mu","result":"hello my friend"}}"#.to_string();
let err = run_request_with_response(req).await.unwrap_err();
assert!(matches!(err, Error::InvalidResponse(_)));
}
#[tokio::test]
async fn batch_request_works() {
let batch_request = vec![
("say_hello".to_string(), Params::None),
("say_goodbye".to_string(), Params::Array(vec![0.into(), 1.into(), 2.into()])),
("get_swag".to_string(), Params::None),
];
let server_response = r#"[{"jsonrpc":"2.0","result":"hello","id":0}, {"jsonrpc":"2.0","result":"goodbye","id":1}, {"jsonrpc":"2.0","result":"here's your swag","id":2}]"#.to_string();
let response = run_batch_request_with_response(batch_request, server_response).await.unwrap();
assert_eq!(response, vec!["hello".to_string(), "goodbye".to_string(), "here's your swag".to_string()]);
}
#[tokio::test]
async fn batch_request_out_of_order_response() {
let batch_request = vec![
("say_hello".to_string(), Params::None),
("say_goodbye".to_string(), Params::Array(vec![0.into(), 1.into(), 2.into()])),
("get_swag".to_string(), Params::None),
];
let server_response = r#"[{"jsonrpc":"2.0","result":"here's your swag","id":2}, {"jsonrpc":"2.0","result":"hello","id":0}, {"jsonrpc":"2.0","result":"goodbye","id":1}]"#.to_string();
let response = run_batch_request_with_response(batch_request, server_response).await.unwrap();
assert_eq!(response, vec!["hello".to_string(), "goodbye".to_string(), "here's your swag".to_string()]);
}
async fn run_batch_request_with_response(batch: Vec<(String, Params)>, response: String) -> Result<Vec<String>, Error> {
let server_addr = http_server_with_hardcoded_response(response).await;
let uri = format!("http://{}", server_addr);
let client = HttpClientBuilder::default().build(&uri).unwrap();
client.batch_request(batch).await
}
async fn run_request_with_response(response: String) -> Result<JsonValue, Error> {
let server_addr = http_server_with_hardcoded_response(response).await;
let uri = format!("http://{}", server_addr);
let client = HttpClientBuilder::default().build(&uri).unwrap();
client.request("say_hello", Params::None).await
}
fn assert_jsonrpc_error_response(response: Error, code: ErrorCode, message: String) {
let expected = jsonrpc::Error { code, message, data: None };
match response {
Error::Request(err) => {
assert_eq!(err, expected);
}
		e => panic!("Expected error: \"{}\", got: {:?}", expected, e),
};
}
| response_with_wrong_id |
microsoftgraphcalendar_permission.py | # coding=utf-8
# --------------------------------------------------------------------------
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class | (Model):
"""MicrosoftgraphcalendarPermission.
:param id:
:type id: str
:param email_address:
:type email_address: ~users.models.MicrosoftgraphemailAddress
:param is_removable:
:type is_removable: bool
:param is_inside_organization:
:type is_inside_organization: bool
:param role: Possible values include: 'none', 'freeBusyRead',
'limitedRead', 'read', 'write', 'delegateWithoutPrivateEventAccess',
'delegateWithPrivateEventAccess', 'custom'
:type role: str or ~users.models.enum
:param allowed_roles:
:type allowed_roles: list[str]
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'email_address': {'key': 'emailAddress', 'type': 'MicrosoftgraphemailAddress'},
'is_removable': {'key': 'isRemovable', 'type': 'bool'},
'is_inside_organization': {'key': 'isInsideOrganization', 'type': 'bool'},
'role': {'key': 'role', 'type': 'str'},
'allowed_roles': {'key': 'allowedRoles', 'type': '[str]'},
}
def __init__(self, id=None, email_address=None, is_removable=None, is_inside_organization=None, role=None, allowed_roles=None):
super(MicrosoftgraphcalendarPermission, self).__init__()
self.id = id
self.email_address = email_address
self.is_removable = is_removable
self.is_inside_organization = is_inside_organization
self.role = role
self.allowed_roles = allowed_roles
| MicrosoftgraphcalendarPermission |
GPUComputationRenderer.js | /**
* @author yomboprime https://github.com/yomboprime
*
* GPUComputationRenderer, based on SimulationRenderer by zz85
*
* The GPUComputationRenderer uses the concept of variables. These variables are RGBA float textures that hold 4 floats
* for each compute element (texel)
*
* Each variable has a fragment shader that defines the computation made to obtain the variable in question.
 * You can use as many variables as you need, and make dependencies so you can use textures of other variables in the shader
* (the sampler uniforms are added automatically) Most of the variables will need themselves as dependency.
*
* The renderer has actually two render targets per variable, to make ping-pong. Textures from the current frame are used
* as inputs to render the textures of the next frame.
*
* The render targets of the variables can be used as input textures for your visualization shaders.
*
* Variable names should be valid identifiers and should not collide with THREE GLSL used identifiers.
 * A common approach could be to prefix the variable name with 'texture'; i.e. texturePosition, textureVelocity...
*
* The size of the computation (sizeX * sizeY) is defined as 'resolution' automatically in the shader. For example:
 * #define resolution vec2( 1024.0, 1024.0 )
*
* -------------
*
* Basic use:
*
* // Initialization...
*
* // Create computation renderer
* var gpuCompute = new GPUComputationRenderer( 1024, 1024, renderer );
*
* // Create initial state float textures
* var pos0 = gpuCompute.createTexture();
* var vel0 = gpuCompute.createTexture();
 * // and fill in the texture data here...
*
* // Add texture variables
* var velVar = gpuCompute.addVariable( "textureVelocity", fragmentShaderVel, pos0 );
* var posVar = gpuCompute.addVariable( "texturePosition", fragmentShaderPos, vel0 );
*
* // Add variable dependencies
* gpuCompute.setVariableDependencies( velVar, [ velVar, posVar ] );
* gpuCompute.setVariableDependencies( posVar, [ velVar, posVar ] );
*
* // Add custom uniforms
* velVar.material.uniforms.time = { value: 0.0 };
*
* // Check for completeness
* var error = gpuCompute.init();
* if ( error !== null ) {
* console.error( error );
* }
*
*
* // In each frame...
*
* // Compute!
* gpuCompute.compute();
*
* // Update texture uniforms in your visualization materials with the gpu renderer output
* myMaterial.uniforms.myTexture.value = gpuCompute.getCurrentRenderTarget( posVar ).texture;
*
* // Do your rendering
* renderer.render( myScene, myCamera );
*
* -------------
*
* Also, you can use utility functions to create ShaderMaterial and perform computations (rendering between textures)
* Note that the shaders can have multiple input textures.
*
* var myFilter1 = gpuCompute.createShaderMaterial( myFilterFragmentShader1, { theTexture: { value: null } } );
* var myFilter2 = gpuCompute.createShaderMaterial( myFilterFragmentShader2, { theTexture: { value: null } } );
*
* var inputTexture = gpuCompute.createTexture();
*
 * // Fill in inputTexture here...
*
* myFilter1.uniforms.theTexture.value = inputTexture;
*
* var myRenderTarget = gpuCompute.createRenderTarget();
* myFilter2.uniforms.theTexture.value = myRenderTarget.texture;
*
* var outputRenderTarget = gpuCompute.createRenderTarget();
*
* // Now use the output texture where you want:
* myMaterial.uniforms.map.value = outputRenderTarget.texture;
*
* // And compute each frame, before rendering to screen:
* gpuCompute.doRenderTarget( myFilter1, myRenderTarget );
* gpuCompute.doRenderTarget( myFilter2, outputRenderTarget );
*
*
*
* @param {int} sizeX Computation problem size is always 2d: sizeX * sizeY elements.
* @param {int} sizeY Computation problem size is always 2d: sizeX * sizeY elements.
* @param {WebGLRenderer} renderer The renderer
*/
import * as THREE from 'three';

export default function GPUComputationRenderer( sizeX, sizeY, renderer ) {
this.variables = [];
this.currentTextureIndex = 0;
var scene = new THREE.Scene();
var camera = new THREE.Camera();
camera.position.z = 1;
var passThruUniforms = {
texture: { value: null }
};
var passThruShader = createShaderMaterial( getPassThroughFragmentShader(), passThruUniforms );
var mesh = new THREE.Mesh( new THREE.PlaneBufferGeometry( 2, 2 ), passThruShader );
scene.add( mesh );
this.addVariable = function( variableName, computeFragmentShader, initialValueTexture ) {
var material = this.createShaderMaterial( computeFragmentShader );
var variable = {
name: variableName,
initialValueTexture: initialValueTexture,
material: material,
dependencies: null,
renderTargets: [],
wrapS: null,
wrapT: null,
minFilter: THREE.NearestFilter,
magFilter: THREE.NearestFilter
};
this.variables.push( variable );
return variable;
};
this.setVariableDependencies = function( variable, dependencies ) {
variable.dependencies = dependencies;
};
this.init = function() {
if ( ! renderer.extensions.get( "OES_texture_float" ) ) {
return "No OES_texture_float support for float textures.";
}
if ( renderer.capabilities.maxVertexTextures === 0 ) {
return "No support for vertex shader textures.";
}
for ( var i = 0; i < this.variables.length; i++ ) {
var variable = this.variables[ i ];
// Creates rendertargets and initialize them with input texture
variable.renderTargets[ 0 ] = this.createRenderTarget( sizeX, sizeY, variable.wrapS, variable.wrapT, variable.minFilter, variable.magFilter );
variable.renderTargets[ 1 ] = this.createRenderTarget( sizeX, sizeY, variable.wrapS, variable.wrapT, variable.minFilter, variable.magFilter );
this.renderTexture( variable.initialValueTexture, variable.renderTargets[ 0 ] );
this.renderTexture( variable.initialValueTexture, variable.renderTargets[ 1 ] );
// Adds dependencies uniforms to the ShaderMaterial
var material = variable.material;
var uniforms = material.uniforms;
if ( variable.dependencies !== null ) {
for ( var d = 0; d < variable.dependencies.length; d++ ) {
var depVar = variable.dependencies[ d ];
if ( depVar.name !== variable.name ) {
// Checks if variable exists
var found = false;
for ( var j = 0; j < this.variables.length; j++ ) {
if ( depVar.name === this.variables[ j ].name ) {
found = true;
break;
}
}
if ( ! found ) {
return "Variable dependency not found. Variable=" + variable.name + ", dependency=" + depVar.name;
}
}
uniforms[ depVar.name ] = { value: null };
material.fragmentShader = "\nuniform sampler2D " + depVar.name + ";\n" + material.fragmentShader;
}
}
}
this.currentTextureIndex = 0;
return null;
};
this.compute = function() {
var currentTextureIndex = this.currentTextureIndex;
var nextTextureIndex = this.currentTextureIndex === 0 ? 1 : 0;
for ( var i = 0, il = this.variables.length; i < il; i++ ) {
var variable = this.variables[ i ];
// Sets texture dependencies uniforms
if ( variable.dependencies !== null ) {
var uniforms = variable.material.uniforms;
for ( var d = 0, dl = variable.dependencies.length; d < dl; d++ ) {
var depVar = variable.dependencies[ d ];
uniforms[ depVar.name ].value = depVar.renderTargets[ currentTextureIndex ].texture;
}
}
// Performs the computation for this variable
this.doRenderTarget( variable.material, variable.renderTargets[ nextTextureIndex ] );
}
this.currentTextureIndex = nextTextureIndex;
};
this.getCurrentRenderTarget = function( variable ) {
return variable.renderTargets[ this.currentTextureIndex ];
};
this.getAlternateRenderTarget = function( variable ) {
return variable.renderTargets[ this.currentTextureIndex === 0 ? 1 : 0 ];
};
function addResolutionDefine( materialShader ) {
materialShader.defines.resolution = 'vec2( ' + sizeX.toFixed( 1 ) + ', ' + sizeY.toFixed( 1 ) + " )";
}
this.addResolutionDefine = addResolutionDefine;
// The following functions can be used to compute things manually
function createShaderMaterial( computeFragmentShader, uniforms ) {
uniforms = uniforms || {};
var material = new THREE.ShaderMaterial( {
uniforms: uniforms,
vertexShader: getPassThroughVertexShader(),
fragmentShader: computeFragmentShader
} );
addResolutionDefine( material );
return material;
}
this.createShaderMaterial = createShaderMaterial;
this.createRenderTarget = function( sizeXTexture, sizeYTexture, wrapS, wrapT, minFilter, magFilter ) {
sizeXTexture = sizeXTexture || sizeX;
sizeYTexture = sizeYTexture || sizeY;
wrapS = wrapS || THREE.ClampToEdgeWrapping;
wrapT = wrapT || THREE.ClampToEdgeWrapping;
minFilter = minFilter || THREE.NearestFilter;
magFilter = magFilter || THREE.NearestFilter;
var renderTarget = new THREE.WebGLRenderTarget( sizeXTexture, sizeYTexture, {
wrapS: wrapS,
wrapT: wrapT,
minFilter: minFilter,
magFilter: magFilter,
format: THREE.RGBAFormat,
type: ( /(iPad|iPhone|iPod)/g.test( navigator.userAgent ) ) ? THREE.HalfFloatType : THREE.FloatType,
stencilBuffer: false
} );
return renderTarget;
};
this.createTexture = function( sizeXTexture, sizeYTexture ) {
sizeXTexture = sizeXTexture || sizeX;
sizeYTexture = sizeYTexture || sizeY;
var a = new Float32Array( sizeXTexture * sizeYTexture * 4 );
		var texture = new THREE.DataTexture( a, sizeXTexture, sizeYTexture, THREE.RGBAFormat, THREE.FloatType );
texture.needsUpdate = true;
return texture;
};
this.renderTexture = function( input, output ) {
// Takes a texture, and render out in rendertarget
// input = Texture
// output = RenderTarget
passThruUniforms.texture.value = input;
this.doRenderTarget( passThruShader, output);
passThruUniforms.texture.value = null;
};
this.doRenderTarget = function( material, output ) {
mesh.material = material;
renderer.render( scene, camera, output );
mesh.material = passThruShader;
};
// Shaders
function getPassThroughVertexShader() {
return "void main() {\n" +
"\n" +
" gl_Position = vec4( position, 1.0 );\n" +
"\n" +
"}\n";
}
function | () {
return "uniform sampler2D texture;\n" +
"\n" +
"void main() {\n" +
"\n" +
" vec2 uv = gl_FragCoord.xy / resolution.xy;\n" +
"\n" +
" gl_FragColor = texture2D( texture, uv );\n" +
"\n" +
"}\n";
}
}
| getPassThroughFragmentShader |
mod.rs | use std::fmt::{Display, Debug};
pub mod kucherov;
pub mod valimaki;
pub type Mode = Box<dyn IsMode>;
/*
"interface" for new filtering and partition schemes.
1. Create any struct that implements these functions
2. Add your new struct to the code in setup.rs so that the solver will use it when the arg is used
*/
pub trait IsMode: Sync + Display + Debug {
/*
filtering scheme. Return the number of permitted errors for a query search node with given properties
"completed_blocks" : number of fully-matched blocks so far in THIS query search
"patt_blocks" : number of blocks the pattern this search is for is divided into
"blind_blocks" : number of blocks to the LEFT of this search i.e. not involved in the search
*/
fn filter_func(&self, completed_blocks : i32, patt_blocks : i32, blind_blocks : i32) -> i32;
// partition scheme. For a pattern of given length and alg parameters, return a vector of block lengths. order will be respected
fn get_block_lengths(&self, patt_len : i32, err_rate : f32, thresh : i32) -> Vec<i32>;
// return true IFF a node with the properties represented by the args should generate candidates
fn candidate_condition(&self,generous_overlap_len : i32, completed_blocks : i32, thresh : i32, errors : i32 ) -> bool;
// The pattern will only create query searches for pattern-block-sequence suffixes of this length or more
fn get_fewest_suff_blocks(&self) -> i32;
// Used by testing.rs for the cargo testing
fn get_guaranteed_extra_blocks(&self) -> i32;
}
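/*
Illustrative sketch only: a minimal IsMode implementor showing the shape a
custom scheme takes. It is not used by the solver, and its numbers are
arbitrary placeholders rather than a published filtering scheme.
*/
#[derive(Debug)]
pub struct FlatMode;

impl std::fmt::Display for FlatMode {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "FlatMode")
    }
}

impl IsMode for FlatMode {
    fn filter_func(&self, completed_blocks: i32, _patt_blocks: i32, _blind_blocks: i32) -> i32 {
        // permit one error per fully-matched block
        completed_blocks
    }
    fn get_block_lengths(&self, patt_len: i32, _err_rate: f32, thresh: i32) -> Vec<i32> {
        // equal-sized blocks of roughly `thresh` symbols; the remainder joins the last block
        let n = (patt_len / thresh.max(1)).max(1);
        let base = patt_len / n;
        let mut lengths = vec![base; n as usize];
        *lengths.last_mut().unwrap() += patt_len - base * n;
        lengths
    }
    fn candidate_condition(&self, generous_overlap_len: i32, _completed_blocks: i32, thresh: i32, _errors: i32) -> bool {
        generous_overlap_len >= thresh
    }
    fn get_fewest_suff_blocks(&self) -> i32 {
        1
    }
    fn get_guaranteed_extra_blocks(&self) -> i32 {
        1
    }
}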
/*
Add your custom modes in this switch statement so that
they will be used when the solver is run with the appropriate -m flag arg.
*/
pub fn get_mode(arg : &str) -> Mode {
let tokens : Vec<&str> = arg.split('_').collect();
    if tokens.is_empty() {
        panic!("empty mode argument")
    }
let mode_args = &tokens[1..];
match tokens[0] {
"valimaki" => Box::new(valimaki::ValimakiMode::new()),
"kucherov" => Box::new(kucherov::KucherovMode::new(mode_args)),
/*
NEW MODE OPTIONS GO IN THIS BLOCK
CATCH the name you want it to be associated with, whatever you like.
    return a box containing your IsMode-implementing struct like this:
your_mod_rs_file::YourStruct::new(mode_args)
("IsMode" trait is defined above)
You can also leave out the mode_args if your new() is defined as requiring no parameter.
*/
// YOUR MODES GO HERE ^^^^
_ => panic!("No mode with the given name found!"),
}
}
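// For example (illustrative): get_mode("kucherov_2") selects the Kucherov
// scheme and forwards ["2"] as mode_args, matching default_mode() below.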
pub fn default_mode() -> Mode | {
Box::new(kucherov::KucherovMode::new(&vec!["2"]))
} |
|
delete-token.ts | import * as admin from 'firebase-admin';
export async function deleteToken(userRecord: admin.auth.UserRecord) {
    // a falsy uid already covers both undefined and the empty string
    if (!userRecord || !userRecord.uid) {
return;
}
try {
const userId: string = userRecord.uid;
await deletePlatformDoc(userId);
} catch (err) {
console.error(err); | }
}
function deletePlatformDoc(userId: string): Promise<void> {
return new Promise<void>(async (resolve, reject) => {
try {
const collectionRef: admin.firestore.CollectionReference = admin.firestore().collection(`/platforms/`);
const doc: admin.firestore.DocumentReference = collectionRef.doc(userId);
await doc.delete();
resolve();
} catch (err) {
reject(err);
}
});
} | |
ajax.js | $.ajaxSetup({
headers: {
'X-CSRF-TOKEN': $('meta[name="csrf-token"]').attr('content')
}
});
$(document).ready(function(){
$(document).on('click','.pagination a',function(e){
e.preventDefault();
var page = $(this).attr('href').split('page=')[1];
var query = $('#search').val();
var sort = $('#hidden_quantity_type').val();
fetch_data(page,query,sort)
});
function fetch_data(page,query,sort){
$.ajax({
type:"get",
url : "product/fetch_data?page="+page+"&query="+query+"&sort="+sort,
success:function(data)
{
$('#table_data').html(data);
if (sort == 1 ) {
$('#hidden_quantity_type').attr('value','1');
} else if (sort == 2 )
{
$('#hidden_quantity_type').attr('value','2');
} else if (sort == 3 ) {
$('#hidden_quantity_type').attr('value','3');
}else if (sort == 4 ) {
$('#hidden_quantity_type').attr('value','4');
}
}
})
}
$('#form-search').submit(function(e){
e.preventDefault();
var query = $('#search').val();
var page = $('#hidden_page').val();
        $('#hidden_quantity_type').attr('value','');
        var sort = '';
fetch_data(page,query,sort);
})
$(document).on('click', '.quantity', function(e){
var query = $('#search').val();
var page = $('#hidden_page').val();
var sort = $('#remaining').val();
fetch_data(page,query,sort);
})
$(document).on('click', '.quantity_out', function(e){
var query = $('#search').val();
var page = $('#hidden_page').val();
var sort = $('#out').val();
fetch_data(page,query,sort);
})
$(document).on('click', '.love', function(e){
var query = $('#search').val();
var page = $('#hidden_page').val();
var sort = $('#love').val();
fetch_data(page,query,sort);
})
$(document).on('click', '.notLove', function(e){
var query = $('#search').val();
var page = $('#hidden_page').val();
var sort = $('#notLove').val();
fetch_data(page,query,sort);
})
$(document).on('click', '.edit-product', function(e){
e.preventDefault();
$('.error1').hide();
$('.error2').hide();
$('.error3').hide();
var url = $(this).attr('data-url');
e.preventDefault();
$.ajax({
type: 'get',
url: url,
success: function (response) {
$('.tittle').text(response.data.name);
$('#name-edit').val(response.data.name);
$('#quantity-edit').val(response.data.quantity);
$('#price-edit').val(response.data.price);
var html ='';
$.each(response.suppliers,function($key,$value){
if ($value['id']==response.data.supplier_id) {
html +='<option value='+$value['id']+' selected>';
html += $value['name'];
html += '</option>';
}else{
html +='<option value='+$value['id']+' >';
html += $value['name'];
html += '</option>';
}
});
$('.idSupplier').html(html);
var html1 ='';
$.each(response.categories,function($key,$value){
if ($value['id']==response.data.category_id) {
html1 +='<option value='+$value['id']+' selected>';
html1 += $value['name'];
html1 += '</option>';
}else{
html1 +='<option value='+$value['id']+' >';
html1 += $value['name'];
html1 += '</option>';
}
});
$('.idCategory').html(html1);
$('#ram-edit').val(response.data.RAM);
$('#vga-edit').val(response.data.VGA);
$('#operating_system-edit').val(response.data.operating_system);
$('#cpu-edit').val(response.data.CPU);
$('#guarantee-edit').val(response.data.guarantee);
// CKEDITOR.instances['description-edit'].setData(response.data.description);
$('#description-edit').val(response.data.description);
$('#sales_volume-edit').val(response.data.sales_volume);
var html2 ='';
$.each(response.product_image,function($key,$value){
                    html2 +='<img src="/uploads/'+$value['path']+'" style="margin-right: 16px;width:100px; "/>';
});
$('.idImage').html(html2);
$('#form-edit').attr('data-url','/admin/product/'+response.data.id);
},
error: function (error) {
}
})
})
$('#form-edit').submit(function(e){
e.preventDefault();
var url=$(this).attr('data-url');
$.ajax({ | 'quantity': $('#quantity-edit').val(),
'price': $('#price-edit').val(),
'supplier_id': $('.idSupplier').val(),
'category_id': $('.idCategory').val(),
'RAM': $('#ram-edit').val(),
'VGA': $('#vga-edit').val(),
'operating_system': $('#operating_system-edit').val(),
'CPU': $('#cpu-edit').val(),
'guarantee': $('#guarantee-edit').val(),
// 'description': CKEDITOR.instances['description-edit'].getData(),
'description' : $('#description-edit').val(),
'sales_volume': $('#sales_volume-edit').val(),
'_method':'put',
},
success: function($resuld) {
if($resuld.error == 'true'){
if ($resuld.mess.name) {
$('.error1').show();
$('.error1').text($resuld.mess.name[0]);
} else {
$('.error1').hide();
}
if ($resuld.mess.quantity) {
$('.error2').show();
$('.error2').text($resuld.mess.quantity);
} else {
$('.error2').hide();
}
if ($resuld.mess.price) {
$('.error3').show();
$('.error3').text($resuld.mess.price);
} else {
$('.error3').hide();
}
}else{
var cate = '';
$.each($resuld.categories,function($key,$value){
if ($resuld.data.category_id == $value['id'])
{
cate = $value['name'];
}
});
var no = '';
if ($resuld.data.note == null || $resuld.data.note == 0)
{
no ='<button data-url="http://127.0.0.1/admin/product/like/'+$resuld.data.id+'" class="notlike"><i class="fa fa-star-o" aria-hidden="true"></i></button>';
}
else {
no ='<button data-url="http://127.0.0.1/admin/product/like/'+$resuld.data.id+'" class="like"><i class="fa fa-star" aria-hidden="true"></i></button>';
}
toastr.success($resuld.success,'Thông báo',{timeOut: 5000});
$('#name_'+$resuld.data.id).text($resuld.data.name);
$('#category_id_'+$resuld.data.id).text(cate);
$('#quantity_'+$resuld.data.id).text($resuld.data.quantity);
$('#price_'+$resuld.data.id).text($resuld.data.price);
$('#RAM_'+$resuld.data.id).text($resuld.data.RAM);
$('#VGA_'+$resuld.data.id).text($resuld.data.VGA);
$('#operating_system_'+$resuld.data.id).text($resuld.data.operating_system);
$('#CPU_'+$resuld.data.id).text($resuld.data.CPU);
$('#description_'+$resuld.data.id).text($resuld.data.description);
$('#sales_volume_'+$resuld.data.id).text($resuld.data.sales_volume);
$('#note_'+$resuld.data.id).html(no);
// location.reload();
}
},
})
})
$(document).on('click', '.btn-delete', function(e){
var url = $(this).attr('data-url');
var _this = $(this);
$('.del').click(function(){
$.ajax({
type: 'delete',
url: url,
success: function($resuld) {
// $('#delete').hide();
toastr.success($resuld.success,'Thông báo',{timeOut: 5000});
_this.parent().parent().remove();
},
})
})
})
$(document).on('click', '.notlike', function(e){
var url = $(this).attr('data-url');
e.preventDefault();
$.ajax({
type: 'get',
url: url,
success: function ($resuld) {
var no = '';
if ($resuld.data.note == null || $resuld.data.note == 0)
{
no ='<button data-url="http://127.0.0.1/admin/product/like/'+$resuld.data.id+'" class="notlike"><i class="fa fa-star-o" aria-hidden="true"></i></button>';
}
else {
no ='<button data-url="http://127.0.0.1/admin/product/like/'+$resuld.data.id+'" class="like"><i class="fa fa-star" aria-hidden="true"></i></button>';
}
$('#note_'+$resuld.data.id).html(no);
}
})
})
$(document).on('click', '.like', function(e){
var url = $(this).attr('data-url');
e.preventDefault();
$.ajax({
type: 'get',
url: url,
success: function ($resuld) {
var no = '';
if ($resuld.data.note == null || $resuld.data.note == 0)
{
no ='<button data-url="http://127.0.0.1/admin/product/like/'+$resuld.data.id+'" class="notlike"><i class="fa fa-star-o" aria-hidden="true"></i></button>';
}
else {
no ='<button data-url="http://127.0.0.1/admin/product/like/'+$resuld.data.id+'" class="like"><i class="fa fa-star" aria-hidden="true"></i></button>';
}
$('#note_'+$resuld.data.id).html(no);
}
})
})
// PRODUCT IMAGES
$(document).on('click', '.btn-del-image', function(e){
var url = $(this).attr('data-url');
var _this = $(this);
$('.del').click(function(){
$.ajax({
type: 'delete',
url: url,
success: function($resuld) {
toastr.success($resuld.success,'Thông báo',{timeOut: 5000});
_this.parent().remove();
},
})
})
})
}); | type: "PUT",
url: url,
data: {
'name': $('#name-edit').val(), |
config.rs | // SPDX-License-Identifier: Apache-2.0
use std::num::NonZeroU32;
use anyhow::{anyhow, Result};
use goblin::elf::program_header::{PF_R, PF_W, PF_X};
use sallyport::elf;
use sgx::page::{Class, Flags, SecInfo};
use sgx::parameters::{Masked, Parameters};
#[derive(Debug)]
pub struct Config {
pub parameters: Parameters,
pub ssap: NonZeroU32,
pub size: usize,
pub sallyport_block_size: u64,
}
impl super::super::Config for Config {
type Flags = (SecInfo, bool);
fn flags(flags: u32) -> Self::Flags {
let mut rwx = Flags::empty();
if flags & PF_R != 0 {
rwx |= Flags::READ;
}
if flags & PF_W != 0 {
rwx |= Flags::WRITE;
}
if flags & PF_X != 0 {
rwx |= Flags::EXECUTE;
// Debugging with gdb also involves modifying executable memory
if cfg!(feature = "gdb") {
rwx |= Flags::WRITE;
}
}
let m = flags & elf::pf::sgx::UNMEASURED == 0;
let si = match flags & elf::pf::sgx::TCS {
0 => Class::Regular.info(rwx),
_ => Class::Tcs.info(None),
};
(si, m)
}
fn new(shim: &super::super::Binary<'_>, _exec: &super::super::Binary<'_>) -> Result<Self> |
}
| {
unsafe {
let params: Parameters = Parameters {
misc: Masked {
data: shim
.note(elf::note::NAME, elf::note::sgx::MISC)
.ok_or_else(|| anyhow!("SGX shim is missing MISC"))?,
mask: shim
.note(elf::note::NAME, elf::note::sgx::MISCMASK)
.ok_or_else(|| anyhow!("SGX shim is missing MISCMASK"))?,
},
attr: Masked {
data: shim
.note(elf::note::NAME, elf::note::sgx::ATTR)
.ok_or_else(|| anyhow!("SGX shim is missing ATTR"))?,
mask: shim
.note(elf::note::NAME, elf::note::sgx::ATTRMASK)
.ok_or_else(|| anyhow!("SGX shim is missing ATTRMASK"))?,
},
pid: shim
.note(elf::note::NAME, elf::note::sgx::PID)
.ok_or_else(|| anyhow!("SGX shim is missing PID"))?,
svn: shim
.note(elf::note::NAME, elf::note::sgx::SVN)
.ok_or_else(|| anyhow!("SGX shim is missing SVN"))?,
};
let ssap: u8 = shim
.note(elf::note::NAME, elf::note::sgx::SSAP)
.ok_or_else(|| anyhow!("SGX shim is missing SSAP"))?;
let ssap =
NonZeroU32::new(ssap.into()).ok_or_else(|| anyhow!("SGX shim SSAP is invalid"))?;
let bits: u8 = shim
.note(elf::note::NAME, elf::note::sgx::BITS)
.ok_or_else(|| anyhow!("SGX shim is missing BITS"))?;
let sallyport_block_size: u64 = shim
.note(elf::note::NAME, elf::note::BLOCK_SIZE)
.ok_or_else(|| anyhow!("SGX shim is missing BLOCK_SIZE"))?;
Ok(Self {
parameters: params,
size: 1 << bits,
ssap,
sallyport_block_size,
})
}
} |
mysql.go | package storage
import (
"database/sql"
"fmt"
"log"
"math/rand"
"sort"
"go-std/src/templates"
_ "github.com/go-sql-driver/mysql" // postgresql import
)
// MySQL struct
type MySQL struct {
db *sql.DB
// prepare statements
selectStmt *sql.Stmt
updateStmt *sql.Stmt
fortuneStmt *sql.Stmt
}
// Connect create connection and ping db
func (mysql *MySQL) Connect(dbConnectionString string, maxConnectionsInPool int) error {
var err error
mysql.db, err = sql.Open("mysql", dbConnectionString)
if err != nil {
return err
}
err = mysql.db.Ping()
if err != nil {
return err
}
mysql.db.SetMaxOpenConns(maxConnectionsInPool)
mysql.db.SetMaxIdleConns(maxConnectionsInPool)
if mysql.selectStmt, err = mysql.mustPrepare(selectQueryStrMySQL); err != nil {
return err
}
if mysql.fortuneStmt, err = mysql.mustPrepare(fortuneQueryStrMySQL); err != nil {
return err
}
if mysql.updateStmt, err = mysql.mustPrepare(updateQueryStrMySQL); err != nil {
return err
}
return nil
}
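// A typical go-sql-driver DSN passed to Connect looks like
// "user:password@tcp(host:3306)/dbname" (shown for illustration only; the
// real value is supplied by the caller).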
// Close connect to db
func (mysql *MySQL) Close() {
mysql.db.Close()
}
// GetOneRandomWorld returns one random World struct
func (mysql MySQL) GetOneRandomWorld(w *World) error {
var err error
queryID := rand.Intn(worldsCount) + 1
if err = mysql.selectStmt.QueryRow(queryID).Scan(&w.ID, &w.RandomNumber); err != nil {
err = fmt.Errorf("error scanning world row with ID %d: %s", queryID, err)
}
return err
}
// UpdateWorlds updates some number of worlds entries, passed as arg
func (mysql MySQL) UpdateWorlds(selectedWorlds []World) error {
// against deadlocks
sort.Slice(selectedWorlds, func(i, j int) bool {
return selectedWorlds[i].ID < selectedWorlds[j].ID
})
tx, err := mysql.db.Begin()
if err != nil {
return err
}
for _, selectedWorld := range selectedWorlds {
selectedWorld.RandomNumber = rand.Intn(worldsCount) + 1
		if _, err := tx.Stmt(mysql.updateStmt).Exec(selectedWorld.RandomNumber, selectedWorld.ID); err != nil {
			log.Printf("Can't update row ID %d with number %d: %s", selectedWorld.ID, selectedWorld.RandomNumber, err)
			tx.Rollback()
			// the transaction is unusable after a rollback, so bail out
			return err
		}
}
if err := tx.Commit(); err != nil {
tx.Rollback()
return err
}
return nil
}
// GetFortunes selects all fortunes from table
func (mysql MySQL) GetFortunes() ([]templates.Fortune, error) {
	rows, err := mysql.fortuneStmt.Query()
	if err != nil {
		return nil, fmt.Errorf("can't query fortunes: %s", err)
	}
	// close only after the error check: rows is nil when Query fails
	defer rows.Close()
fortunes := make([]templates.Fortune, 0, 16)
var fortune templates.Fortune
for rows.Next() {
if err = rows.Scan(&fortune.ID, &fortune.Message); err != nil {
log.Printf("Can't scan fortune: %s\n", err)
}
fortunes = append(fortunes, fortune)
}
return fortunes, nil
}
// GetFortunesPool selects all fortunes from table
func (mysql MySQL) GetFortunesPool() ([]templates.Fortune, error) {
	rows, err := mysql.fortuneStmt.Query()
	if err != nil {
		return nil, fmt.Errorf("can't query fortunes: %s", err)
	}
	// close only after the error check: rows is nil when Query fails
	defer rows.Close()
fortunes := templates.FortunesPool.Get().([]templates.Fortune)
var fortune templates.Fortune
for rows.Next() {
if err = rows.Scan(&fortune.ID, &fortune.Message); err != nil {
log.Printf("Can't scan fortune: %s\n", err)
}
fortunes = append(fortunes, fortune)
}
return fortunes, nil
}
func (mysql MySQL) mustPrepare(query string) (*sql.Stmt, error) {
stmt, err := mysql.db.Prepare(query)
if err != nil {
log.Printf("Error when preparing statement %q: %s\n", query, err)
return nil, err
}
return stmt, nil
}
// NewMySQLDB creates a new connection to the MySQL db with the MySQL driver
func NewMySQLDB(dbConnectionString string, maxConnectionsInPool int) (DB, error) {
var mysql MySQL
if err := mysql.Connect(dbConnectionString, maxConnectionsInPool); err != nil {
return nil, err | }
// func dbInterpolateHandler(w http.ResponseWriter, r *http.Request) {
// var world World
// err := db.QueryRow(worldSelect, rand.Intn(worldRowCount)+1).Scan(&world.Id, &world.RandomNumber)
// if err != nil {
// log.Fatalf("Error scanning world row: %s", err.Error())
// }
// w.Header().Set("Server", "Go")
// w.Header().Set("Content-Type", "application/json")
// json.NewEncoder(w).Encode(&world)
// }
// func queriesInterpolateHandler(w http.ResponseWriter, r *http.Request) {
// n := getQueriesParam(r)
// world := make([]World, n)
// for i := 0; i < n; i++ {
// err := db.QueryRow(worldSelect, rand.Intn(worldRowCount)+1).Scan(&world[i].Id, &world[i].RandomNumber)
// if err != nil {
// log.Fatalf("Error scanning world row: %v", err)
// }
// }
// w.Header().Set("Server", "Go")
// w.Header().Set("Content-Type", "application/json")
// json.NewEncoder(w).Encode(world)
// }
// func fortuneInterpolateHandler(w http.ResponseWriter, r *http.Request) {
// rows, err := db.Query(fortuneSelect)
// if err != nil {
// log.Fatalf("Error preparing statement: %v", err)
// }
// fortunes := fetchFortunes(rows)
// fortunes = append(fortunes, &Fortune{Message: "Additional fortune added at request time."})
// sort.Sort(ByMessage{fortunes})
// w.Header().Set("Server", "Go")
// w.Header().Set("Content-Type", "text/html; charset=utf-8")
// if err := tmpl.Execute(w, fortunes); err != nil {
// http.Error(w, err.Error(), http.StatusInternalServerError)
// }
// }
// func updateInterpolateHandler(w http.ResponseWriter, r *http.Request) {
// n := getQueriesParam(r)
// world := make([]World, n)
// for i := 0; i < n; i++ {
// if err := db.QueryRow(worldSelect, rand.Intn(worldRowCount)+1).Scan(&world[i].Id, &world[i].RandomNumber); err != nil {
// log.Fatalf("Error scanning world row: %v", err)
// }
// world[i].RandomNumber = uint16(rand.Intn(worldRowCount) + 1)
// if _, err := db.Exec(worldUpdate, world[i].RandomNumber, world[i].Id); err != nil {
// log.Fatalf("Error updating world row: %v", err)
// }
// }
// w.Header().Set("Server", "Go")
// w.Header().Set("Content-Type", "application/json")
// encoder := json.NewEncoder(w)
// encoder.Encode(world)
// } | }
return &mysql, nil |
flags.go | package utils
import (
"io/ioutil"
"log"
"os"
"reflect"
"strings"
"github.com/urfave/cli/v2"
"github.com/fatih/structs"
"github.com/yudai/hcl"
"github.com/kost/tty2web/pkg/homedir"
)
func GenerateFlags(options ...interface{}) (flags []cli.Flag, mappings map[string]string, err error) {
mappings = make(map[string]string)
for _, struct_ := range options {
o := structs.New(struct_)
for _, field := range o.Fields() {
alias := []string{}
flagName := field.Tag("flagName")
if flagName == "" {
continue
}
envName := "TTY2WEB_" + strings.ToUpper(strings.Join(strings.Split(flagName, "-"), "_"))
mappings[flagName] = field.Name()
flagShortName := field.Tag("flagSName")
if flagShortName != "" {
alias = []string{flagShortName}
}
flagDescription := field.Tag("flagDescribe")
switch field.Kind() {
case reflect.String:
flags = append(flags, &cli.StringFlag{
Name: flagName,
Value: field.Value().(string),
Usage: flagDescription,
Aliases: alias,
EnvVars: []string{envName},
})
case reflect.Bool:
flags = append(flags, &cli.BoolFlag{
Name: flagName,
Usage: flagDescription,
Aliases: alias,
EnvVars: []string{envName},
})
case reflect.Int:
flags = append(flags, &cli.IntFlag{
Name: flagName,
Value: field.Value().(int),
Usage: flagDescription,
Aliases: alias,
EnvVars: []string{envName},
})
}
}
}
return
}
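// Illustrative sketch (field and flag names invented): the struct-tag layout
// GenerateFlags consumes. A field declared as
//
//	Address string `flagName:"address" flagSName:"a" flagDescribe:"listen address"`
//
// yields an --address/-a string flag that can also be set through the
// derived TTY2WEB_ADDRESS environment variable.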
func ApplyFlags(
flags []cli.Flag,
mappingHint map[string]string,
c *cli.Context,
options ...interface{},
) {
objects := make([]*structs.Struct, len(options))
for i, struct_ := range options {
objects[i] = structs.New(struct_)
}
for flagName, fieldName := range mappingHint {
if !c.IsSet(flagName) |
var field *structs.Field
var ok bool
for _, o := range objects {
field, ok = o.FieldOk(fieldName)
if ok {
break
}
}
if field == nil {
continue
}
var val interface{}
switch field.Kind() {
case reflect.String:
val = c.String(flagName)
case reflect.Bool:
val = c.Bool(flagName)
case reflect.Int:
val = c.Int(flagName)
}
field.Set(val)
}
}
func ApplyConfigFile(filePath string, options ...interface{}) error {
filePath = homedir.Expand(filePath)
if _, err := os.Stat(filePath); os.IsNotExist(err) {
return err
}
fileString := []byte{}
log.Printf("Loading config file at: %s", filePath)
fileString, err := ioutil.ReadFile(filePath)
if err != nil {
return err
}
for _, object := range options {
if err := hcl.Decode(object, string(fileString)); err != nil {
return err
}
}
return nil
}
| {
continue
} |
select_same.py | # ===============================================================================
# Copyright 2018 ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
from pyface.message_dialog import warning
from traits.api import HasTraits, List
from traitsui.api import UItem, CheckListEditor
from pychron.core.helpers.traitsui_shortcuts import okcancel_view
class SelectSameMixin(HasTraits):
default_attr = None
def select_same_attr(self):
hs = self._get_selection_attrs()
if self.selected:
ev = SelectAttrView(available_attributes=hs)
ev.on_trait_change(self._handle_select_attributes, 'attributes')
ev.edit_traits()
else:
warning(None, 'You must select at least one run to use "Select Same"')
def _handle_select_attributes(self, attributes):
if attributes:
s = self.selected[0]
def test(v):
return all([getattr(v, k) == getattr(s, k) for k in attributes])
self._select_same(test)
def select_same(self):
self._select_same(self._default_test())
def _select_same(self, test):
self.selected = [si for si in self._get_records() if test(si)]
def _get_records(self):
|
def _get_selection_attrs(self):
raise NotImplementedError
def _default_test(self):
v = getattr(self.selected[0], self.default_attr)
def test(si):
return getattr(si, self.default_attr) == v
return test
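# Illustrative sketch (names invented): a concrete class mixing this in only
# has to supply the record source and the selectable attributes, e.g.
#
#     class RunTable(SelectSameMixin):
#         default_attr = 'identifier'
#
#         def _get_records(self):
#             return self.runs
#
#         def _get_selection_attrs(self):
#             return ['identifier', 'aliquot', 'sample']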
class SelectAttrView(HasTraits):
attributes = List
available_attributes = List
def traits_view(self):
v = okcancel_view(UItem('attributes', style='custom',
editor=CheckListEditor(name='available_attributes',
cols=3)),
title='Select Attributes')
return v
# ============= EOF =============================================
| raise NotImplementedError |
BoolOption.py | #
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Options/BoolOption.py 4043 2009/02/23 09:06:45 scons"
__doc__ = """Place-holder for the old SCons.Options module hierarchy
This is for backwards compatibility. The new equivalent is the Variables
class hierarchy. These will have deprecation warnings added (some day),
and will then be removed entirely (some day).
"""
import SCons.Variables
import SCons.Warnings
warned = False
def | (*args, **kw):
global warned
if not warned:
msg = "The BoolOption() function is deprecated; use the BoolVariable() function instead."
SCons.Warnings.warn(SCons.Warnings.DeprecatedOptionsWarning, msg)
warned = True
return apply(SCons.Variables.BoolVariable, args, kw)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| BoolOption |
15.2.3.8-2-b-1.js | /// Copyright (c) 2012 Ecma International. All rights reserved.
/**
* @path ch15/15.2/15.2.3/15.2.3.8/15.2.3.8-2-b-1.js
* @description Object.seal - the [[Configurable]] attribute of own data property of 'O' is set from true to false and other attributes of the property are unaltered
*/
function | () {
var obj = {};
Object.defineProperty(obj, "foo", {
value: 10,
writable: true,
enumerable: true,
configurable: true
});
var preCheck = Object.isExtensible(obj);
Object.seal(obj);
return preCheck && dataPropertyAttributesAreCorrect(obj, "foo", 10, true, true, false);
}
runTestCase(testcase);
| testcase |
error.go | package gocbcore
import (
"errors"
"io"
)
// dwError is a special error type used to rewrap another error with a
// more detailed message, while keeping the inner error reachable via
// Unwrap. Mainly used for timeouts.
type dwError struct {
InnerError error
Message string
}
func (e dwError) Error() string {
return e.Message
}
func (e dwError) Unwrap() error {
return e.InnerError
}
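// A minimal illustrative sketch of the pattern described above (not part of
// the upstream source): because dwError implements Unwrap, errors.Is can
// match the shared cause while callers still see the more specific message.
//
//     err := error(&dwError{ErrTimeout, "ambiguous timeout"})
//     errors.Is(err, ErrTimeout) // true; err.Error() == "ambiguous timeout"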
var (
// ErrNoSupportedMechanisms occurs when the server does not support any of the
// authentication methods that the client finds suitable.
ErrNoSupportedMechanisms = errors.New("no supported authentication mechanisms")
// ErrBadHosts occurs when the list of hosts specified cannot be contacted.
ErrBadHosts = errors.New("failed to connect to any of the specified hosts")
// ErrProtocol occurs when the server responds with unexpected or unparseable data.
ErrProtocol = errors.New("failed to parse server response")
// ErrNoReplicas occurs when no replicas respond in time
ErrNoReplicas = errors.New("no replicas responded in time")
// ErrCliInternalError indicates an internal error occurred within the client.
ErrCliInternalError = errors.New("client internal error")
// ErrInvalidCredentials is returned when an invalid set of credentials is provided for a service.
ErrInvalidCredentials = errors.New("an invalid set of credentials was provided")
// ErrInvalidServer occurs when an explicit, but invalid server index is specified.
ErrInvalidServer = errors.New("specific server index is invalid")
// ErrInvalidVBucket occurs when an explicit, but invalid vbucket index is specified.
ErrInvalidVBucket = errors.New("specific vbucket index is invalid")
// ErrInvalidReplica occurs when an explicit, but invalid replica index is specified.
ErrInvalidReplica = errors.New("specific replica index is invalid")
// ErrInvalidService occurs when an explicit but invalid service type is specified
ErrInvalidService = errors.New("invalid service")
// ErrInvalidCertificate occurs when a certificate that is not useable is passed to an Agent.
ErrInvalidCertificate = errors.New("certificate is invalid")
// ErrCollectionsUnsupported occurs when collections are used but either server does not support them or the agent
// was created without them enabled.
ErrCollectionsUnsupported = errors.New("collections are not enabled")
// ErrBucketAlreadySelected occurs when SelectBucket is called while a bucket is already selected.
ErrBucketAlreadySelected = errors.New("bucket already selected")
// ErrShutdown occurs when operations are performed on a previously closed Agent.
ErrShutdown = errors.New("connection shut down")
// ErrOverload occurs when too many operations are dispatched and all queues are full.
ErrOverload = errors.New("queue overflowed")
// ErrSocketClosed occurs when a socket closes while an operation is in flight.
ErrSocketClosed = io.EOF
)
// Shared Error Definitions RFC#58@15
var (
// ErrTimeout occurs when an operation does not receive a response in a timely manner.
ErrTimeout = errors.New("operation has timed out")
ErrRequestCanceled = errors.New("request canceled")
ErrInvalidArgument = errors.New("invalid argument")
ErrServiceNotAvailable = errors.New("service not available")
ErrInternalServerFailure = errors.New("internal server failure")
ErrAuthenticationFailure = errors.New("authentication failure")
ErrTemporaryFailure = errors.New("temporary failure")
ErrParsingFailure = errors.New("parsing failure")
ErrCasMismatch = errors.New("cas mismatch")
ErrBucketNotFound = errors.New("bucket not found")
ErrCollectionNotFound = errors.New("collection not found")
ErrEncodingFailure = errors.New("encoding failure")
ErrDecodingFailure = errors.New("decoding failure")
ErrUnsupportedOperation = errors.New("unsupported operation")
ErrAmbiguousTimeout = &dwError{ErrTimeout, "ambiguous timeout"}
ErrUnambiguousTimeout = &dwError{ErrTimeout, "unambiguous timeout"}
// ErrFeatureNotAvailable occurs when an operation is performed on a bucket which does not support it.
ErrFeatureNotAvailable = errors.New("feature is not available")
ErrScopeNotFound = errors.New("scope not found")
ErrIndexNotFound = errors.New("index not found")
ErrIndexExists = errors.New("index exists")
)
// Key Value Error Definitions RFC#58@15
var (
ErrDocumentNotFound = errors.New("document not found")
ErrDocumentUnretrievable = errors.New("document unretrievable")
ErrDocumentLocked = errors.New("document locked")
ErrValueTooLarge = errors.New("value too large")
ErrDocumentExists = errors.New("document exists")
ErrValueNotJSON = errors.New("value not json")
ErrDurabilityLevelNotAvailable = errors.New("durability level not available")
ErrDurabilityImpossible = errors.New("durability impossible")
ErrDurabilityAmbiguous = errors.New("durability ambiguous")
ErrDurableWriteInProgress = errors.New("durable write in progress")
ErrDurableWriteReCommitInProgress = errors.New("durable write recommit in progress")
ErrMutationLost = errors.New("mutation lost")
ErrPathNotFound = errors.New("path not found")
ErrPathMismatch = errors.New("path mismatch")
ErrPathInvalid = errors.New("path invalid")
ErrPathTooBig = errors.New("path too big")
ErrPathTooDeep = errors.New("path too deep")
ErrValueTooDeep = errors.New("value too deep")
ErrValueInvalid = errors.New("value invalid")
ErrDocumentNotJSON = errors.New("document not json")
ErrNumberTooBig = errors.New("number too big")
ErrDeltaInvalid = errors.New("delta invalid")
ErrPathExists = errors.New("path exists")
ErrXattrUnknownMacro = errors.New("xattr unknown macro")
ErrXattrInvalidFlagCombo = errors.New("xattr invalid flag combination")
ErrXattrInvalidKeyCombo = errors.New("xattr invalid key combination")
ErrXattrUnknownVirtualAttribute = errors.New("xattr unknown virtual attribute")
ErrXattrCannotModifyVirtualAttribute = errors.New("xattr cannot modify virtual attribute")
ErrXattrInvalidOrder = errors.New("xattr invalid order")
)
// Query Error Definitions RFC#58@15
var (
ErrPlanningFailure = errors.New("planning failure")
ErrIndexFailure = errors.New("index failure")
ErrPreparedStatementFailure = errors.New("prepared statement failure")
)
// Analytics Error Definitions RFC#58@15
var (
ErrCompilationFailure = errors.New("compilation failure")
ErrJobQueueFull = errors.New("job queue full")
ErrDatasetNotFound = errors.New("dataset not found")
ErrDataverseNotFound = errors.New("dataverse not found")
ErrDatasetExists = errors.New("dataset exists")
ErrDataverseExists = errors.New("dataverse exists")
ErrLinkNotFound = errors.New("link not found")
)
// Search Error Definitions RFC#58@15
var ()
// View Error Definitions RFC#58@15
var (
ErrViewNotFound = errors.New("view not found")
ErrDesignDocumentNotFound = errors.New("design document not found")
)
// Management Error Definitions RFC#58@15
var (
ErrCollectionExists = errors.New("collection exists")
ErrScopeExists = errors.New("scope exists")
ErrUserNotFound = errors.New("user not found")
ErrGroupNotFound = errors.New("group not found") | ErrUserExists = errors.New("user exists")
ErrBucketNotFlushable = errors.New("bucket not flushable")
) | ErrBucketExists = errors.New("bucket exists") |
shard.go | /*
Copyright 2011 Google Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
Package shard registers the "shard" blobserver storage type,
predictably spraying blobs out over the provided backends
based on their blobref. Each blob maps to exactly one backend.
Example low-level config:
"/foo/": {
"handler": "storage-shard",
"handlerArgs": {
"backends": ["/s1/", "/s2/"]
}
},
*/
package shard
import (
"errors"
"io"
"time"
"camlistore.org/pkg/blob"
"camlistore.org/pkg/blobserver"
"camlistore.org/pkg/jsonconfig"
)
type shardStorage struct {
*blobserver.SimpleBlobHubPartitionMap
shardPrefixes []string
shards []blobserver.Storage
}
func (sto *shardStorage) GetBlobHub() blobserver.BlobHub {
return sto.SimpleBlobHubPartitionMap.GetBlobHub()
}
func newFromConfig(ld blobserver.Loader, config jsonconfig.Obj) (storage blobserver.Storage, err error) {
sto := &shardStorage{
SimpleBlobHubPartitionMap: &blobserver.SimpleBlobHubPartitionMap{},
}
sto.shardPrefixes = config.RequiredList("backends")
if err := config.Validate(); err != nil |
if len(sto.shardPrefixes) == 0 {
return nil, errors.New("shard: need at least one shard")
}
sto.shards = make([]blobserver.Storage, len(sto.shardPrefixes))
for i, prefix := range sto.shardPrefixes {
shardSto, err := ld.GetStorage(prefix)
if err != nil {
return nil, err
}
sto.shards[i] = shardSto
}
return sto, nil
}
func (sto *shardStorage) shard(b blob.Ref) blobserver.Storage {
return sto.shards[int(sto.shardNum(b))]
}
func (sto *shardStorage) shardNum(b blob.Ref) uint32 {
return b.Sum32() % uint32(len(sto.shards))
}
func (sto *shardStorage) FetchStreaming(b blob.Ref) (file io.ReadCloser, size int64, err error) {
return sto.shard(b).FetchStreaming(b)
}
func (sto *shardStorage) ReceiveBlob(b blob.Ref, source io.Reader) (sb blob.SizedRef, err error) {
sb, err = sto.shard(b).ReceiveBlob(b, source)
if err == nil {
hub := sto.GetBlobHub()
hub.NotifyBlobReceived(b)
}
return
}
func (sto *shardStorage) batchedShards(blobs []blob.Ref, fn func(blobserver.Storage, []blob.Ref) error) error {
m := make(map[uint32][]blob.Ref)
for _, b := range blobs {
sn := sto.shardNum(b)
m[sn] = append(m[sn], b)
}
ch := make(chan error, len(m))
for sn := range m {
sblobs := m[sn]
s := sto.shards[sn]
go func() {
ch <- fn(s, sblobs)
}()
}
var reterr error
for range m {
if err := <-ch; err != nil {
reterr = err
}
}
return reterr
}
func (sto *shardStorage) RemoveBlobs(blobs []blob.Ref) error {
return sto.batchedShards(blobs, func(s blobserver.Storage, blobs []blob.Ref) error {
return s.RemoveBlobs(blobs)
})
}
func (sto *shardStorage) StatBlobs(dest chan<- blob.SizedRef, blobs []blob.Ref, wait time.Duration) error {
return sto.batchedShards(blobs, func(s blobserver.Storage, blobs []blob.Ref) error {
return s.StatBlobs(dest, blobs, wait)
})
}
func (sto *shardStorage) EnumerateBlobs(dest chan<- blob.SizedRef, after string, limit int, wait time.Duration) error {
return blobserver.MergedEnumerate(dest, sto.shards, after, limit, wait)
}
func init() {
blobserver.RegisterStorageConstructor("shard", blobserver.StorageConstructor(newFromConfig))
}
| {
return nil, err
} |
load_and_process.py | import pandas as pd
import cv2
import numpy as np
dataset_path = 'fer2013/fer2013/fer2013.csv'
image_size=(48,48)
def load_fer2013():
data = pd.read_csv(dataset_path)
pixels = data['pixels'].tolist()
width, height = 48, 48
faces = []
for pixel_sequence in pixels: | face = [int(pixel) for pixel in pixel_sequence.split(' ')]
face = np.asarray(face).reshape(width, height)
face = cv2.resize(face.astype('uint8'),image_size)
faces.append(face.astype('float32'))
faces = np.asarray(faces)
faces = np.expand_dims(faces, -1)
emotions = pd.get_dummies(data['emotion']).to_numpy()
return faces, emotions
def preprocess_input(x, v2=True):
x = x.astype('float32')
x = x / 255.0
if v2:
x = x - 0.5
x = x * 2.0
return x | |
network.go | // Copyright 2018 ETH Zurich
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package snetproxy
import (
"net"
"time"
"github.com/scionproto/scion/go/lib/addr"
"github.com/scionproto/scion/go/lib/snet"
)
var _ snet.Network = (*ProxyNetwork)(nil)
// ProxyNetwork is a wrapper network that creates conns with transparent
// reconnection capabilities. Connections created by ProxyNetwork also validate
// that dispatcher registrations do not change addresses.
//
// Callers interested in providing their own reconnection callbacks and
// validating the new connection themselves should use the proxy connection
// constructors directly.
type ProxyNetwork struct {
network snet.Network
}
// NewProxyNetwork adds transparent reconnection capabilities to the
// connections created by an snet network.
func | (network snet.Network) *ProxyNetwork {
return &ProxyNetwork{network: network}
}
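// A minimal usage sketch (not part of the upstream source; baseNetwork,
// laddr and timeout are assumed to be supplied by the caller): wrap a
// snet.Network once and every conn it creates reconnects transparently.
//
//     pn := NewProxyNetwork(baseNetwork)
//     conn, err := pn.ListenSCIONWithBindSVC("udp4", laddr, nil, addr.SvcNone, timeout)
//     // on dispatcher failure, conn re-registers and validates its address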
func (pn *ProxyNetwork) DialSCIONWithBindSVC(network string,
laddr, raddr, baddr *snet.Addr, svc addr.HostSVC, timeout time.Duration) (snet.Conn, error) {
dialer := pn.newReconnecterFromDialArgs(network, laddr, raddr, baddr, svc)
conn, err := dialer.Reconnect(timeout)
if err != nil {
return nil, err
}
reconnecter := pn.newReconnecterFromDialArgs(
network,
toSnetAddr(conn.LocalAddr()),
toSnetAddr(conn.RemoteAddr()),
toSnetAddr(conn.BindAddr()),
conn.SVC(),
)
return NewProxyConn(conn, reconnecter), nil
}
func (pn *ProxyNetwork) newReconnecterFromDialArgs(network string, laddr, raddr, baddr *snet.Addr,
svc addr.HostSVC) *TickingReconnecter {
f := func(timeout time.Duration) (snet.Conn, error) {
return pn.network.DialSCIONWithBindSVC(network, laddr, raddr, baddr, svc, timeout)
}
return NewTickingReconnecter(f)
}
func (pn *ProxyNetwork) ListenSCIONWithBindSVC(network string,
laddr, baddr *snet.Addr, svc addr.HostSVC, timeout time.Duration) (snet.Conn, error) {
listener := pn.newReconnecterFromListenArgs(network, laddr, baddr, svc)
conn, err := listener.Reconnect(timeout)
if err != nil {
return nil, err
}
reconnecter := pn.newReconnecterFromListenArgs(
network,
toSnetAddr(conn.LocalAddr()),
toSnetAddr(conn.BindAddr()),
conn.SVC(),
)
return NewProxyConn(conn, reconnecter), nil
}
func (pn *ProxyNetwork) newReconnecterFromListenArgs(network string,
laddr, baddr *snet.Addr, svc addr.HostSVC) *TickingReconnecter {
f := func(timeout time.Duration) (snet.Conn, error) {
return pn.network.ListenSCIONWithBindSVC(network, laddr, baddr, svc, timeout)
}
return NewTickingReconnecter(f)
}
func toSnetAddr(address net.Addr) *snet.Addr {
if address == nil {
return nil
}
return address.(*snet.Addr)
}
| NewProxyNetwork |
grobid_xml_enhancer.py | import logging
from io import BytesIO
from lxml import etree
from lxml.builder import E
from sciencebeam_gym.inference_model.extract_to_xml import (
XmlPaths,
create_node_recursive,
rsplit_xml_path
)
from .grobid_service import (
grobid_service,
GrobidApiPaths
)
TEI_NS = 'http://www.tei-c.org/ns/1.0'
TEI_NS_PREFIX = '{%s}' % TEI_NS
TEI_PERS_NAME = TEI_NS_PREFIX + 'persName'
TEI_FORENAME = TEI_NS_PREFIX + 'forename'
TEI_SURNAME = TEI_NS_PREFIX + 'surname'
JATS_SURNAME = 'surname'
JATS_GIVEN_NAMES = 'given-names'
JATS_ADDR_LINE = 'addr-line'
JATS_NAMED_CONTENT = 'named-content'
JATS_INSTITUTION = 'institution'
def get_logger():
return logging.getLogger(__name__)
def create_or_append(xml_root, path):
parent_path, tag_name = rsplit_xml_path(path)
parent_node = create_node_recursive(xml_root, parent_path, exists_ok=True)
node = E(tag_name) # pylint: disable=not-callable
parent_node.append(node)
return node
class | (object):
def __init__(self, grobid_url, start_service):
self.process_header_names = grobid_service(
grobid_url,
GrobidApiPaths.PROCESS_HEADER_NAMES,
start_service=start_service,
field_name='names'
)
self.process_affiliations = grobid_service(
grobid_url,
GrobidApiPaths.PROCESS_AFFILIATIONS,
start_service=start_service,
field_name='affiliations'
)
def process_and_replace_authors(self, xml_root):
author_nodes = list(xml_root.findall(XmlPaths.AUTHOR))
if author_nodes:
authors = '\n'.join(x.text for x in author_nodes)
get_logger().debug('authors: %s', authors)
grobid_response = self.process_header_names(authors)
get_logger().debug('grobid_response: %s', grobid_response)
response_xml_root = etree.fromstring('<dummy>%s</dummy>' % grobid_response)
for author in author_nodes:
author.getparent().remove(author)
for pers_name in response_xml_root.findall(TEI_PERS_NAME):
get_logger().debug('pers_name: %s', pers_name)
node = create_or_append(xml_root, XmlPaths.AUTHOR)
for surname in pers_name.findall(TEI_SURNAME):
node.append(E(JATS_SURNAME, surname.text)) # pylint: disable=not-callable
forenames = [x.text for x in pers_name.findall(TEI_FORENAME)]
if forenames:
node.append(
E(JATS_GIVEN_NAMES, ' '.join(forenames)) # pylint: disable=not-callable
)
return xml_root
def process_and_replace_affiliations(self, xml_root):
aff_nodes = list(xml_root.findall(XmlPaths.AUTHOR_AFF))
if aff_nodes:
affiliations = '\n'.join(x.text for x in aff_nodes)
get_logger().debug('affiliations: %s', affiliations)
grobid_response = self.process_affiliations(affiliations)
get_logger().debug('grobid_response: %s', grobid_response)
response_xml_root = etree.fromstring('<dummy>%s</dummy>' % grobid_response)
for aff in aff_nodes:
aff.getparent().remove(aff)
for affiliation in response_xml_root.findall('affiliation'):
get_logger().debug('affiliation: %s', affiliation)
node = create_or_append(xml_root, XmlPaths.AUTHOR_AFF)
for department in affiliation.xpath('./orgName[@type="department"]'):
node.append(E( # pylint: disable=not-callable
JATS_ADDR_LINE,
E( # pylint: disable=not-callable
JATS_NAMED_CONTENT,
department.text,
{
'content-type': 'department'
}
)
))
for institution in affiliation.xpath('./orgName[@type="institution"]'):
node.append(E( # pylint: disable=not-callable
JATS_INSTITUTION,
institution.text
))
def __call__(self, extracted_xml):
xml_root = etree.parse(BytesIO(extracted_xml))
self.process_and_replace_authors(xml_root)
self.process_and_replace_affiliations(xml_root)
return etree.tostring(xml_root, pretty_print=True)
| GrobidXmlEnhancer |
adapter.go | // Copyright 2017 The casbin Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package gormadapter
import (
"context"
"errors"
"fmt"
"runtime"
"strings"
"github.com/casbin/casbin/v2/model"
"github.com/casbin/casbin/v2/persist"
"gorm.io/driver/mysql"
"gorm.io/gorm"
)
const (
defaultDatabaseName = "casbin"
defaultTableName = "sys_casbin_rule"
)
type customTableKey struct{}
type CasbinRule struct {
PType string `gorm:"size:100"`
V0 string `gorm:"size:100"`
V1 string `gorm:"size:100"`
V2 string `gorm:"size:100"`
V3 string `gorm:"size:100"`
V4 string `gorm:"size:100"`
V5 string `gorm:"size:100"`
}
func (CasbinRule) TableName() string {
return "sys_casbin_rule"
}
type Filter struct {
PType []string
V0 []string
V1 []string
V2 []string
V3 []string
V4 []string
V5 []string
}
// Adapter represents the Gorm adapter for policy storage.
type Adapter struct {
driverName string
dataSourceName string
databaseName string
tablePrefix string
tableName string
dbSpecified bool
db *gorm.DB
isFiltered bool
}
// finalizer is the destructor for Adapter.
func finalizer(a *Adapter) {
sqlDB, err := a.db.DB()
if err != nil {
panic(err)
}
err = sqlDB.Close()
if err != nil {
panic(err)
}
}
// NewAdapter is the constructor for Adapter.
// Params (in order): databaseName, tableName, dbSpecified
//                     databaseName, {tableName/dbSpecified}
//                     {databaseName/dbSpecified}
// databaseName and tableName are user defined.
// Their default values are "casbin" and "sys_casbin_rule".
//
// dbSpecified is an optional bool parameter. The default value is false.
// It indicates whether you have already specified an existing DB in dataSourceName.
// If dbSpecified == true, you need to make sure the DB in dataSourceName exists.
// If dbSpecified == false, the adapter will automatically create a DB named databaseName.
func | (driverName string, dataSourceName string, params ...interface{}) (*Adapter, error) {
a := &Adapter{}
a.driverName = driverName
a.dataSourceName = dataSourceName
a.tableName = defaultTableName
a.databaseName = defaultDatabaseName
a.dbSpecified = false
if len(params) == 0 {
} else if len(params) == 1 {
switch p1 := params[0].(type) {
case bool:
a.dbSpecified = p1
case string:
a.databaseName = p1
default:
return nil, errors.New("wrong format")
}
} else if len(params) == 2 {
switch p2 := params[1].(type) {
case bool:
a.dbSpecified = p2
p1, ok := params[0].(string)
if !ok {
return nil, errors.New("wrong format")
}
a.databaseName = p1
case string:
p1, ok := params[0].(string)
if !ok {
return nil, errors.New("wrong format")
}
a.databaseName = p1
a.tableName = p2
default:
return nil, errors.New("wrong format")
}
} else if len(params) == 3 {
if p3, ok := params[2].(bool); ok {
a.dbSpecified = p3
a.databaseName = params[0].(string)
a.tableName = params[1].(string)
} else {
return nil, errors.New("wrong format")
}
} else {
return nil, errors.New("too many parameters")
}
// Open the DB, create it if not existed.
err := a.open()
if err != nil {
return nil, err
}
// Call the destructor when the object is released.
runtime.SetFinalizer(a, finalizer)
return a, nil
}
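// A minimal usage sketch (not part of the upstream source; the DSN is a
// placeholder):
//
//     // dbSpecified defaults to false, so the "casbin" database is created:
//     a, err := NewAdapter("mysql", "root:123@tcp(127.0.0.1:3306)/")
//     // explicit database and table names:
//     a, err = NewAdapter("mysql", "root:123@tcp(127.0.0.1:3306)/", "casbin", "casbin_rule")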
// NewAdapterByDBUseTableName creates a gorm-adapter from an existing Gorm instance with the specified table prefix and table name.
// Example: gormadapter.NewAdapterByDBUseTableName(&db, "cms", "casbin") automatically generates a table name like "cms_casbin".
func NewAdapterByDBUseTableName(db *gorm.DB, prefix string, tableName string) (*Adapter, error) {
if len(tableName) == 0 {
tableName = defaultTableName
}
a := &Adapter{
tablePrefix: prefix,
tableName: tableName,
}
a.db = db.Scopes(a.casbinRuleTable()).Session(&gorm.Session{Context: db.Statement.Context})
err := a.createTable()
if err != nil {
return nil, err
}
return a, nil
}
// NewFilteredAdapter is the constructor for FilteredAdapter.
// Casbin will not automatically call LoadPolicy() for a filtered adapter.
func NewFilteredAdapter(driverName string, dataSourceName string, params ...interface{}) (*Adapter, error) {
adapter, err := NewAdapter(driverName, dataSourceName, params...)
if err != nil {
return nil, err
}
adapter.isFiltered = true
return adapter, err
}
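// A minimal usage sketch (not part of the upstream source; the model path
// is a placeholder): a filtered adapter requires an explicit
// LoadFilteredPolicy call, restricting which rows are read via the Filter
// struct defined above.
//
//     e, _ := casbin.NewEnforcer("model.conf", a)
//     _ = e.LoadFilteredPolicy(Filter{V0: []string{"alice"}})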
// NewAdapterByDB creates a gorm-adapter from an existing Gorm instance
func NewAdapterByDB(db *gorm.DB) (*Adapter, error) {
return NewAdapterByDBUseTableName(db, "", defaultTableName)
}
func NewAdapterByDBWithCustomTable(db *gorm.DB, t interface{}) (*Adapter, error) {
ctx := db.Statement.Context
if ctx == nil {
ctx = context.Background()
}
ctx = context.WithValue(ctx, customTableKey{}, t)
return NewAdapterByDBUseTableName(db.WithContext(ctx), "", defaultTableName)
}
func openDBConnection(driverName, dataSourceName string) (*gorm.DB, error) {
var err error
var db *gorm.DB
if driverName == "mysql" {
db, err = gorm.Open(mysql.Open(dataSourceName), &gorm.Config{})
} else {
return nil, errors.New("database dialect is not supported")
}
if err != nil {
return nil, err
}
return db, err
}
func (a *Adapter) createDatabase() error {
var err error
db, err := openDBConnection(a.driverName, a.dataSourceName)
if err != nil {
return err
}
err = db.Exec("CREATE DATABASE IF NOT EXISTS " + a.databaseName).Error
if err != nil {
return err
}
return nil
}
func (a *Adapter) open() error {
var err error
var db *gorm.DB
if a.dbSpecified {
db, err = openDBConnection(a.driverName, a.dataSourceName)
if err != nil {
return err
}
} else {
if err = a.createDatabase(); err != nil {
return err
}
db, err = openDBConnection(a.driverName, a.dataSourceName+a.databaseName)
if err != nil {
return err
}
}
a.db = db.Scopes(a.casbinRuleTable()).Session(&gorm.Session{})
return a.createTable()
}
func (a *Adapter) close() error {
a.db = nil
return nil
}
// getTableInstance returns the dynamic table name
func (a *Adapter) getTableInstance() *CasbinRule {
return &CasbinRule{}
}
func (a *Adapter) getFullTableName() string {
if a.tablePrefix != "" {
return a.tablePrefix + "_" + a.tableName
}
return a.tableName
}
func (a *Adapter) casbinRuleTable() func(db *gorm.DB) *gorm.DB {
return func(db *gorm.DB) *gorm.DB {
tableName := a.getFullTableName()
return db.Table(tableName)
}
}
func (a *Adapter) createTable() error {
t := a.db.Statement.Context.Value(customTableKey{})
if t == nil {
t = a.getTableInstance()
}
if err := a.db.AutoMigrate(t); err != nil {
return err
}
tableName := a.getFullTableName()
index := "idx_" + tableName
hasIndex := a.db.Migrator().HasIndex(t, index)
if !hasIndex {
if err := a.db.Exec(fmt.Sprintf("CREATE UNIQUE INDEX %s ON %s (p_type,v0,v1,v2,v3,v4,v5)", index, tableName)).Error; err != nil {
return err
}
}
return nil
}
func (a *Adapter) dropTable() error {
t := a.db.Statement.Context.Value(customTableKey{})
if t == nil {
return a.db.Migrator().DropTable(a.getTableInstance())
}
return a.db.Migrator().DropTable(t)
}
func loadPolicyLine(line CasbinRule, model model.Model) {
var p = []string{line.PType,
line.V0, line.V1, line.V2, line.V3, line.V4, line.V5}
var lineText string
if line.V5 != "" {
lineText = strings.Join(p, ", ")
} else if line.V4 != "" {
lineText = strings.Join(p[:6], ", ")
} else if line.V3 != "" {
lineText = strings.Join(p[:5], ", ")
} else if line.V2 != "" {
lineText = strings.Join(p[:4], ", ")
} else if line.V1 != "" {
lineText = strings.Join(p[:3], ", ")
} else if line.V0 != "" {
lineText = strings.Join(p[:2], ", ")
}
persist.LoadPolicyLine(lineText, model)
}
// LoadPolicy loads policy from database.
func (a *Adapter) LoadPolicy(model model.Model) error {
var lines []CasbinRule
if err := a.db.Find(&lines).Error; err != nil {
return err
}
for _, line := range lines {
loadPolicyLine(line, model)
}
return nil
}
// LoadFilteredPolicy loads only policy rules that match the filter.
func (a *Adapter) LoadFilteredPolicy(model model.Model, filter interface{}) error {
var lines []CasbinRule
filterValue, ok := filter.(Filter)
if !ok {
return errors.New("invalid filter type")
}
if err := a.db.Scopes(a.filterQuery(a.db, filterValue)).Find(&lines).Error; err != nil {
return err
}
for _, line := range lines {
loadPolicyLine(line, model)
}
a.isFiltered = true
return nil
}
// IsFiltered returns true if the loaded policy has been filtered.
func (a *Adapter) IsFiltered() bool {
return a.isFiltered
}
// filterQuery builds the gorm query matching the given rule filter, for use within a scope.
func (a *Adapter) filterQuery(db *gorm.DB, filter Filter) func(db *gorm.DB) *gorm.DB {
return func(db *gorm.DB) *gorm.DB {
if len(filter.PType) > 0 {
db = db.Where("p_type in (?)", filter.PType)
}
if len(filter.V0) > 0 {
db = db.Where("v0 in (?)", filter.V0)
}
if len(filter.V1) > 0 {
db = db.Where("v1 in (?)", filter.V1)
}
if len(filter.V2) > 0 {
db = db.Where("v2 in (?)", filter.V2)
}
if len(filter.V3) > 0 {
db = db.Where("v3 in (?)", filter.V3)
}
if len(filter.V4) > 0 {
db = db.Where("v4 in (?)", filter.V4)
}
if len(filter.V5) > 0 {
db = db.Where("v5 in (?)", filter.V5)
}
return db
}
}
func (a *Adapter) savePolicyLine(ptype string, rule []string) CasbinRule {
line := a.getTableInstance()
line.PType = ptype
if len(rule) > 0 {
line.V0 = rule[0]
}
if len(rule) > 1 {
line.V1 = rule[1]
}
if len(rule) > 2 {
line.V2 = rule[2]
}
if len(rule) > 3 {
line.V3 = rule[3]
}
if len(rule) > 4 {
line.V4 = rule[4]
}
if len(rule) > 5 {
line.V5 = rule[5]
}
return *line
}
// SavePolicy saves policy to database.
func (a *Adapter) SavePolicy(model model.Model) error {
err := a.dropTable()
if err != nil {
return err
}
err = a.createTable()
if err != nil {
return err
}
for ptype, ast := range model["p"] {
for _, rule := range ast.Policy {
line := a.savePolicyLine(ptype, rule)
err := a.db.Create(&line).Error
if err != nil {
return err
}
}
}
for ptype, ast := range model["g"] {
for _, rule := range ast.Policy {
line := a.savePolicyLine(ptype, rule)
err := a.db.Create(&line).Error
if err != nil {
return err
}
}
}
return nil
}
// AddPolicy adds a policy rule to the storage.
func (a *Adapter) AddPolicy(sec string, ptype string, rule []string) error {
line := a.savePolicyLine(ptype, rule)
err := a.db.Create(&line).Error
return err
}
// RemovePolicy removes a policy rule from the storage.
func (a *Adapter) RemovePolicy(sec string, ptype string, rule []string) error {
line := a.savePolicyLine(ptype, rule)
err := a.rawDelete(a.db, line) //can't use db.Delete as we're not using primary key http://jinzhu.me/gorm/crud.html#delete
return err
}
// AddPolicies adds multiple policy rules to the storage.
func (a *Adapter) AddPolicies(sec string, ptype string, rules [][]string) error {
return a.db.Transaction(func(tx *gorm.DB) error {
for _, rule := range rules {
line := a.savePolicyLine(ptype, rule)
if err := tx.Create(&line).Error; err != nil {
return err
}
}
return nil
})
}
// RemovePolicies removes multiple policy rules from the storage.
func (a *Adapter) RemovePolicies(sec string, ptype string, rules [][]string) error {
return a.db.Transaction(func(tx *gorm.DB) error {
for _, rule := range rules {
line := a.savePolicyLine(ptype, rule)
if err := a.rawDelete(tx, line); err != nil { //can't use db.Delete as we're not using primary key http://jinzhu.me/gorm/crud.html#delete
return err
}
}
return nil
})
}
// RemoveFilteredPolicy removes policy rules that match the filter from the storage.
func (a *Adapter) RemoveFilteredPolicy(sec string, ptype string, fieldIndex int, fieldValues ...string) error {
line := a.getTableInstance()
line.PType = ptype
if fieldIndex <= 0 && 0 < fieldIndex+len(fieldValues) {
line.V0 = fieldValues[0-fieldIndex]
}
if fieldIndex <= 1 && 1 < fieldIndex+len(fieldValues) {
line.V1 = fieldValues[1-fieldIndex]
}
if fieldIndex <= 2 && 2 < fieldIndex+len(fieldValues) {
line.V2 = fieldValues[2-fieldIndex]
}
if fieldIndex <= 3 && 3 < fieldIndex+len(fieldValues) {
line.V3 = fieldValues[3-fieldIndex]
}
if fieldIndex <= 4 && 4 < fieldIndex+len(fieldValues) {
line.V4 = fieldValues[4-fieldIndex]
}
if fieldIndex <= 5 && 5 < fieldIndex+len(fieldValues) {
line.V5 = fieldValues[5-fieldIndex]
}
err := a.rawDelete(a.db, *line)
return err
}
func (a *Adapter) rawDelete(db *gorm.DB, line CasbinRule) error {
queryArgs := []interface{}{line.PType}
queryStr := "p_type = ?"
if line.V0 != "" {
queryStr += " and v0 = ?"
queryArgs = append(queryArgs, line.V0)
}
if line.V1 != "" {
queryStr += " and v1 = ?"
queryArgs = append(queryArgs, line.V1)
}
if line.V2 != "" {
queryStr += " and v2 = ?"
queryArgs = append(queryArgs, line.V2)
}
if line.V3 != "" {
queryStr += " and v3 = ?"
queryArgs = append(queryArgs, line.V3)
}
if line.V4 != "" {
queryStr += " and v4 = ?"
queryArgs = append(queryArgs, line.V4)
}
if line.V5 != "" {
queryStr += " and v5 = ?"
queryArgs = append(queryArgs, line.V5)
}
args := append([]interface{}{queryStr}, queryArgs...)
err := db.Delete(a.getTableInstance(), args...).Error
return err
}
func appendWhere(line CasbinRule) (string, []interface{}) {
queryArgs := []interface{}{line.PType}
queryStr := "p_type = ?"
if line.V0 != "" {
queryStr += " and v0 = ?"
queryArgs = append(queryArgs, line.V0)
}
if line.V1 != "" {
queryStr += " and v1 = ?"
queryArgs = append(queryArgs, line.V1)
}
if line.V2 != "" {
queryStr += " and v2 = ?"
queryArgs = append(queryArgs, line.V2)
}
if line.V3 != "" {
queryStr += " and v3 = ?"
queryArgs = append(queryArgs, line.V3)
}
if line.V4 != "" {
queryStr += " and v4 = ?"
queryArgs = append(queryArgs, line.V4)
}
if line.V5 != "" {
queryStr += " and v5 = ?"
queryArgs = append(queryArgs, line.V5)
}
return queryStr, queryArgs
}
// UpdatePolicy replaces an old policy rule with a new one in the DB.
func (a *Adapter) UpdatePolicy(sec string, ptype string, oldRule, newPolicy []string) error {
oldLine := a.savePolicyLine(ptype, oldRule)
queryStr, queryArgs := appendWhere(oldLine)
newLine := a.savePolicyLine(ptype, newPolicy)
err := a.db.Where(queryStr, queryArgs...).Updates(newLine).Error
return err
}
| NewAdapter |
preprocess.py | import argparse
import json
import os
import _jsonnet
import tqdm
from seq2struct import datasets
from seq2struct import models
from seq2struct.utils import registry
from seq2struct.utils import vocab
class Preprocessor:
def __init__(self, config):
self.config = config
self.model_preproc = registry.instantiate(
registry.lookup('model', config['model']).Preproc,
config['model'])
def preprocess(self):
self.model_preproc.clear_items()
for section in self.config['data']:
# if section=="train":
# continue | if to_add:
self.model_preproc.add_item(item, section, validation_info)
else:
print("======== Error parsing: {}".format(" ".join(item.text)))
self.model_preproc.save()
def add_parser():
parser = argparse.ArgumentParser()
parser.add_argument('--config', required=True)
parser.add_argument('--config-args')
args = parser.parse_args()
return args
def main(args):
if args.config_args:
config = json.loads(_jsonnet.evaluate_file(args.config, tla_codes={'args': args.config_args}))
else:
config = json.loads(_jsonnet.evaluate_file(args.config))
preprocessor = Preprocessor(config)
preprocessor.preprocess()
if __name__ == '__main__':
args = add_parser()
main(args) | data = registry.construct('dataset', self.config['data'][section])
for item in tqdm.tqdm(data, desc=section, dynamic_ncols=True):
if True:
to_add, validation_info = self.model_preproc.validate_item(item, section) |
expect_column_values_to_not_match_like_pattern_list.py | from typing import Optional
from great_expectations.core.expectation_configuration import ExpectationConfiguration
from great_expectations.expectations.expectation import (
ColumnMapExpectation,
InvalidExpectationConfigurationError,
)
from great_expectations.expectations.util import render_evaluation_parameter_string
from great_expectations.render.renderer.renderer import renderer
from great_expectations.render.util import substitute_none_for_missing
class ExpectColumnValuesToNotMatchLikePatternList(ColumnMapExpectation):
"""Expect column entries to be strings that do NOT match any of a provided list of like patterns expressions.
expect_column_values_to_not_match_like_pattern_list is a \
:func:`column_map_expectation <great_expectations.execution_engine.execution_engine.MetaExecutionEngine
.column_map_expectation>`.
Args:
column (str): \
The column name.
like_pattern_list (List[str]): \ | Return `"success": True` if at least mostly fraction of values match the expectation. \
For more detail, see :ref:`mostly`.
Other Parameters:
result_format (str or None): \
Which output mode to use: `BOOLEAN_ONLY`, `BASIC`, `COMPLETE`, or `SUMMARY`.
For more detail, see :ref:`result_format <result_format>`.
include_config (boolean): \
If True, then include the expectation config as part of the result object. \
For more detail, see :ref:`include_config`.
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see :ref:`catch_exceptions`.
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see :ref:`meta`.
Returns:
An ExpectationSuiteValidationResult
Exact fields vary depending on the values passed to :ref:`result_format <result_format>` and
:ref:`include_config`, :ref:`catch_exceptions`, and :ref:`meta`.
See Also:
:func:`expect_column_values_to_match_regex \
<great_expectations.execution_engine.execution_engine.ExecutionEngine.expect_column_values_to_match_regex>`
:func:`expect_column_values_to_match_regex_list \
<great_expectations.execution_engine.execution_engine.ExecutionEngine
.expect_column_values_to_match_regex_list>`
"""
library_metadata = {
"maturity": "production",
"tags": ["core expectation", "column map expectation"],
"contributors": [
"@great_expectations",
],
"requirements": [],
"has_full_test_suite": True,
"manually_reviewed_code": True,
}
map_metric = "column_values.not_match_like_pattern_list"
success_keys = (
"like_pattern_list",
"mostly",
)
default_kwarg_values = {
"like_pattern_list": None,
"row_condition": None,
"condition_parser": None, # we expect this to be explicitly set whenever a row_condition is passed
"mostly": 1,
"result_format": "BASIC",
"include_config": True,
"catch_exceptions": True,
}
args_keys = (
"column",
"like_pattern_list",
)
def validate_configuration(
self, configuration: Optional[ExpectationConfiguration]
) -> bool:
super().validate_configuration(configuration)
try:
assert (
"like_pattern_list" in configuration.kwargs
), "Must provide like_pattern_list"
assert isinstance(
configuration.kwargs.get("like_pattern_list"), (list, dict)
), "like_pattern_list must be a list"
assert isinstance(configuration.kwargs.get("like_pattern_list"), dict) or (
len(configuration.kwargs.get("like_pattern_list")) > 0
), "At least one like_pattern must be supplied in the like_pattern_list."
if isinstance(configuration.kwargs.get("like_pattern_list"), dict):
assert "$PARAMETER" in configuration.kwargs.get(
"like_pattern_list"
), 'Evaluation Parameter dict for like_pattern_list kwarg must have "$PARAMETER" key.'
except AssertionError as e:
raise InvalidExpectationConfigurationError(str(e))
return True
@classmethod
@renderer(renderer_type="renderer.prescriptive")
@render_evaluation_parameter_string
def _prescriptive_renderer(
cls,
configuration=None,
result=None,
language=None,
runtime_configuration=None,
**kwargs
):
runtime_configuration = runtime_configuration or {}
include_column_name = runtime_configuration.get("include_column_name", True)
include_column_name = (
include_column_name if include_column_name is not None else True
)
styling = runtime_configuration.get("styling")
params = substitute_none_for_missing(
configuration.kwargs,
["column", "mostly", "row_condition", "condition_parser"],
) | The list of like pattern expressions the column entries should NOT match.
Keyword Args:
mostly (None or a float between 0 and 1): \ |
value.rs | use rusoto_core::RusotoError;
use rusoto_secretsmanager::{
SecretsManager,
GetSecretValueRequest,
GetSecretValueResponse,
GetSecretValueError,
PutSecretValueRequest,
PutSecretValueResponse,
PutSecretValueError,
};
use serde::{Serialize, Deserialize};
use serde_json::Value;
use std::time::Duration;
use std::collections::HashMap;
use std::fmt;
use crate::{
RotatorError,
RotatorResult,
SM_CLIENT,
};
#[derive(Clone, Default, Serialize, Deserialize)]
pub struct Secret {
pub username: Option<String>,
pub password: Option<String>,
// for service specific credentials:
#[serde(skip_serializing_if = "Option::is_none")]
pub service_specific_credential_id: Option<String>,
// capture unknown fields for future proofing and interoperability
#[serde(flatten)]
pub attributes: HashMap<String, Value>,
}
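// A minimal illustrative sketch (not part of the upstream source): thanks to
// #[serde(flatten)], keys this struct does not model round-trip through
// `attributes`, so unknown fields survive a deserialise/serialise cycle.
//
//     let s: Secret = serde_json::from_str(
//         r#"{"username":"app","password":"pw","engine":"mysql"}"#,
//     ).unwrap();
//     assert_eq!(s.attributes["engine"], "mysql");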
impl fmt::Debug for Secret {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let mut s = self.clone();
if s.password.is_some() {
s.password = Some("******".to_string());
}
write!(f, "{}", serde_json::to_string(&s).map_err(|err| {
error!("error displaying secret details: {:?}", err);
fmt::Error
})?)
}
}
#[derive(Clone)]
pub struct SecretValue {
pub secret: Option<Secret>,
}
pub fn | (secret_id: &str, version_stage: Option<&str>, version_id: Option<&str>, timeout: Duration) -> RotatorResult<SecretValue> {
info!("fetching secret for secret_id={} version_stage={:?} version_id={:?}", secret_id, version_stage, version_id);
let value = get_secret_value_string(secret_id, version_stage, version_id, timeout)?;
let secret = if let Some(string_value) = value.secret_string {
serde_json::from_str(&string_value)
.map_err(|err| RotatorError::SerialisationError {
secret_id: secret_id.to_string(),
message: format!("secret string value deserialisation error: {:?}", err),
})?
} else {
None
};
info!("found secret {:?}", secret);
Ok(SecretValue {
secret: secret,
})
}
pub fn get_secret_value_string(secret_id: &str, version_stage: Option<&str>, version_id: Option<&str>, timeout: Duration) -> RotatorResult<GetSecretValueResponse> {
SM_CLIENT.get_secret_value(GetSecretValueRequest {
secret_id: secret_id.to_string(),
version_stage: version_stage.map(|s| s.to_string()),
version_id: version_id.map(|s| s.to_string()),
})
.with_timeout(timeout)
.sync()
.map_err(|e| match e {
RusotoError::Service(GetSecretValueError::ResourceNotFound(msg)) => {
RotatorError::SecretValueNotFound {
secret_id: secret_id.to_string(),
version_stage: version_stage.map(|s| s.to_string()),
version_ids: version_id.map(|s| vec![s.to_string()]).unwrap_or(vec![]),
message: msg,
}
},
e => RotatorError::GetSecretValue(format!("{:?}", e))
})
}
pub fn put_secret_value(secret_id: &str, token: &str, secret: &Secret, version_stage: &str, timeout: Duration) -> RotatorResult<PutSecretValueResponse> {
info!("putting secret for secret_id={} version_stage={:?} version_id={:?} secret={:?}", secret_id, version_stage, token, secret);
let secret_string = serde_json::to_string(secret)
.map_err(|err| RotatorError::SerialisationError {
secret_id: secret_id.to_string(),
message: format!("{:?}", err),
})?;
put_secret_value_string(secret_id, token, &secret_string, version_stage, timeout)
}
pub fn put_secret_value_string(secret_id: &str, token: &str, secret_string: &str, version_stage: &str, timeout: Duration) -> RotatorResult<PutSecretValueResponse> {
SM_CLIENT.put_secret_value(PutSecretValueRequest {
secret_id: secret_id.to_string(),
client_request_token: Some(token.to_string()),
secret_string: Some(secret_string.to_string()),
version_stages: Some(vec![version_stage.to_string()]),
..Default::default()
})
.with_timeout(timeout)
.sync()
.map_err(|e| match e {
RusotoError::Service(PutSecretValueError::ResourceNotFound(msg)) => {
RotatorError::SecretValueNotFound {
secret_id: secret_id.to_string(),
version_stage: Some(version_stage.to_string()),
version_ids: vec![token.to_string()],
message: msg,
}
},
RusotoError::Service(PutSecretValueError::EncryptionFailure(msg)) => {
RotatorError::EncryptionFailure {
secret_id: secret_id.to_string(),
message: msg,
}
},
e => RotatorError::PutSecretValue(format!("{:?}", e))
})
} | get_secret_value |
calculator_test.go | package calculator_test
import (
"calculator"
"math/rand"
"reflect"
"runtime"
"testing"
"time"
)
// Note that Fatal and Fatalf are also useful methods,
// to stop further execution within the same test.
// Return the name of a function.
func getFuncName(i interface{}) string {
return runtime.FuncForPC(reflect.ValueOf(i).Pointer()).Name()
}
// Return a slice of N random numbers from 0-500
func randFloat64Slice(n int) []float64 {
rand.Seed(time.Now().UnixNano())
r := make([]float64, n)
// Ranging over r loops once per element, i.e. n times.
for i := range r {
x := rand.Float64() * float64(rand.Intn(500))
r[i] = x
}
return r
}
// Test Add(), Subtract(), and Multiply()
func TestAddSubtractMultiply(t *testing.T) {
// Define test cases
// This anonymous struct has no name, as its only used here.
testCases := []struct {
function func(float64, float64, ...float64) float64
a float64
b float64
want float64
description string
}{
{
function: calculator.Add,
description: "two positive numbers which sum to a positive",
a: 2,
b: 2,
want: 4,
},
{
function: calculator.Add,
description: "a positive and negative number which sum to a positive",
a: 7,
b: -2,
want: 5,
},
{
function: calculator.Add,
description: "a positive and negative number which sum to a negative",
a: 3,
b: -5,
want: -2,
},
{
function: calculator.Subtract,
description: "two positive numbers whos difference is negative",
a: 2,
b: 9,
want: -7,
},
{
function: calculator.Subtract,
description: "two positive numbers whos difference is positive",
a: 7,
b: 2,
want: 5,
},
{
function: calculator.Subtract,
description: "one positive and one negative decimal number whos difference is a positive decimal",
a: 3,
b: -2.5,
want: 5.5,
},
{
function: calculator.Multiply,
description: "two positive numbers whos product is positive",
a: 2,
b: 20,
want: 40,
},
{
function: calculator.Multiply,
description: " a positive and negative number whos product is negative",
a: 7,
b: -2,
want: -14,
},
{
function: calculator.Multiply,
description: "a positive decimal and negative decimal whos product is a negative decimal",
a: 8.4,
b: -2.5,
want: -21,
},
}
t.Parallel()
for _, c := range testCases {
got := c.function(c.a, c.b)
if c.want != got {
t.Errorf("want %v, got %v, while testing %s. The function call was: %s(%v, %v)", c.want, got, c.description, getFuncName(c.function), c.a, c.b)
}
}
}
// Generate 100 random test-cases for Add()
func TestAddRandomly(t *testing.T) {
t.Parallel()
rand.Seed(time.Now().UnixNano())
// logs are viewable using `go test -v`
t.Log("Beginning random test-cases for Add(). . .")
for i := 0; i < 100; i++ {
// rand.Float64() returns a number in 0.0-1.0
// Use another randomly-generated int to vary the whole number.
a := rand.Float64() * float64(rand.Intn(500))
b := rand.Float64() * float64(rand.Intn(500))
want := a + b
t.Logf("Random test %d: Add(%v, %v), wants %v", i, a, b, want)
got := calculator.Add(a, b)
if want != got {
t.Errorf("want %v, got %v, while testing randomly-generated cases. The function call was: Add(%v, %v)", want, got, a, b)
}
}
t.Log("Completed random test-cases for Add(). . .")
}
// Randomly test the variadic call to Add()
func TestAddVariadicRandomly(t *testing.T) {
t.Parallel()
rand.Seed(time.Now().UnixNano())
// rand.Float64() returns a number in 0.0-1.0
// Use another randomly-generated int to vary the whole number.
a := rand.Float64() * float64(rand.Intn(500))
b := rand.Float64() * float64(rand.Intn(500))
// v will be assigned multiple random float64 in a slice
v := randFloat64Slice(5)
// Get our own sum to compare to Add()
var want float64 = a + b
for _, x := range v {
want += x
}
got := calculator.Add(a, b, v...)
t.Logf("Random variadic test: Add(%v, %v, %v), wants %v, got %v", a, b, v, want, got)
if want != got {
t.Errorf("want %v, got %v, while testing a random variadic case. The function call was: Add(%v, %v, %v)", want, got, a, b, v)
}
}
// Randomly test the variadic call to Subtract()
func TestSubtractVariadicRandomly(t *testing.T) {
t.Parallel()
rand.Seed(time.Now().UnixNano())
// rand.Float64() returns a number in 0.0-1.0
// Use another randomly-generated int to vary the whole number.
a := rand.Float64() * float64(rand.Intn(500))
b := rand.Float64() * float64(rand.Intn(500))
// v will be assigned multiple random float64 in a slice
v := randFloat64Slice(5)
// Get our own difference to compare to Subtract()
var want float64 = a - b
for _, x := range v {
want -= x
}
got := calculator.Subtract(a, b, v...)
t.Logf("Random variadic test: Subtract(%v, %v, %v), wants %v, got %v", a, b, v, want, got)
if want != got {
t.Errorf("want %v, got %v, while testing a random variadic case. The function call was: Subtract(%v, %v, %v)", want, got, a, b, v)
}
}
// Randomly test the variadic call to Multiply()
func TestMultiplyVariadicRandomly(t *testing.T) {
t.Parallel()
rand.Seed(time.Now().UnixNano())
// rand.Float64() returns a number in 0.0-1.0
// Use another randomly-generated int to vary the whole number.
a := rand.Float64() * float64(rand.Intn(500))
b := rand.Float64() * float64(rand.Intn(500))
// v will be assigned multiple random float64 in a slice
v := randFloat64Slice(5)
// Get our own product to compare to Multiply()
var want float64 = a * b
for _, x := range v {
want *= x
}
got := calculator.Multiply(a, b, v...)
t.Logf("Random variadic test: Multiply(%v, %v, %v), wants %v, got %v", a, b, v, want, got)
if want != got {
t.Errorf("want %v, got %v, while testing a random variadic case. The function call was: Multiply(%v, %v, %v)", want, got, a, b, v)
}
}
func TestDivide(t *testing.T) {
// Define test cases
testCases := []struct {
a float64
b float64
want float64
description string
errExpected bool
}{
{
description: "dividing by zero",
a: 2,
b: 0,
want: 123456789,
errExpected: true,
},
{
description: "two positive numbers whos quotient is positive",
a: 20,
b: 2,
want: 10,
},
{
description: " a positive and negative number whos quotient is negative",
a: 10,
b: -2,
want: -5,
},
{
description: "a positive decimal and negative decimal whos quotient is a negative decimal",
a: 8.4,
b: -2.5,
want: -3.3600000000000003,
},
}
t.Parallel()
for _, c := range testCases {
got, err := calculator.Divide(c.a, c.b)
if err != nil && !c.errExpected {
t.Errorf("error received while testing %s. The function call was: Divide(%v, %v), and the error was: %v", c.description, c.a, c.b, err)
}
// Only fail on want != got if an error was not expected
if !c.errExpected && c.want != got {
t.Errorf("want %v, got %v, while testing %s. The function call was: Divide(%v, %v)", c.want, got, c.description, c.a, c.b)
}
}
}
// Randomly test the variadic call to Divide()
func TestDivideVariadicRandomly(t *testing.T) {
t.Parallel()
rand.Seed(time.Now().UnixNano())
// rand.Float64() returns a number in 0.0-1.0
// Use another randomly-generated int to vary the whole number.
a := rand.Float64() * float64(rand.Intn(500))
b := rand.Float64() * float64(rand.Intn(500))
// Avoid dividing by 0
if b == 0 {
b += 0.1
}
// v will be assigned multiple random float64 in a slice
v := randFloat64Slice(5)
// Get our own quotient to compare to Divide()
// Also change any randomly-generated 0.0s, to avoid dividing-by-zero!
var want float64 = a / b
for i, x := range v {
if x == 0 {
x += 0.1
v[i] = x // change the slice
}
want /= x
}
got, err := calculator.Divide(a, b, v...)
if err != nil {
t.Errorf("error received while testing a random variadic case. The function call was: Divide(%v, %v, %v), and the error was: %v", a, b, v, err)
}
t.Logf("Random variadic test: Divide(%v, %v, %v), wants %v, got %v", a, b, v, want, got)
if want != got {
t.Errorf("want %v, got %v, while testing a random variadic case. The function call was: Divide(%v, %v, %v)", want, got, a, b, v)
}
}
func | (t *testing.T) {
// Define test cases
testCases := []struct {
a float64
b float64
want float64
description string
errExpected bool
}{
{
description: "negative input",
a: -64,
want: 123456789,
errExpected: true,
},
{
description: "64",
a: 64,
want: 8,
},
}
t.Parallel()
for _, c := range testCases {
got, err := calculator.Sqrt(c.a)
if err != nil && !c.errExpected {
t.Errorf("error received while testing %s. The function call was: Sqrt(%v), and the error was: %v", c.description, c.a, err)
}
// Only fail on want != got if an error was not expected
if !c.errExpected && c.want != got {
t.Errorf("want %v, got %v, while testing %s. The function call was: Sqrt(%v)", c.want, got, c.description, c.a)
}
}
}
// Test evaluating an expression,
// a string representation of an operation like 1 + 8
func TestEvaluateExpression(t *testing.T) {
// Define test cases
testCases := []struct {
e, description string
want float64
errExpected bool
}{
{
description: "an expression with two positive numbers which sum to a positive",
e: "2+2",
want: 4,
},
{
description: "an expression with two positive numbers whos difference is negative",
e: "2 - 9",
want: -7,
},
{
description: "an expression with two positive numbers whos product is positive",
e: "2 * 20",
want: 40,
},
{
description: "an expression that divides by zero",
e: "2 / 0",
want: 123456789,
errExpected: true,
},
{
description: "an expression with two positive numbers whos quotient is positive",
e: "20 / 2",
want: 10,
},
{
description: "an expression with an invalid operator",
e: "2 X 2",
want: 123456789,
errExpected: true,
},
{
description: "an invalid multi-expression",
e: "2 + 2 * 2",
want: 123456789,
errExpected: true,
},
}
t.Parallel()
for _, c := range testCases {
got, err := calculator.EvaluateExpression(c.e)
if err != nil && !c.errExpected {
t.Errorf("error received while testing %s. The function call was: Expression(%v), and the error was: %v", c.description, c.e, err)
}
// Only fail on want != got if an error was not expected
if !c.errExpected && c.want != got {
t.Errorf("want %v, got %v, while testing %s. The function call was: Expression(%v)", c.want, got, c.description, c.e)
}
}
}
| TestSqrt |
odoo_module.py | # Copyright (C) 2016-Today: Odoo Community Association (OCA)
# @author: Sylvain LE GAL (https://twitter.com/legalsylvain)
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from odoo import _, api, fields, models
from odoo.tools import html_sanitize
class OdooModule(models.Model):
_inherit = "abstract.action.mixin"
_name = "odoo.module"
_description = "Odoo Module"
_order = "technical_name, name"
# Column Section
name = fields.Char(
string="Name", store=True, readonly=True, compute="_compute_name"
)
technical_name = fields.Char(
string="Technical Name", index=True, required=True, readonly=True
)
module_version_ids = fields.One2many(
comodel_name="odoo.module.version",
inverse_name="module_id",
string="Versions",
readonly=True,
)
module_version_qty = fields.Integer(
string="Number of Module Versions",
compute="_compute_module_version_qty",
store=True,
)
author_ids = fields.Many2many(
string="Authors",
comodel_name="odoo.author",
compute="_compute_author",
relation="github_module_author_rel",
column1="module_id",
column2="author_id",
store=True,
)
author_ids_description = fields.Char(
string="Authors (Text)", compute="_compute_author", store=True
)
organization_serie_ids = fields.Many2many(
string="Series",
comodel_name="github.organization.serie",
compute="_compute_organization_serie",
store=True,
relation="github_module_organization_serie_rel",
column1="module_id",
column2="organization_serie_id",
)
organization_serie_ids_description = fields.Char(
string="Series (Text)", store=True, compute="_compute_organization_serie",
)
description_rst = fields.Char(
string="RST Description of the last Version",
store=True,
readonly=True,
compute="_compute_description",
)
description_rst_html = fields.Html(
string="HTML of the RST Description of the last Version",
store=True,
readonly=True,
compute="_compute_description",
)
dependence_module_version_ids = fields.Many2many(
comodel_name="odoo.module.version",
string="Module Versions that depend on this module",
relation="module_version_dependency_rel",
column1="dependency_module_id",
column2="module_version_id",
)
dependence_module_version_qty = fields.Integer(
string="Number of Module Versions that depend on this module",
compute="_compute_dependence_module_version_qty",
store=True,
)
image = fields.Binary(
string="Icon Image", compute="_compute_image", store=True, attachment=True
)
# Compute Section
@api.depends("module_version_ids.image")
def _compute_image(self):
|
@api.depends("technical_name", "module_version_ids.name")
def _compute_name(self):
module_version_obj = self.env["odoo.module.version"]
for module in self:
version_ids = module.module_version_ids.ids
last_version = module_version_obj.search(
[("id", "in", version_ids)], order="organization_serie_id desc", limit=1
)
if last_version:
module.name = last_version.name
else:
module.name = module.technical_name
@api.depends("module_version_ids", "module_version_ids.description_rst_html")
def _compute_description(self):
module_version_obj = self.env["odoo.module.version"]
for module in self:
version_ids = module.module_version_ids.ids
last_version = module_version_obj.search(
[("id", "in", version_ids)], order="organization_serie_id desc", limit=1
)
if last_version:
module.description_rst = last_version.description_rst
module.description_rst_html = last_version.description_rst_html
else:
module.description_rst = ""
module.description_rst_html = html_sanitize(
"<h1 style='color:gray;'>" + _("No Version Found") + "</h1>"
)
@api.depends("dependence_module_version_ids.dependency_module_ids")
def _compute_dependence_module_version_qty(self):
for module in self:
module.dependence_module_version_qty = len(
module.dependence_module_version_ids
)
@api.depends("module_version_ids")
def _compute_module_version_qty(self):
for module in self:
module.module_version_qty = len(module.module_version_ids)
@api.depends("module_version_ids.author_ids")
def _compute_author(self):
for module in self:
authors = []
for version in module.module_version_ids:
authors += version.author_ids
authors = set(authors)
module.author_ids = [x.id for x in authors]
module.author_ids_description = ", ".join(sorted([x.name for x in authors]))
@api.depends("module_version_ids.organization_serie_id")
def _compute_organization_serie(self):
for module in self:
organization_series = []
for version in module.module_version_ids:
organization_series += version.organization_serie_id
organization_series = set(organization_series)
module.organization_serie_ids = [x.id for x in organization_series]
module.organization_serie_ids_description = " - ".join(
[x.name for x in sorted(organization_series, key=lambda x: x.sequence)]
)
# Custom Section
@api.model
def create_if_not_exist(self, technical_name):
module = self.search([("technical_name", "=", technical_name)])
if not module:
module = self.create({"technical_name": technical_name})
return module
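    # Illustrative call (hedged: the model's _name is not visible in this
    # excerpt, so "odoo.module" and the technical name are assumptions):
    #     module = self.env["odoo.module"].create_if_not_exist("web_responsive")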
def name_get(self):
return [(module.id, module.technical_name) for module in self]
| module_version_obj = self.env["odoo.module.version"]
for module in self:
version_ids = module.module_version_ids.ids
last_version = module_version_obj.search(
[("id", "in", version_ids)], order="organization_serie_id desc", limit=1
)
module.image = last_version and last_version.image |
logger.rs | pub fn register() {
log::set_logger(&KernelLogger).unwrap();
log::set_max_level(log::LevelFilter::Info);
}
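// Illustrative call site (assumed, not shown in this file): invoke
// `register()` once during early kernel init so that subsequent
// `log::info!`-style macros are routed through `KernelLogger`.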
struct KernelLogger;
impl log::Log for KernelLogger {
fn enabled(&self, _metadata: &log::Metadata) -> bool |
fn log(&self, record: &log::Record) {
sprintln!("{}: {}", record.level(), record.args());
}
fn flush(&self) {}
}
| {
true
} |
zt_copy_blob_upload_test.go | // Copyright © 2017 Microsoft <[email protected]>
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package cmd
import (
"github.com/Azure/azure-storage-azcopy/common"
chk "gopkg.in/check.v1"
"net/url"
"path/filepath"
"strings"
)
// regular local file->blob upload
func (s *cmdIntegrationSuite) TestUploadSingleFileToBlobVirtualDirectory(c *chk.C) {
bsu := getBSU()
containerURL, containerName := createNewContainer(c, bsu)
defer deleteContainer(c, containerURL)
for _, srcFileName := range []string{"singleblobisbest", "打麻将.txt", "%4509%4254$85140&"} {
// set up the source as a single file
srcDirName := scenarioHelper{}.generateLocalDirectory(c)
fileList := []string{srcFileName}
scenarioHelper{}.generateLocalFilesFromList(c, srcDirName, fileList)
		// set up the destination as a virtual directory path (no blob is created here)
dstBlobName := "testfolder/"
// set up interceptor
mockedRPC := interceptor{}
Rpc = mockedRPC.intercept
mockedRPC.init()
// construct the raw input to simulate user input
rawBlobURLWithSAS := scenarioHelper{}.getRawBlobURLWithSAS(c, containerName, dstBlobName)
raw := getDefaultCopyRawInput(filepath.Join(srcDirName, srcFileName), rawBlobURLWithSAS.String())
		// run the copy and verify the scheduled transfer
runCopyAndVerify(c, raw, func(err error) {
c.Assert(err, chk.IsNil)
// Validate that the destination is the file name (within the folder).
// The destination being the folder *was* the issue in the past.
// The service would just name the file as the folder if we didn't explicitly specify it.
c.Assert(len(mockedRPC.transfers), chk.Equals, 1)
d, err := url.PathUnescape(mockedRPC.transfers[0].Destination) //Unescape the destination, as we have special characters.
c.Assert(err, chk.IsNil)
c.Assert(d, chk.Equals, srcFileName)
})
// clean the RPC for the next test
mockedRPC.reset()
// now target the destination container, the result should be the same
rawContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(c, containerName)
raw = getDefaultCopyRawInput(filepath.Join(srcDirName, srcFileName), rawContainerURLWithSAS.String())
		// run the copy again and verify the scheduled transfer
runCopyAndVerify(c, raw, func(err error) {
c.Assert(err, chk.IsNil)
// verify explicitly since the source and destination names will be different:
// the source is "" since the given URL points to the blob itself
// the destination should be the blob name, since the given local path points to the parent dir
c.Assert(len(mockedRPC.transfers), chk.Equals, 1)
c.Assert(mockedRPC.transfers[0].Source, chk.Equals, "")
c.Assert(mockedRPC.transfers[0].Destination, chk.Equals, common.AZCOPY_PATH_SEPARATOR_STRING+url.PathEscape(srcFileName))
})
}
}
// regular local file->blob upload
func (s *cmdIntegrationSuite) TestUploadSingleFileToBlob(c *chk.C) {
bsu := getBSU()
containerURL, containerName := createNewContainer(c, bsu)
defer deleteContainer(c, containerURL)
for _, srcFileName := range []string{"singleblobisbest", "打麻将.txt", "%4509%4254$85140&"} {
// set up the source as a single file
srcDirName := scenarioHelper{}.generateLocalDirectory(c)
fileList := []string{srcFileName}
scenarioHelper{}.generateLocalFilesFromList(c, srcDirName, fileList)
// set up the destination container with a single blob
dstBlobName := "whatever"
scenarioHelper{}.generateBlobsFromList(c, containerURL, []string{dstBlobName}, blockBlobDefaultData)
c.Assert(containerURL, chk.NotNil)
// set up interceptor
mockedRPC := interceptor{}
Rpc = mockedRPC.intercept
mockedRPC.init()
// construct the raw input to simulate user input
rawBlobURLWithSAS := scenarioHelper{}.getRawBlobURLWithSAS(c, containerName, dstBlobName)
raw := getDefaultCopyRawInput(filepath.Join(srcDirName, srcFileName), rawBlobURLWithSAS.String())
		// run the copy; unlike sync, the pre-existing destination blob does not block the transfer
runCopyAndVerify(c, raw, func(err error) {
c.Assert(err, chk.IsNil)
// validate that the right number of transfers were scheduled
validateUploadTransfersAreScheduled(c, "", "", []string{""}, mockedRPC)
})
// clean the RPC for the next test
mockedRPC.reset()
// now target the destination container, the result should be the same
rawContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(c, containerName)
raw = getDefaultCopyRawInput(filepath.Join(srcDirName, srcFileName), rawContainerURLWithSAS.String())
		// run the copy again and verify the scheduled transfer
runCopyAndVerify(c, raw, func(err error) {
c.Assert(err, chk.IsNil)
// verify explicitly since the source and destination names will be different:
// the source is "" since the given URL points to the blob itself
// the destination should be the blob name, since the given local path points to the parent dir
c.Assert(len(mockedRPC.transfers), chk.Equals, 1)
c.Assert(mockedRPC.transfers[0].Source, chk.Equals, "")
c.Assert(mockedRPC.transfers[0].Destination, chk.Equals, common.AZCOPY_PATH_SEPARATOR_STRING+url.PathEscape(srcFileName))
})
}
}
// regular directory->container upload
func (s *cmdIntegrationSuite) TestUploadDirectoryToContainer(c *chk.C) {
bsu := getBSU()
// set up the source with numerous files
srcDirPath := scenarioHelper{}.generateLocalDirectory(c)
fileList := scenarioHelper{}.generateCommonRemoteScenarioForLocal(c, srcDirPath, "")
// set up an empty container
containerURL, containerName := createNewContainer(c, bsu)
defer deleteContainer(c, containerURL)
// set up interceptor
mockedRPC := interceptor{}
Rpc = mockedRPC.intercept
mockedRPC.init()
// construct the raw input to simulate user input
rawContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(c, containerName)
raw := getDefaultCopyRawInput(srcDirPath, rawContainerURLWithSAS.String())
raw.recursive = true
runCopyAndVerify(c, raw, func(err error) {
c.Assert(err, chk.IsNil)
// validate that the right number of transfers were scheduled
c.Assert(len(mockedRPC.transfers), chk.Equals, len(fileList))
// validate that the right transfers were sent
validateUploadTransfersAreScheduled(c, common.AZCOPY_PATH_SEPARATOR_STRING,
common.AZCOPY_PATH_SEPARATOR_STRING+filepath.Base(srcDirPath)+common.AZCOPY_PATH_SEPARATOR_STRING, fileList, mockedRPC)
})
// turn off recursive, this time nothing should be transferred
raw.recursive = false
mockedRPC.reset()
runCopyAndVerify(c, raw, func(err error) {
c.Assert(err, chk.NotNil)
c.Assert(len(mockedRPC.transfers), chk.Equals, 0)
})
}
// regular directory->virtual dir upload
func (s *cmdIntegrationSuite) TestUploadDirectoryToVirtualDirectory(c *chk.C) {
bsu := getBSU()
vdirName := "vdir"
// set up the source with numerous files
srcDirPath := scenarioHelper{}.generateLocalDirectory(c)
fileList := scenarioHelper{}.generateCommonRemoteScenarioForLocal(c, srcDirPath, "")
// set up an empty container
containerURL, containerName := createNewContainer(c, bsu)
defer deleteContainer(c, containerURL)
// set up interceptor
mockedRPC := interceptor{}
Rpc = mockedRPC.intercept
mockedRPC.init()
// construct the raw input to simulate user input
	rawVdirURLWithSAS := scenarioHelper{}.getRawBlobURLWithSAS(c, containerName, vdirName)
	raw := getDefaultCopyRawInput(srcDirPath, rawVdirURLWithSAS.String())
raw.recursive = true
runCopyAndVerify(c, raw, func(err error) {
c.Assert(err, chk.IsNil)
// validate that the right number of transfers were scheduled
c.Assert(len(mockedRPC.transfers), chk.Equals, len(fileList))
// validate that the right transfers were sent
expectedTransfers := scenarioHelper{}.shaveOffPrefix(fileList, filepath.Base(srcDirPath)+common.AZCOPY_PATH_SEPARATOR_STRING)
validateUploadTransfersAreScheduled(c, common.AZCOPY_PATH_SEPARATOR_STRING,
common.AZCOPY_PATH_SEPARATOR_STRING+filepath.Base(srcDirPath)+common.AZCOPY_PATH_SEPARATOR_STRING, expectedTransfers, mockedRPC)
})
// turn off recursive, this time nothing should be transferred
raw.recursive = false
mockedRPC.reset()
runCopyAndVerify(c, raw, func(err error) {
c.Assert(err, chk.NotNil)
c.Assert(len(mockedRPC.transfers), chk.Equals, 0)
})
}
// files(from pattern)->container upload
func (s *cmdIntegrationSuite) TestUploadDirectoryToContainerWithPattern(c *chk.C) {
bsu := getBSU()
// set up the source with numerous files
srcDirPath := scenarioHelper{}.generateLocalDirectory(c)
scenarioHelper{}.generateCommonRemoteScenarioForLocal(c, srcDirPath, "")
// add special files that we wish to include
filesToInclude := []string{"important.pdf", "includeSub/amazing.pdf", "includeSub/wow/amazing.pdf"}
scenarioHelper{}.generateLocalFilesFromList(c, srcDirPath, filesToInclude)
// set up an empty container
containerURL, containerName := createNewContainer(c, bsu)
defer deleteContainer(c, containerURL)
// set up interceptor
mockedRPC := interceptor{}
Rpc = mockedRPC.intercept
mockedRPC.init()
// construct the raw input to simulate user input
rawContainerURLWithSAS := scenarioHelper{}.getRawContainerURLWithSAS(c, containerName)
raw := getDefaultCopyRawInput(filepath.Join(srcDirPath, "/*.pdf"), rawContainerURLWithSAS.String())
raw.recursive = true
runCopyAndVerify(c, raw, func(err error) {
c.Assert(err, chk.IsNil)
// validate that the right number of transfers were scheduled
c.Assert(len(mockedRPC.transfers), chk.Equals, 1)
| c.Assert(strings.HasSuffix(mockedRPC.transfers[0].Source, ".pdf"), chk.Equals, true)
c.Assert(strings.Contains(mockedRPC.transfers[0].Source[1:], common.AZCOPY_PATH_SEPARATOR_STRING), chk.Equals, false)
})
} | // only the top pdf should be included
c.Assert(len(mockedRPC.transfers), chk.Equals, 1)
c.Assert(mockedRPC.transfers[0].Source, chk.Equals, mockedRPC.transfers[0].Destination) |
ellipse-drawer.service.ts | import { CesiumService } from '../../../cesium/cesium.service';
import { EllipseGeometry } from 'cesium';
import { Injectable } from '@angular/core';
import { StaticPrimitiveDrawer } from '../static-primitive-drawer/static-primitive-drawer.service';
/**
 * This drawer is responsible for drawing an ellipse over the Cesium map.
 * This implementation uses simple EllipseGeometry and Primitive parameters.
 * This doesn't allow us to change the position, color, etc. of the ellipses. For that you may use the dynamic ellipse component.
 */
@Injectable()
export class | extends StaticPrimitiveDrawer {
constructor(cesiumService: CesiumService) {
super(EllipseGeometry, cesiumService);
}
}
| StaticEllipseDrawerService |
provider.py | from pip._vendor.packaging.specifiers import SpecifierSet
from pip._vendor.resolvelib.providers import AbstractProvider
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
if MYPY_CHECK_RUNNING:
from typing import Any, Dict, Optional, Sequence, Set, Tuple, Union
from pip._internal.req.req_install import InstallRequirement
from pip._vendor.packaging.version import _BaseVersion
from .base import Requirement, Candidate
from .factory import Factory
# Notes on the relationship between the provider, the factory, and the
# candidate and requirement classes.
#
# The provider is a direct implementation of the resolvelib class. Its role
# is to deliver the API that resolvelib expects.
#
# Rather than work with completely abstract "requirement" and "candidate"
# concepts as resolvelib does, pip has concrete classes implementing these two
# ideas. The API of Requirement and Candidate objects are defined in the base
# classes, but essentially map fairly directly to the equivalent provider
# methods. In particular, `find_matches` and `is_satisfied_by` are
# requirement methods, and `get_dependencies` is a candidate method.
#
# The factory is the interface to pip's internal mechanisms. It is stateless,
# and is created by the resolver and held as a property of the provider. It is
# responsible for creating Requirement and Candidate objects, and provides
# services to those objects (access to pip's finder and preparer).
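#
# As a rough sketch (illustrative only, not part of pip), resolvelib consumes
# a provider like this one as follows; `provider` and `reqs` are assumed to
# have been constructed elsewhere via the factory:
#
#     from pip._vendor.resolvelib import BaseReporter, Resolver
#
#     resolver = Resolver(provider, BaseReporter())
#     result = resolver.resolve(reqs)
#     # result.mapping: project name -> pinned Candidate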
class PipProvider(AbstractProvider):
def __init__(
self,
factory, # type: Factory
constraints, # type: Dict[str, SpecifierSet]
ignore_dependencies, # type: bool
upgrade_strategy, # type: str
user_requested, # type: Set[str]
):
# type: (...) -> None
self._factory = factory
self._constraints = constraints
self._ignore_dependencies = ignore_dependencies
self._upgrade_strategy = upgrade_strategy
self.user_requested = user_requested
def _sort_matches(self, matches):
# type: (Sequence[Candidate]) -> Sequence[Candidate]
# The requirement is responsible for returning a sequence of potential
# candidates, one per version. The provider handles the logic of
# deciding the order in which these candidates should be passed to
# the resolver.
# The `matches` argument is a sequence of candidates, one per version,
# which are potential options to be installed. The requirement will
# have already sorted out whether to give us an already-installed
# candidate or a version from PyPI (i.e., it will deal with options
# like --force-reinstall and --ignore-installed).
# We now work out the correct order.
#
# 1. If no other considerations apply, later versions take priority.
# 2. An already installed distribution is preferred over any other,
# unless the user has requested an upgrade.
# Upgrades are allowed when:
# * The --upgrade flag is set, and
# - The project was specified on the command line, or
# - The project is a dependency and the "eager" upgrade strategy
# was requested.
def _eligible_for_upgrade(name):
# type: (str) -> bool
"""Are upgrades allowed for this project?
This checks the upgrade strategy, and whether the project was one
that the user specified in the command line, in order to decide
whether we should upgrade if there's a newer version available.
(Note that we don't need access to the `--upgrade` flag, because
an upgrade strategy of "to-satisfy-only" means that `--upgrade`
was not specified).
"""
if self._upgrade_strategy == "eager":
return True
elif self._upgrade_strategy == "only-if-needed":
return (name in self.user_requested)
return False
def sort_key(c):
# type: (Candidate) -> Tuple[int, _BaseVersion]
"""Return a sort key for the matches.
The highest priority should be given to installed candidates that
are not eligible for upgrade. We use the integer value in the first
part of the key to sort these before other candidates.
"""
if c.is_installed and not _eligible_for_upgrade(c.name):
return (1, c.version)
return (0, c.version)
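        # For illustration with hypothetical versions: keys [(0, 1.0),
        # (1, 1.5), (0, 2.0)] sort so that the non-installed (0, ...) entries
        # come first and the installed, not-upgradable (1, 1.5) entry comes
        # last in the returned sequence.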
return sorted(matches, key=sort_key)
def get_install_requirement(self, c):
# type: (Candidate) -> Optional[InstallRequirement]
return c.get_install_requirement()
def identify(self, dependency):
# type: (Union[Requirement, Candidate]) -> str
return dependency.name
def get_preference(
self,
resolution, # type: Optional[Candidate]
candidates, # type: Sequence[Candidate]
information # type: Sequence[Tuple[Requirement, Candidate]]
):
# type: (...) -> Any
# Use the "usual" value for now
return len(candidates)
def find_matches(self, requirement):
# type: (Requirement) -> Sequence[Candidate]
constraint = self._constraints.get(requirement.name, SpecifierSet())
matches = requirement.find_matches(constraint)
return self._sort_matches(matches)
def is_satisfied_by(self, requirement, candidate):
# type: (Requirement, Candidate) -> bool
return requirement.is_satisfied_by(candidate)
def get_dependencies(self, candidate):
# type: (Candidate) -> Sequence[Requirement]
if self._ignore_dependencies:
|
return candidate.get_dependencies()
| return [] |
ClientGUIScrolledPanelsManagement.py | import collections
import os
import random
import re
import traceback
from qtpy import QtCore as QC
from qtpy import QtWidgets as QW
from qtpy import QtGui as QG
from hydrus.core import HydrusConstants as HC
from hydrus.core import HydrusData
from hydrus.core import HydrusExceptions
from hydrus.core import HydrusGlobals as HG
from hydrus.core import HydrusPaths
from hydrus.core import HydrusSerialisable
from hydrus.core import HydrusTags
from hydrus.core import HydrusText
from hydrus.client import ClientApplicationCommand as CAC
from hydrus.client import ClientConstants as CC
from hydrus.client.gui import ClientGUIDialogs
from hydrus.client.gui import ClientGUIDialogsQuick
from hydrus.client.gui import ClientGUIFunctions
from hydrus.client.gui import ClientGUIImport
from hydrus.client.gui import ClientGUIScrolledPanels
from hydrus.client.gui import ClientGUIScrolledPanelsEdit
from hydrus.client.gui import ClientGUIShortcuts
from hydrus.client.gui import ClientGUIStyle
from hydrus.client.gui import ClientGUITags
from hydrus.client.gui import ClientGUITagSorting
from hydrus.client.gui import ClientGUITime
from hydrus.client.gui import ClientGUITopLevelWindowsPanels
from hydrus.client.gui import QtPorting as QP
from hydrus.client.gui.lists import ClientGUIListBoxes
from hydrus.client.gui.lists import ClientGUIListConstants as CGLC
from hydrus.client.gui.lists import ClientGUIListCtrl
from hydrus.client.gui.pages import ClientGUIResultsSortCollect
from hydrus.client.gui.search import ClientGUIACDropdown
from hydrus.client.gui.search import ClientGUISearch
from hydrus.client.gui.widgets import ClientGUICommon
from hydrus.client.gui.widgets import ClientGUIControls
from hydrus.client.media import ClientMedia
from hydrus.client.metadata import ClientTags
from hydrus.client.networking import ClientNetworkingSessions
class ManageOptionsPanel( ClientGUIScrolledPanels.ManagePanel ):
def __init__( self, parent ):
ClientGUIScrolledPanels.ManagePanel.__init__( self, parent )
self._new_options = HG.client_controller.new_options
self._listbook = ClientGUICommon.ListBook( self )
        self._listbook.AddPage( 'gui', 'gui', self._GUIPanel( self._listbook ) ) # leave this at the top, to make it the default page
self._listbook.AddPage( 'gui pages', 'gui pages', self._GUIPagesPanel( self._listbook, self._new_options ) )
self._listbook.AddPage( 'connection', 'connection', self._ConnectionPanel( self._listbook ) )
self._listbook.AddPage( 'external programs', 'external programs', self._ExternalProgramsPanel( self._listbook ) )
self._listbook.AddPage( 'files and trash', 'files and trash', self._FilesAndTrashPanel( self._listbook ) )
self._listbook.AddPage( 'file viewing statistics', 'file viewing statistics', self._FileViewingStatisticsPanel( self._listbook ) )
self._listbook.AddPage( 'speed and memory', 'speed and memory', self._SpeedAndMemoryPanel( self._listbook, self._new_options ) )
self._listbook.AddPage( 'maintenance and processing', 'maintenance and processing', self._MaintenanceAndProcessingPanel( self._listbook ) )
self._listbook.AddPage( 'media', 'media', self._MediaPanel( self._listbook ) )
self._listbook.AddPage( 'audio', 'audio', self._AudioPanel( self._listbook, self._new_options ) )
self._listbook.AddPage( 'system tray', 'system tray', self._SystemTrayPanel( self._listbook, self._new_options ) )
self._listbook.AddPage( 'search', 'search', self._SearchPanel( self._listbook, self._new_options ) )
self._listbook.AddPage( 'colours', 'colours', self._ColoursPanel( self._listbook ) )
self._listbook.AddPage( 'popups', 'popups', self._PopupPanel( self._listbook, self._new_options ) )
self._listbook.AddPage( 'regex favourites', 'regex favourites', self._RegexPanel( self._listbook ) )
self._listbook.AddPage( 'sort/collect', 'sort/collect', self._SortCollectPanel( self._listbook ) )
self._listbook.AddPage( 'downloading', 'downloading', self._DownloadingPanel( self._listbook, self._new_options ) )
self._listbook.AddPage( 'duplicates', 'duplicates', self._DuplicatesPanel( self._listbook, self._new_options ) )
self._listbook.AddPage( 'importing', 'importing', self._ImportingPanel( self._listbook, self._new_options ) )
self._listbook.AddPage( 'style', 'style', self._StylePanel( self._listbook, self._new_options ) )
self._listbook.AddPage( 'tag presentation', 'tag presentation', self._TagPresentationPanel( self._listbook, self._new_options ) )
self._listbook.AddPage( 'tag suggestions', 'tag suggestions', self._TagSuggestionsPanel( self._listbook, self._new_options ) )
self._listbook.AddPage( 'tags', 'tags', self._TagsPanel( self._listbook, self._new_options ) )
self._listbook.AddPage( 'thumbnails', 'thumbnails', self._ThumbnailsPanel( self._listbook, self._new_options ) )
self._listbook.AddPage( 'system', 'system', self._SystemPanel( self._listbook, self._new_options ) )
#
vbox = QP.VBoxLayout()
QP.AddToLayout( vbox, self._listbook, CC.FLAGS_EXPAND_BOTH_WAYS )
self.widget().setLayout( vbox )
class _AudioPanel( QW.QWidget ):
def __init__( self, parent, new_options ):
QW.QWidget.__init__( self, parent )
self._new_options = new_options
#self._media_viewer_uses_its_own_audio_volume = QW.QCheckBox( self )
self._preview_uses_its_own_audio_volume = QW.QCheckBox( self )
self._has_audio_label = QW.QLineEdit( self )
#
tt = 'If unchecked, this media canvas will use the \'global\' audio volume slider. If checked, this media canvas will have its own separate one.'
tt += os.linesep * 2
tt += 'Keep this on if you would like the preview viewer to be quieter than the main media viewer.'
#self._media_viewer_uses_its_own_audio_volume.setChecked( self._new_options.GetBoolean( 'media_viewer_uses_its_own_audio_volume' ) )
self._preview_uses_its_own_audio_volume.setChecked( self._new_options.GetBoolean( 'preview_uses_its_own_audio_volume' ) )
#self._media_viewer_uses_its_own_audio_volume.setToolTip( tt )
self._preview_uses_its_own_audio_volume.setToolTip( tt )
self._has_audio_label.setText( self._new_options.GetString( 'has_audio_label' ) )
#
vbox = QP.VBoxLayout()
rows = []
rows.append( ( 'The preview window has its own volume: ', self._preview_uses_its_own_audio_volume ) )
#rows.append( ( 'The media viewer has its own volume: ', self._media_viewer_uses_its_own_audio_volume ) )
rows.append( ( 'Label for files with audio: ', self._has_audio_label ) )
gridbox = ClientGUICommon.WrapInGrid( self, rows )
QP.AddToLayout( vbox, gridbox, CC.FLAGS_EXPAND_PERPENDICULAR )
vbox.addStretch( 1 )
self.setLayout( vbox )
def UpdateOptions( self ):
#self._new_options.SetBoolean( 'media_viewer_uses_its_own_audio_volume', self._media_viewer_uses_its_own_audio_volume.isChecked() )
self._new_options.SetBoolean( 'preview_uses_its_own_audio_volume', self._preview_uses_its_own_audio_volume.isChecked() )
self._new_options.SetString( 'has_audio_label', self._has_audio_label.text() )
class _ColoursPanel( QW.QWidget ):
def __init__( self, parent ):
QW.QWidget.__init__( self, parent )
self._new_options = HG.client_controller.new_options
coloursets_panel = ClientGUICommon.StaticBox( self, 'coloursets' )
self._current_colourset = ClientGUICommon.BetterChoice( coloursets_panel )
self._current_colourset.addItem( 'default', 'default' )
self._current_colourset.addItem( 'darkmode', 'darkmode' )
self._current_colourset.SetValue( self._new_options.GetString( 'current_colourset' ) )
self._notebook = QW.QTabWidget( coloursets_panel )
self._gui_colours = {}
for colourset in ( 'default', 'darkmode' ):
self._gui_colours[ colourset ] = {}
colour_panel = QW.QWidget( self._notebook )
colour_types = []
colour_types.append( CC.COLOUR_THUMB_BACKGROUND )
colour_types.append( CC.COLOUR_THUMB_BACKGROUND_SELECTED )
colour_types.append( CC.COLOUR_THUMB_BACKGROUND_REMOTE )
colour_types.append( CC.COLOUR_THUMB_BACKGROUND_REMOTE_SELECTED )
colour_types.append( CC.COLOUR_THUMB_BORDER )
colour_types.append( CC.COLOUR_THUMB_BORDER_SELECTED )
colour_types.append( CC.COLOUR_THUMB_BORDER_REMOTE )
colour_types.append( CC.COLOUR_THUMB_BORDER_REMOTE_SELECTED )
colour_types.append( CC.COLOUR_THUMBGRID_BACKGROUND )
colour_types.append( CC.COLOUR_AUTOCOMPLETE_BACKGROUND )
colour_types.append( CC.COLOUR_MEDIA_BACKGROUND )
colour_types.append( CC.COLOUR_MEDIA_TEXT )
colour_types.append( CC.COLOUR_TAGS_BOX )
for colour_type in colour_types:
ctrl = ClientGUICommon.BetterColourControl( colour_panel )
ctrl.setMaximumWidth( 20 )
ctrl.SetColour( self._new_options.GetColour( colour_type, colourset ) )
self._gui_colours[ colourset ][ colour_type ] = ctrl
#
rows = []
hbox = QP.HBoxLayout()
QP.AddToLayout( hbox, self._gui_colours[colourset][CC.COLOUR_THUMB_BACKGROUND], CC.FLAGS_CENTER_PERPENDICULAR )
QP.AddToLayout( hbox, self._gui_colours[colourset][CC.COLOUR_THUMB_BACKGROUND_SELECTED], CC.FLAGS_CENTER_PERPENDICULAR )
QP.AddToLayout( hbox, self._gui_colours[colourset][CC.COLOUR_THUMB_BACKGROUND_REMOTE], CC.FLAGS_CENTER_PERPENDICULAR )
QP.AddToLayout( hbox, self._gui_colours[colourset][CC.COLOUR_THUMB_BACKGROUND_REMOTE_SELECTED], CC.FLAGS_CENTER_PERPENDICULAR )
rows.append( ( 'thumbnail background (local: normal/selected, remote: normal/selected): ', hbox ) )
hbox = QP.HBoxLayout()
QP.AddToLayout( hbox, self._gui_colours[colourset][CC.COLOUR_THUMB_BORDER], CC.FLAGS_CENTER_PERPENDICULAR )
QP.AddToLayout( hbox, self._gui_colours[colourset][CC.COLOUR_THUMB_BORDER_SELECTED], CC.FLAGS_CENTER_PERPENDICULAR )
QP.AddToLayout( hbox, self._gui_colours[colourset][CC.COLOUR_THUMB_BORDER_REMOTE], CC.FLAGS_CENTER_PERPENDICULAR )
QP.AddToLayout( hbox, self._gui_colours[colourset][CC.COLOUR_THUMB_BORDER_REMOTE_SELECTED], CC.FLAGS_CENTER_PERPENDICULAR )
rows.append( ( 'thumbnail border (local: normal/selected, remote: normal/selected): ', hbox ) )
rows.append( ( 'thumbnail grid background: ', self._gui_colours[ colourset ][ CC.COLOUR_THUMBGRID_BACKGROUND ] ) )
rows.append( ( 'autocomplete background: ', self._gui_colours[ colourset ][ CC.COLOUR_AUTOCOMPLETE_BACKGROUND ] ) )
rows.append( ( 'media viewer background: ', self._gui_colours[ colourset ][ CC.COLOUR_MEDIA_BACKGROUND ] ) )
rows.append( ( 'media viewer text: ', self._gui_colours[ colourset ][ CC.COLOUR_MEDIA_TEXT ] ) )
rows.append( ( 'tags box background: ', self._gui_colours[ colourset ][ CC.COLOUR_TAGS_BOX ] ) )
gridbox = ClientGUICommon.WrapInGrid( colour_panel, rows )
colour_panel.setLayout( gridbox )
select = colourset == 'default'
self._notebook.addTab( colour_panel, colourset )
if select: self._notebook.setCurrentWidget( colour_panel )
#
coloursets_panel.Add( ClientGUICommon.WrapInText( self._current_colourset, coloursets_panel, 'current colourset: ' ), CC.FLAGS_EXPAND_SIZER_PERPENDICULAR )
coloursets_panel.Add( self._notebook, CC.FLAGS_EXPAND_BOTH_WAYS )
vbox = QP.VBoxLayout()
QP.AddToLayout( vbox, coloursets_panel, CC.FLAGS_EXPAND_PERPENDICULAR )
vbox.addStretch( 1 )
self.setLayout( vbox )
def UpdateOptions( self ):
for colourset in self._gui_colours:
for ( colour_type, ctrl ) in list(self._gui_colours[ colourset ].items()):
colour = ctrl.GetColour()
self._new_options.SetColour( colour_type, colourset, colour )
self._new_options.SetString( 'current_colourset', self._current_colourset.GetValue() )
class _ConnectionPanel( QW.QWidget ):
def __init__( self, parent ):
QW.QWidget.__init__( self, parent )
self._new_options = HG.client_controller.new_options
general = ClientGUICommon.StaticBox( self, 'general' )
self._verify_regular_https = QW.QCheckBox( general )
if self._new_options.GetBoolean( 'advanced_mode' ):
network_timeout_min = 1
network_timeout_max = 86400 * 30
error_wait_time_min = 1
error_wait_time_max = 86400 * 30
max_network_jobs_max = 1000
max_network_jobs_per_domain_max = 100
else:
network_timeout_min = 3
network_timeout_max = 600
error_wait_time_min = 3
error_wait_time_max = 1800
max_network_jobs_max = 30
max_network_jobs_per_domain_max = 5
self._network_timeout = QP.MakeQSpinBox( general, min = network_timeout_min, max = network_timeout_max )
            self._network_timeout.setToolTip( 'If a network connection cannot be made in this duration, or if, once started, it experiences uninterrupted inactivity for six times this duration, it will be abandoned.' )
self._connection_error_wait_time = QP.MakeQSpinBox( general, min = error_wait_time_min, max = error_wait_time_max )
self._connection_error_wait_time.setToolTip( 'If a network connection times out as above, it will wait increasing multiples of this base time before retrying.' )
self._serverside_bandwidth_wait_time = QP.MakeQSpinBox( general, min = error_wait_time_min, max = error_wait_time_max )
self._serverside_bandwidth_wait_time.setToolTip( 'If a server returns a failure status code indicating it is short on bandwidth, the network job will wait increasing multiples of this base time before retrying.' )
self._domain_network_infrastructure_error_velocity = ClientGUITime.VelocityCtrl( general, 0, 100, 30, hours = True, minutes = True, seconds = True, per_phrase = 'within', unit = 'errors' )
self._max_network_jobs = QP.MakeQSpinBox( general, min = 1, max = max_network_jobs_max )
self._max_network_jobs_per_domain = QP.MakeQSpinBox( general, min = 1, max = max_network_jobs_per_domain_max )
#
proxy_panel = ClientGUICommon.StaticBox( self, 'proxy settings' )
self._http_proxy = ClientGUICommon.NoneableTextCtrl( proxy_panel )
self._https_proxy = ClientGUICommon.NoneableTextCtrl( proxy_panel )
self._no_proxy = ClientGUICommon.NoneableTextCtrl( proxy_panel )
#
self._verify_regular_https.setChecked( self._new_options.GetBoolean( 'verify_regular_https' ) )
self._http_proxy.SetValue( self._new_options.GetNoneableString( 'http_proxy' ) )
self._https_proxy.SetValue( self._new_options.GetNoneableString( 'https_proxy' ) )
self._no_proxy.SetValue( self._new_options.GetNoneableString( 'no_proxy' ) )
self._network_timeout.setValue( self._new_options.GetInteger( 'network_timeout' ) )
self._connection_error_wait_time.setValue( self._new_options.GetInteger( 'connection_error_wait_time' ) )
self._serverside_bandwidth_wait_time.setValue( self._new_options.GetInteger( 'serverside_bandwidth_wait_time' ) )
number = self._new_options.GetInteger( 'domain_network_infrastructure_error_number' )
time_delta = self._new_options.GetInteger( 'domain_network_infrastructure_error_time_delta' )
self._domain_network_infrastructure_error_velocity.SetValue( ( number, time_delta ) )
self._max_network_jobs.setValue( self._new_options.GetInteger( 'max_network_jobs' ) )
self._max_network_jobs_per_domain.setValue( self._new_options.GetInteger( 'max_network_jobs_per_domain' ) )
#
if self._new_options.GetBoolean( 'advanced_mode' ):
label = 'As you are in advanced mode, these options have very low and high limits. Be very careful about lowering delay time or raising max number of connections too far, as things will break.'
st = ClientGUICommon.BetterStaticText( general, label = label )
st.setObjectName( 'HydrusWarning' )
st.setWordWrap( True )
general.Add( st, CC.FLAGS_EXPAND_PERPENDICULAR )
rows = []
rows.append( ( 'network timeout (seconds): ', self._network_timeout ) )
rows.append( ( 'connection error retry wait (seconds): ', self._connection_error_wait_time ) )
rows.append( ( 'serverside bandwidth retry wait (seconds): ', self._serverside_bandwidth_wait_time ) )
rows.append( ( 'Halt new jobs as long as this many network infrastructure errors on their domain (0 for never wait): ', self._domain_network_infrastructure_error_velocity ) )
rows.append( ( 'max number of simultaneous active network jobs: ', self._max_network_jobs ) )
rows.append( ( 'max number of simultaneous active network jobs per domain: ', self._max_network_jobs_per_domain ) )
rows.append( ( 'BUGFIX: verify regular https traffic:', self._verify_regular_https ) )
gridbox = ClientGUICommon.WrapInGrid( general, rows )
general.Add( gridbox, CC.FLAGS_EXPAND_SIZER_PERPENDICULAR )
text = 'Enter strings such as "http://ip:port" or "http://user:pass@ip:port" to use for http and https traffic. It should take effect immediately on dialog ok.'
text += os.linesep * 2
text += 'NO PROXY DOES NOT WORK UNLESS YOU HAVE A CUSTOM BUILD OF REQUESTS, SORRY! no_proxy takes the form of comma-separated hosts/domains, just as in curl or the NO_PROXY environment variable. When http and/or https proxies are set, they will not be used for these.'
text += os.linesep * 2
if ClientNetworkingSessions.SOCKS_PROXY_OK:
text += 'It looks like you have socks support! You should also be able to enter (socks4 or) "socks5://ip:port".'
text += os.linesep
text += 'Use socks4a or socks5h to force remote DNS resolution, on the proxy server.'
else:
text += 'It does not look like you have socks support! If you want it, try adding "pysocks" (or "requests[socks]")!'
st = ClientGUICommon.BetterStaticText( proxy_panel, text )
st.setWordWrap( True )
proxy_panel.Add( st, CC.FLAGS_EXPAND_PERPENDICULAR )
rows = []
rows.append( ( 'http: ', self._http_proxy ) )
rows.append( ( 'https: ', self._https_proxy ) )
rows.append( ( 'no_proxy: ', self._no_proxy ) )
gridbox = ClientGUICommon.WrapInGrid( proxy_panel, rows )
proxy_panel.Add( gridbox, CC.FLAGS_EXPAND_SIZER_PERPENDICULAR )
#
vbox = QP.VBoxLayout()
QP.AddToLayout( vbox, general, CC.FLAGS_EXPAND_PERPENDICULAR )
QP.AddToLayout( vbox, proxy_panel, CC.FLAGS_EXPAND_PERPENDICULAR )
vbox.addStretch( 1 )
self.setLayout( vbox )
def UpdateOptions( self ):
self._new_options.SetBoolean( 'verify_regular_https', self._verify_regular_https.isChecked() )
self._new_options.SetNoneableString( 'http_proxy', self._http_proxy.GetValue() )
self._new_options.SetNoneableString( 'https_proxy', self._https_proxy.GetValue() )
self._new_options.SetNoneableString( 'no_proxy', self._no_proxy.GetValue() )
self._new_options.SetInteger( 'network_timeout', self._network_timeout.value() )
self._new_options.SetInteger( 'connection_error_wait_time', self._connection_error_wait_time.value() )
self._new_options.SetInteger( 'serverside_bandwidth_wait_time', self._serverside_bandwidth_wait_time.value() )
self._new_options.SetInteger( 'max_network_jobs', self._max_network_jobs.value() )
self._new_options.SetInteger( 'max_network_jobs_per_domain', self._max_network_jobs_per_domain.value() )
( number, time_delta ) = self._domain_network_infrastructure_error_velocity.GetValue()
self._new_options.SetInteger( 'domain_network_infrastructure_error_number', number )
self._new_options.SetInteger( 'domain_network_infrastructure_error_time_delta', time_delta )
class _DownloadingPanel( QW.QWidget ):
def __init__( self, parent, new_options ):
QW.QWidget.__init__( self, parent )
self._new_options = new_options
#
gallery_downloader = ClientGUICommon.StaticBox( self, 'gallery downloader' )
gug_key_and_name = HG.client_controller.network_engine.domain_manager.GetDefaultGUGKeyAndName()
self._default_gug = ClientGUIImport.GUGKeyAndNameSelector( gallery_downloader, gug_key_and_name )
self._gallery_page_wait_period_pages = QP.MakeQSpinBox( gallery_downloader, min=1, max=120 )
self._gallery_file_limit = ClientGUICommon.NoneableSpinCtrl( gallery_downloader, none_phrase = 'no limit', min = 1, max = 1000000 )
self._highlight_new_query = QW.QCheckBox( gallery_downloader )
#
subscriptions = ClientGUICommon.StaticBox( self, 'subscriptions' )
self._gallery_page_wait_period_subscriptions = QP.MakeQSpinBox( subscriptions, min=1, max=30 )
self._max_simultaneous_subscriptions = QP.MakeQSpinBox( subscriptions, min=1, max=100 )
self._subscription_file_error_cancel_threshold = ClientGUICommon.NoneableSpinCtrl( subscriptions, min = 1, max = 1000000, unit = 'errors' )
self._subscription_file_error_cancel_threshold.setToolTip( 'This is a simple patch and will be replaced with a better "retry network errors later" system at some point, but is useful to increase if you have subs to unreliable websites.' )
self._process_subs_in_random_order = QW.QCheckBox( subscriptions )
self._process_subs_in_random_order.setToolTip( 'Processing in random order is useful whenever bandwidth is tight, as it stops an \'aardvark\' subscription from always getting first whack at what is available. Otherwise, they will be processed in alphabetical order.' )
checker_options = self._new_options.GetDefaultSubscriptionCheckerOptions()
self._subscription_checker_options = ClientGUIImport.CheckerOptionsButton( subscriptions, checker_options )
#
watchers = ClientGUICommon.StaticBox( self, 'watchers' )
self._watcher_page_wait_period = QP.MakeQSpinBox( watchers, min=1, max=120 )
self._highlight_new_watcher = QW.QCheckBox( watchers )
checker_options = self._new_options.GetDefaultWatcherCheckerOptions()
self._watcher_checker_options = ClientGUIImport.CheckerOptionsButton( watchers, checker_options )
#
misc = ClientGUICommon.StaticBox( self, 'misc' )
self._pause_character = QW.QLineEdit( misc )
self._stop_character = QW.QLineEdit( misc )
self._show_new_on_file_seed_short_summary = QW.QCheckBox( misc )
self._show_deleted_on_file_seed_short_summary = QW.QCheckBox( misc )
if self._new_options.GetBoolean( 'advanced_mode' ):
delay_min = 1
else:
delay_min = 600
self._subscription_network_error_delay = ClientGUITime.TimeDeltaButton( misc, min = delay_min, days = True, hours = True, minutes = True, seconds = True )
self._subscription_other_error_delay = ClientGUITime.TimeDeltaButton( misc, min = delay_min, days = True, hours = True, minutes = True, seconds = True )
self._downloader_network_error_delay = ClientGUITime.TimeDeltaButton( misc, min = delay_min, days = True, hours = True, minutes = True, seconds = True )
#
gallery_page_tt = 'Gallery page fetches are heavy requests with unusual fetch-time requirements. It is important they not wait too long, but it is also useful to throttle them:'
gallery_page_tt += os.linesep * 2
gallery_page_tt += '- So they do not compete with file downloads for bandwidth, leading to very unbalanced 20/4400-type queues.'
gallery_page_tt += os.linesep
gallery_page_tt += '- So you do not get 1000 items in your queue before realising you did not like that tag anyway.'
gallery_page_tt += os.linesep
gallery_page_tt += '- To give servers a break (some gallery pages can be CPU-expensive to generate).'
gallery_page_tt += os.linesep * 2
            gallery_page_tt += 'These delays are per-domain.'
gallery_page_tt += os.linesep * 2
gallery_page_tt += 'If you do not understand this stuff, you can just leave it alone.'
self._gallery_page_wait_period_pages.setValue( self._new_options.GetInteger( 'gallery_page_wait_period_pages' ) )
self._gallery_page_wait_period_pages.setToolTip( gallery_page_tt )
self._gallery_file_limit.SetValue( HC.options['gallery_file_limit'] )
self._highlight_new_query.setChecked( self._new_options.GetBoolean( 'highlight_new_query' ) )
self._gallery_page_wait_period_subscriptions.setValue( self._new_options.GetInteger( 'gallery_page_wait_period_subscriptions' ) )
self._gallery_page_wait_period_subscriptions.setToolTip( gallery_page_tt )
self._max_simultaneous_subscriptions.setValue( self._new_options.GetInteger( 'max_simultaneous_subscriptions' ) )
self._subscription_file_error_cancel_threshold.SetValue( self._new_options.GetNoneableInteger( 'subscription_file_error_cancel_threshold' ) )
self._process_subs_in_random_order.setChecked( self._new_options.GetBoolean( 'process_subs_in_random_order' ) )
self._pause_character.setText( self._new_options.GetString( 'pause_character' ) )
self._stop_character.setText( self._new_options.GetString( 'stop_character' ) )
self._show_new_on_file_seed_short_summary.setChecked( self._new_options.GetBoolean( 'show_new_on_file_seed_short_summary' ) )
self._show_deleted_on_file_seed_short_summary.setChecked( self._new_options.GetBoolean( 'show_deleted_on_file_seed_short_summary' ) )
self._watcher_page_wait_period.setValue( self._new_options.GetInteger( 'watcher_page_wait_period' ) )
self._watcher_page_wait_period.setToolTip( gallery_page_tt )
self._highlight_new_watcher.setChecked( self._new_options.GetBoolean( 'highlight_new_watcher' ) )
self._subscription_network_error_delay.SetValue( self._new_options.GetInteger( 'subscription_network_error_delay' ) )
self._subscription_other_error_delay.SetValue( self._new_options.GetInteger( 'subscription_other_error_delay' ) )
self._downloader_network_error_delay.SetValue( self._new_options.GetInteger( 'downloader_network_error_delay' ) )
#
rows = []
rows.append( ( 'Default download source:', self._default_gug ) )
rows.append( ( 'If new query entered and no current highlight, highlight the new query:', self._highlight_new_query ) )
rows.append( ( 'Additional fixed time (in seconds) to wait between gallery page fetches:', self._gallery_page_wait_period_pages ) )
rows.append( ( 'By default, stop searching once this many files are found:', self._gallery_file_limit ) )
gridbox = ClientGUICommon.WrapInGrid( gallery_downloader, rows )
gallery_downloader.Add( gridbox, CC.FLAGS_EXPAND_SIZER_PERPENDICULAR )
#
rows = []
rows.append( ( 'Additional fixed time (in seconds) to wait between gallery page fetches:', self._gallery_page_wait_period_subscriptions ) )
rows.append( ( 'Maximum number of subscriptions that can sync simultaneously:', self._max_simultaneous_subscriptions ) )
rows.append( ( 'If a subscription has this many failed file imports, stop and continue later:', self._subscription_file_error_cancel_threshold ) )
rows.append( ( 'Sync subscriptions in random order:', self._process_subs_in_random_order ) )
gridbox = ClientGUICommon.WrapInGrid( subscriptions, rows )
subscriptions.Add( gridbox, CC.FLAGS_EXPAND_SIZER_PERPENDICULAR )
subscriptions.Add( self._subscription_checker_options, CC.FLAGS_EXPAND_PERPENDICULAR )
#
rows = []
rows.append( ( 'Additional fixed time (in seconds) to wait between watcher checks:', self._watcher_page_wait_period ) )
rows.append( ( 'If new watcher entered and no current highlight, highlight the new watcher:', self._highlight_new_watcher ) )
gridbox = ClientGUICommon.WrapInGrid( watchers, rows )
watchers.Add( gridbox, CC.FLAGS_EXPAND_SIZER_PERPENDICULAR )
watchers.Add( self._watcher_checker_options, CC.FLAGS_EXPAND_PERPENDICULAR )
#
rows = []
rows.append( ( 'Pause character:', self._pause_character ) )
rows.append( ( 'Stop character:', self._stop_character ) )
rows.append( ( 'Show a \'N\' (for \'new\') count on short file import summaries:', self._show_new_on_file_seed_short_summary ) )
rows.append( ( 'Show a \'D\' (for \'deleted\') count on short file import summaries:', self._show_deleted_on_file_seed_short_summary ) )
rows.append( ( 'Delay time on a gallery/watcher network error:', self._downloader_network_error_delay ) )
rows.append( ( 'Delay time on a subscription network error:', self._subscription_network_error_delay ) )
rows.append( ( 'Delay time on a subscription other error:', self._subscription_other_error_delay ) )
gridbox = ClientGUICommon.WrapInGrid( misc, rows )
misc.Add( gridbox, CC.FLAGS_EXPAND_SIZER_PERPENDICULAR )
#
vbox = QP.VBoxLayout()
QP.AddToLayout( vbox, gallery_downloader, CC.FLAGS_EXPAND_PERPENDICULAR )
QP.AddToLayout( vbox, subscriptions, CC.FLAGS_EXPAND_PERPENDICULAR )
QP.AddToLayout( vbox, watchers, CC.FLAGS_EXPAND_PERPENDICULAR )
QP.AddToLayout( vbox, misc, CC.FLAGS_EXPAND_PERPENDICULAR )
vbox.addStretch( 1 )
self.setLayout( vbox )
def UpdateOptions( self ):
HG.client_controller.network_engine.domain_manager.SetDefaultGUGKeyAndName( self._default_gug.GetValue() )
self._new_options.SetInteger( 'gallery_page_wait_period_pages', self._gallery_page_wait_period_pages.value() )
HC.options[ 'gallery_file_limit' ] = self._gallery_file_limit.GetValue()
self._new_options.SetBoolean( 'highlight_new_query', self._highlight_new_query.isChecked() )
self._new_options.SetInteger( 'gallery_page_wait_period_subscriptions', self._gallery_page_wait_period_subscriptions.value() )
self._new_options.SetInteger( 'max_simultaneous_subscriptions', self._max_simultaneous_subscriptions.value() )
self._new_options.SetNoneableInteger( 'subscription_file_error_cancel_threshold', self._subscription_file_error_cancel_threshold.GetValue() )
self._new_options.SetBoolean( 'process_subs_in_random_order', self._process_subs_in_random_order.isChecked() )
self._new_options.SetInteger( 'watcher_page_wait_period', self._watcher_page_wait_period.value() )
self._new_options.SetBoolean( 'highlight_new_watcher', self._highlight_new_watcher.isChecked() )
self._new_options.SetDefaultWatcherCheckerOptions( self._watcher_checker_options.GetValue() )
self._new_options.SetDefaultSubscriptionCheckerOptions( self._subscription_checker_options.GetValue() )
self._new_options.SetString( 'pause_character', self._pause_character.text() )
self._new_options.SetString( 'stop_character', self._stop_character.text() )
self._new_options.SetBoolean( 'show_new_on_file_seed_short_summary', self._show_new_on_file_seed_short_summary.isChecked() )
self._new_options.SetBoolean( 'show_deleted_on_file_seed_short_summary', self._show_deleted_on_file_seed_short_summary.isChecked() )
self._new_options.SetInteger( 'subscription_network_error_delay', self._subscription_network_error_delay.GetValue() )
self._new_options.SetInteger( 'subscription_other_error_delay', self._subscription_other_error_delay.GetValue() )
self._new_options.SetInteger( 'downloader_network_error_delay', self._downloader_network_error_delay.GetValue() )
class _DuplicatesPanel( QW.QWidget ):
def __init__( self, parent, new_options ):
QW.QWidget.__init__( self, parent )
self._new_options = new_options
#
weights_panel = ClientGUICommon.StaticBox( self, 'duplicate filter comparison score weights' )
self._duplicate_comparison_score_higher_jpeg_quality = QP.MakeQSpinBox( weights_panel, min=-100, max=100 )
self._duplicate_comparison_score_much_higher_jpeg_quality = QP.MakeQSpinBox( weights_panel, min=-100, max=100 )
self._duplicate_comparison_score_higher_filesize = QP.MakeQSpinBox( weights_panel, min=-100, max=100 )
self._duplicate_comparison_score_much_higher_filesize = QP.MakeQSpinBox( weights_panel, min=-100, max=100 )
self._duplicate_comparison_score_higher_resolution = QP.MakeQSpinBox( weights_panel, min=-100, max=100 )
self._duplicate_comparison_score_much_higher_resolution = QP.MakeQSpinBox( weights_panel, min=-100, max=100 )
self._duplicate_comparison_score_more_tags = QP.MakeQSpinBox( weights_panel, min=-100, max=100 )
self._duplicate_comparison_score_older = QP.MakeQSpinBox( weights_panel, min=-100, max=100 )
self._duplicate_comparison_score_nicer_ratio = QP.MakeQSpinBox( weights_panel, min=-100, max=100 )
self._duplicate_comparison_score_nicer_ratio.setToolTip( 'For instance, 16:9 vs 640:357.')
self._duplicate_filter_max_batch_size = QP.MakeQSpinBox( self, min = 10, max = 1024 )
#
self._duplicate_comparison_score_higher_jpeg_quality.setValue( self._new_options.GetInteger( 'duplicate_comparison_score_higher_jpeg_quality' ) )
self._duplicate_comparison_score_much_higher_jpeg_quality.setValue( self._new_options.GetInteger( 'duplicate_comparison_score_much_higher_jpeg_quality' ) )
self._duplicate_comparison_score_higher_filesize.setValue( self._new_options.GetInteger( 'duplicate_comparison_score_higher_filesize' ) )
self._duplicate_comparison_score_much_higher_filesize.setValue( self._new_options.GetInteger( 'duplicate_comparison_score_much_higher_filesize' ) )
self._duplicate_comparison_score_higher_resolution.setValue( self._new_options.GetInteger( 'duplicate_comparison_score_higher_resolution' ) )
self._duplicate_comparison_score_much_higher_resolution.setValue( self._new_options.GetInteger( 'duplicate_comparison_score_much_higher_resolution' ) )
self._duplicate_comparison_score_more_tags.setValue( self._new_options.GetInteger( 'duplicate_comparison_score_more_tags' ) )
self._duplicate_comparison_score_older.setValue( self._new_options.GetInteger( 'duplicate_comparison_score_older' ) )
self._duplicate_comparison_score_nicer_ratio.setValue( self._new_options.GetInteger( 'duplicate_comparison_score_nicer_ratio' ) )
self._duplicate_filter_max_batch_size.setValue( self._new_options.GetInteger( 'duplicate_filter_max_batch_size' ) )
#
rows = []
rows.append( ( 'Score for jpeg with non-trivially higher jpeg quality:', self._duplicate_comparison_score_higher_jpeg_quality ) )
rows.append( ( 'Score for jpeg with significantly higher jpeg quality:', self._duplicate_comparison_score_much_higher_jpeg_quality ) )
rows.append( ( 'Score for file with non-trivially higher filesize:', self._duplicate_comparison_score_higher_filesize ) )
rows.append( ( 'Score for file with significantly higher filesize:', self._duplicate_comparison_score_much_higher_filesize ) )
rows.append( ( 'Score for file with higher resolution (as num pixels):', self._duplicate_comparison_score_higher_resolution ) )
rows.append( ( 'Score for file with significantly higher resolution (as num pixels):', self._duplicate_comparison_score_much_higher_resolution ) )
rows.append( ( 'Score for file with more tags:', self._duplicate_comparison_score_more_tags ) )
rows.append( ( 'Score for file with non-trivially earlier import time:', self._duplicate_comparison_score_older ) )
rows.append( ( 'Score for file with \'nicer\' resolution ratio:', self._duplicate_comparison_score_nicer_ratio ) )
gridbox = ClientGUICommon.WrapInGrid( weights_panel, rows )
label = 'When processing potential duplicate pairs in the duplicate filter, the client tries to present the \'best\' file first. It judges the two files on a variety of potential differences, each with a score. The file with the greatest total score is presented first. Here you can tinker with these scores.'
label += os.linesep * 2
label += 'I recommend you leave all these as positive numbers, but if you wish, you can set a negative number to reduce the score.'
st = ClientGUICommon.BetterStaticText( weights_panel, label )
st.setWordWrap( True )
weights_panel.Add( st, CC.FLAGS_EXPAND_PERPENDICULAR )
weights_panel.Add( gridbox, CC.FLAGS_EXPAND_SIZER_PERPENDICULAR )
#
vbox = QP.VBoxLayout()
QP.AddToLayout( vbox, weights_panel, CC.FLAGS_EXPAND_PERPENDICULAR )
rows = []
rows.append( ( 'Max size of duplicate filter pair batches:', self._duplicate_filter_max_batch_size ) )
gridbox = ClientGUICommon.WrapInGrid( self, rows )
QP.AddToLayout( vbox, gridbox, CC.FLAGS_EXPAND_SIZER_PERPENDICULAR )
vbox.addStretch( 1 )
self.setLayout( vbox )
def UpdateOptions( self ):
self._new_options.SetInteger( 'duplicate_comparison_score_higher_jpeg_quality', self._duplicate_comparison_score_higher_jpeg_quality.value() )
self._new_options.SetInteger( 'duplicate_comparison_score_much_higher_jpeg_quality', self._duplicate_comparison_score_much_higher_jpeg_quality.value() )
self._new_options.SetInteger( 'duplicate_comparison_score_higher_filesize', self._duplicate_comparison_score_higher_filesize.value() )
self._new_options.SetInteger( 'duplicate_comparison_score_much_higher_filesize', self._duplicate_comparison_score_much_higher_filesize.value() )
self._new_options.SetInteger( 'duplicate_comparison_score_higher_resolution', self._duplicate_comparison_score_higher_resolution.value() )
self._new_options.SetInteger( 'duplicate_comparison_score_much_higher_resolution', self._duplicate_comparison_score_much_higher_resolution.value() )
self._new_options.SetInteger( 'duplicate_comparison_score_more_tags', self._duplicate_comparison_score_more_tags.value() )
self._new_options.SetInteger( 'duplicate_comparison_score_older', self._duplicate_comparison_score_older.value() )
self._new_options.SetInteger( 'duplicate_comparison_score_nicer_ratio', self._duplicate_comparison_score_nicer_ratio.value() )
self._new_options.SetInteger( 'duplicate_filter_max_batch_size', self._duplicate_filter_max_batch_size.value() )
class _ExternalProgramsPanel( QW.QWidget ):
def __init__( self, parent ):
QW.QWidget.__init__( self, parent )
self._new_options = HG.client_controller.new_options
mime_panel = ClientGUICommon.StaticBox( self, '\'open externally\' launch paths' )
self._web_browser_path = QW.QLineEdit( mime_panel )
self._mime_launch_listctrl = ClientGUIListCtrl.BetterListCtrl( mime_panel, CGLC.COLUMN_LIST_EXTERNAL_PROGRAMS.ID, 15, self._ConvertMimeToListCtrlTuples, activation_callback = self._EditMimeLaunch )
#
web_browser_path = self._new_options.GetNoneableString( 'web_browser_path' )
if web_browser_path is not None:
self._web_browser_path.setText( web_browser_path )
for mime in HC.SEARCHABLE_MIMES:
launch_path = self._new_options.GetMimeLaunch( mime )
self._mime_launch_listctrl.AddDatas( [ ( mime, launch_path ) ] )
self._mime_launch_listctrl.Sort()
#
vbox = QP.VBoxLayout()
            text = 'Setting a specific web browser path here--like \'C:\\program files\\firefox\\firefox.exe "%path%"\'--can help with the \'share->open->in web browser\' command, which can be buggy when relying on OS defaults, particularly on Windows. It also fixes #anchors, which are dropped on some OSes using default means. Use the same %path% format for the \'open externally\' commands below.'
st = ClientGUICommon.BetterStaticText( mime_panel, text )
st.setWordWrap( True )
mime_panel.Add( st, CC.FLAGS_EXPAND_PERPENDICULAR )
rows = []
rows.append( ( 'Manual web browser launch path: ', self._web_browser_path ) )
gridbox = ClientGUICommon.WrapInGrid( mime_panel, rows )
mime_panel.Add( gridbox, CC.FLAGS_EXPAND_SIZER_PERPENDICULAR )
mime_panel.Add( self._mime_launch_listctrl, CC.FLAGS_EXPAND_BOTH_WAYS )
QP.AddToLayout( vbox, mime_panel, CC.FLAGS_EXPAND_BOTH_WAYS )
self.setLayout( vbox )
def _ConvertMimeToListCtrlTuples( self, data ):
( mime, launch_path ) = data
pretty_mime = HC.mime_string_lookup[ mime ]
if launch_path is None:
pretty_launch_path = 'default: {}'.format( HydrusPaths.GetDefaultLaunchPath() )
else:
pretty_launch_path = launch_path
display_tuple = ( pretty_mime, pretty_launch_path )
sort_tuple = display_tuple
return ( display_tuple, sort_tuple )
def _EditMimeLaunch( self ):
for ( mime, launch_path ) in self._mime_launch_listctrl.GetData( only_selected = True ):
message = 'Enter the new launch path for {}'.format( HC.mime_string_lookup[ mime ] )
message += os.linesep * 2
message += 'Hydrus will insert the file\'s full path wherever you put %path%, even multiple times!'
message += os.linesep * 2
message += 'Set as blank to reset to default.'
if launch_path is None:
default = 'program "%path%"'
else:
default = launch_path
with ClientGUIDialogs.DialogTextEntry( self, message, default = default, allow_blank = True ) as dlg:
if dlg.exec() == QW.QDialog.Accepted:
new_launch_path = dlg.GetValue()
if new_launch_path == '':
new_launch_path = None
if new_launch_path not in ( launch_path, default ):
self._mime_launch_listctrl.DeleteDatas( [ ( mime, launch_path ) ] )
self._mime_launch_listctrl.AddDatas( [ ( mime, new_launch_path ) ] )
else:
break
self._mime_launch_listctrl.Sort()
def UpdateOptions( self ):
web_browser_path = self._web_browser_path.text()
if web_browser_path == '':
web_browser_path = None
self._new_options.SetNoneableString( 'web_browser_path', web_browser_path )
for ( mime, launch_path ) in self._mime_launch_listctrl.GetData():
self._new_options.SetMimeLaunch( mime, launch_path )
class _FilesAndTrashPanel( QW.QWidget ):
def __init__( self, parent ):
QW.QWidget.__init__( self, parent )
self._new_options = HG.client_controller.new_options
self._export_location = QP.DirPickerCtrl( self )
self._prefix_hash_when_copying = QW.QCheckBox( self )
self._prefix_hash_when_copying.setToolTip( 'If you often paste hashes into boorus, check this to automatically prefix with the type, like "md5:2496dabcbd69e3c56a5d8caabb7acde5".' )
self._delete_to_recycle_bin = QW.QCheckBox( self )
self._confirm_trash = QW.QCheckBox( self )
self._confirm_archive = QW.QCheckBox( self )
self._remove_filtered_files = QW.QCheckBox( self )
self._remove_trashed_files = QW.QCheckBox( self )
self._trash_max_age = ClientGUICommon.NoneableSpinCtrl( self, '', none_phrase = 'no age limit', min = 0, max = 8640 )
self._trash_max_size = ClientGUICommon.NoneableSpinCtrl( self, '', none_phrase = 'no size limit', min = 0, max = 20480 )
delete_lock_panel = ClientGUICommon.StaticBox( self, 'delete lock' )
self._delete_lock_for_archived_files = QW.QCheckBox( delete_lock_panel )
advanced_file_deletion_panel = ClientGUICommon.StaticBox( self, 'advanced file deletion and custom reasons' )
self._use_advanced_file_deletion_dialog = QW.QCheckBox( advanced_file_deletion_panel )
self._use_advanced_file_deletion_dialog.setToolTip( 'If this is set, the client will present a more complicated file deletion confirmation dialog that will permit you to set your own deletion reason and perform \'clean\' deletes that leave no deletion record (making later re-import easier).' )
self._remember_last_advanced_file_deletion_special_action = QW.QCheckBox( advanced_file_deletion_panel )
self._remember_last_advanced_file_deletion_special_action.setToolTip( 'This will try to remember and restore the last action you set, whether that was trash, physical delete, or physical delete and clear history.' )
self._remember_last_advanced_file_deletion_reason = QW.QCheckBox( advanced_file_deletion_panel )
self._remember_last_advanced_file_deletion_reason.setToolTip( 'This will remember and restore the last reason you set for a delete.' )
self._advanced_file_deletion_reasons = ClientGUIListBoxes.QueueListBox( advanced_file_deletion_panel, 5, str, add_callable = self._AddAFDR, edit_callable = self._EditAFDR )
#
if HC.options[ 'export_path' ] is not None:
abs_path = HydrusPaths.ConvertPortablePathToAbsPath( HC.options[ 'export_path' ] )
if abs_path is not None:
self._export_location.SetPath( abs_path )
self._prefix_hash_when_copying.setChecked( self._new_options.GetBoolean( 'prefix_hash_when_copying' ) )
self._delete_to_recycle_bin.setChecked( HC.options[ 'delete_to_recycle_bin' ] )
self._confirm_trash.setChecked( HC.options[ 'confirm_trash' ] )
self._confirm_archive.setChecked( HC.options[ 'confirm_archive' ] )
self._remove_filtered_files.setChecked( HC.options[ 'remove_filtered_files' ] )
self._remove_trashed_files.setChecked( HC.options[ 'remove_trashed_files' ] )
self._trash_max_age.SetValue( HC.options[ 'trash_max_age' ] )
self._trash_max_size.SetValue( HC.options[ 'trash_max_size' ] )
self._delete_lock_for_archived_files.setChecked( self._new_options.GetBoolean( 'delete_lock_for_archived_files' ) )
self._use_advanced_file_deletion_dialog.setChecked( self._new_options.GetBoolean( 'use_advanced_file_deletion_dialog' ) )
self._use_advanced_file_deletion_dialog.clicked.connect( self._UpdateAdvancedControls )
self._remember_last_advanced_file_deletion_special_action.setChecked( HG.client_controller.new_options.GetBoolean( 'remember_last_advanced_file_deletion_special_action' ) )
self._remember_last_advanced_file_deletion_reason.setChecked( HG.client_controller.new_options.GetBoolean( 'remember_last_advanced_file_deletion_reason' ) )
self._advanced_file_deletion_reasons.AddDatas( self._new_options.GetStringList( 'advanced_file_deletion_reasons' ) )
self._UpdateAdvancedControls()
#
vbox = QP.VBoxLayout()
text = 'If you set the default export directory blank, the client will use \'hydrus_export\' under the current user\'s home directory.'
QP.AddToLayout( vbox, ClientGUICommon.BetterStaticText( self, text ), CC.FLAGS_CENTER )
rows = []
rows.append( ( 'When copying file hashes, prefix with booru-friendly hash type: ', self._prefix_hash_when_copying ) )
rows.append( ( 'Confirm sending files to trash: ', self._confirm_trash ) )
rows.append( ( 'Confirm sending more than one file to archive or inbox: ', self._confirm_archive ) )
rows.append( ( 'When deleting files or folders, send them to the OS\'s recycle bin: ', self._delete_to_recycle_bin ) )
rows.append( ( 'Remove files from view when they are filtered: ', self._remove_filtered_files ) )
rows.append( ( 'Remove files from view when they are sent to the trash: ', self._remove_trashed_files ) )
rows.append( ( 'Number of hours a file can be in the trash before being deleted: ', self._trash_max_age ) )
rows.append( ( 'Maximum size of trash (MB): ', self._trash_max_size ) )
rows.append( ( 'Default export directory: ', self._export_location ) )
gridbox = ClientGUICommon.WrapInGrid( self, rows )
QP.AddToLayout( vbox, gridbox, CC.FLAGS_EXPAND_SIZER_PERPENDICULAR )
#
rows = []
rows.append( ( 'Do not permit archived files to be trashed or deleted: ', self._delete_lock_for_archived_files ) )
gridbox = ClientGUICommon.WrapInGrid( delete_lock_panel, rows )
delete_lock_panel.Add( gridbox, CC.FLAGS_EXPAND_SIZER_PERPENDICULAR )
QP.AddToLayout( vbox, delete_lock_panel, CC.FLAGS_EXPAND_PERPENDICULAR )
#
rows = []
rows.append( ( 'Use the advanced file deletion dialog: ', self._use_advanced_file_deletion_dialog ) )
rows.append( ( 'Remember the last action: ', self._remember_last_advanced_file_deletion_special_action ) )
rows.append( ( 'Remember the last reason: ', self._remember_last_advanced_file_deletion_reason ) )
gridbox = ClientGUICommon.WrapInGrid( advanced_file_deletion_panel, rows )
advanced_file_deletion_panel.Add( gridbox, CC.FLAGS_EXPAND_SIZER_PERPENDICULAR )
advanced_file_deletion_panel.Add( self._advanced_file_deletion_reasons, CC.FLAGS_EXPAND_BOTH_WAYS )
#
QP.AddToLayout( vbox, advanced_file_deletion_panel, CC.FLAGS_EXPAND_BOTH_WAYS )
self.setLayout( vbox )
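# Add/edit callables for the QueueListBox of custom deletion reasons; raising VetoException signals a cancelled dialog.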
def _AddAFDR( self ):
reason = 'I do not like the file.'
return self._EditAFDR( reason )
def _EditAFDR( self, reason ):
with ClientGUIDialogs.DialogTextEntry( self, 'enter the reason', default = reason, allow_blank = False ) as dlg:
if dlg.exec() == QW.QDialog.Accepted:
reason = dlg.GetValue()
return reason
else:
raise HydrusExceptions.VetoException()
def _UpdateAdvancedControls( self ):
advanced_enabled = self._use_advanced_file_deletion_dialog.isChecked()
self._remember_last_advanced_file_deletion_special_action.setEnabled( advanced_enabled )
self._remember_last_advanced_file_deletion_reason.setEnabled( advanced_enabled )
self._advanced_file_deletion_reasons.setEnabled( advanced_enabled )
def UpdateOptions( self ):
HC.options[ 'export_path' ] = HydrusPaths.ConvertAbsPathToPortablePath( self._export_location.GetPath() )
self._new_options.SetBoolean( 'prefix_hash_when_copying', self._prefix_hash_when_copying.isChecked() )
HC.options[ 'delete_to_recycle_bin' ] = self._delete_to_recycle_bin.isChecked()
HC.options[ 'confirm_trash' ] = self._confirm_trash.isChecked()
HC.options[ 'confirm_archive' ] = self._confirm_archive.isChecked()
HC.options[ 'remove_filtered_files' ] = self._remove_filtered_files.isChecked()
HC.options[ 'remove_trashed_files' ] = self._remove_trashed_files.isChecked()
HC.options[ 'trash_max_age' ] = self._trash_max_age.GetValue()
HC.options[ 'trash_max_size' ] = self._trash_max_size.GetValue()
self._new_options.SetBoolean( 'delete_lock_for_archived_files', self._delete_lock_for_archived_files.isChecked() )
self._new_options.SetBoolean( 'use_advanced_file_deletion_dialog', self._use_advanced_file_deletion_dialog.isChecked() )
self._new_options.SetStringList( 'advanced_file_deletion_reasons', self._advanced_file_deletion_reasons.GetData() )
HG.client_controller.new_options.SetBoolean( 'remember_last_advanced_file_deletion_special_action', self._remember_last_advanced_file_deletion_special_action.isChecked() )
HG.client_controller.new_options.SetBoolean( 'remember_last_advanced_file_deletion_reason', self._remember_last_advanced_file_deletion_reason.isChecked() )
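# Options page for file viewing statistics: tracking toggles, min/max view-time thresholds for the media and preview viewers, and how the stats appear on right-click menus.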
class _FileViewingStatisticsPanel( QW.QWidget ):
def __init__( self, parent ):
QW.QWidget.__init__( self, parent )
self._new_options = HG.client_controller.new_options
self._file_viewing_statistics_active = QW.QCheckBox( self )
self._file_viewing_statistics_active_on_dupe_filter = QW.QCheckBox( self )
self._file_viewing_statistics_media_min_time = ClientGUICommon.NoneableSpinCtrl( self )
self._file_viewing_statistics_media_max_time = ClientGUICommon.NoneableSpinCtrl( self )
self._file_viewing_statistics_preview_min_time = ClientGUICommon.NoneableSpinCtrl( self )
self._file_viewing_statistics_preview_max_time = ClientGUICommon.NoneableSpinCtrl( self )
self._file_viewing_stats_menu_display = ClientGUICommon.BetterChoice( self )
self._file_viewing_stats_menu_display.addItem( 'do not show', CC.FILE_VIEWING_STATS_MENU_DISPLAY_NONE )
self._file_viewing_stats_menu_display.addItem( 'show media', CC.FILE_VIEWING_STATS_MENU_DISPLAY_MEDIA_ONLY )
self._file_viewing_stats_menu_display.addItem( 'show media, and put preview in a submenu', CC.FILE_VIEWING_STATS_MENU_DISPLAY_MEDIA_AND_PREVIEW_IN_SUBMENU )
self._file_viewing_stats_menu_display.addItem( 'show media and preview in two lines', CC.FILE_VIEWING_STATS_MENU_DISPLAY_MEDIA_AND_PREVIEW_STACKED )
self._file_viewing_stats_menu_display.addItem( 'show media and preview combined', CC.FILE_VIEWING_STATS_MENU_DISPLAY_MEDIA_AND_PREVIEW_SUMMED )
#
self._file_viewing_statistics_active.setChecked( self._new_options.GetBoolean( 'file_viewing_statistics_active' ) )
self._file_viewing_statistics_active_on_dupe_filter.setChecked( self._new_options.GetBoolean( 'file_viewing_statistics_active_on_dupe_filter' ) )
self._file_viewing_statistics_media_min_time.SetValue( self._new_options.GetNoneableInteger( 'file_viewing_statistics_media_min_time' ) )
self._file_viewing_statistics_media_max_time.SetValue( self._new_options.GetNoneableInteger( 'file_viewing_statistics_media_max_time' ) )
self._file_viewing_statistics_preview_min_time.SetValue( self._new_options.GetNoneableInteger( 'file_viewing_statistics_preview_min_time' ) )
self._file_viewing_statistics_preview_max_time.SetValue( self._new_options.GetNoneableInteger( 'file_viewing_statistics_preview_max_time' ) )
self._file_viewing_stats_menu_display.SetValue( self._new_options.GetInteger( 'file_viewing_stats_menu_display' ) )
#
vbox = QP.VBoxLayout()
rows = []
rows.append( ( 'Enable file viewing statistics tracking?:', self._file_viewing_statistics_active ) )
rows.append( ( 'Enable file viewing statistics tracking on the duplicate filter?:', self._file_viewing_statistics_active_on_dupe_filter ) )
rows.append( ( 'Min time to view on media viewer to count as a view (seconds):', self._file_viewing_statistics_media_min_time ) )
rows.append( ( 'Cap any view on the media viewer to this maximum time (seconds):', self._file_viewing_statistics_media_max_time ) )
rows.append( ( 'Min time to view on preview viewer to count as a view (seconds):', self._file_viewing_statistics_preview_min_time ) )
rows.append( ( 'Cap any view on the preview viewer to this maximum time (seconds):', self._file_viewing_statistics_preview_max_time ) )
rows.append( ( 'Show media/preview viewing stats on media right-click menus?:', self._file_viewing_stats_menu_display ) )
gridbox = ClientGUICommon.WrapInGrid( self, rows )
QP.AddToLayout( vbox, gridbox, CC.FLAGS_EXPAND_SIZER_PERPENDICULAR )
vbox.addStretch( 1 )
self.setLayout( vbox )
def UpdateOptions( self ):
self._new_options.SetBoolean( 'file_viewing_statistics_active', self._file_viewing_statistics_active.isChecked() )
self._new_options.SetBoolean( 'file_viewing_statistics_active_on_dupe_filter', self._file_viewing_statistics_active_on_dupe_filter.isChecked() )
self._new_options.SetNoneableInteger( 'file_viewing_statistics_media_min_time', self._file_viewing_statistics_media_min_time.GetValue() )
self._new_options.SetNoneableInteger( 'file_viewing_statistics_media_max_time', self._file_viewing_statistics_media_max_time.GetValue() )
self._new_options.SetNoneableInteger( 'file_viewing_statistics_preview_min_time', self._file_viewing_statistics_preview_min_time.GetValue() )
self._new_options.SetNoneableInteger( 'file_viewing_statistics_preview_max_time', self._file_viewing_statistics_preview_max_time.GetValue() )
self._new_options.SetInteger( 'file_viewing_stats_menu_display', self._file_viewing_stats_menu_display.GetValue() )
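# Options page for the main window and misc gui behaviour, plus the frame location overrides listctrl.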
class _GUIPanel( QW.QWidget ):
def __init__( self, parent ):
QW.QWidget.__init__( self, parent )
self._main_gui_panel = ClientGUICommon.StaticBox( self, 'main window' )
self._app_display_name = QW.QLineEdit( self._main_gui_panel )
self._app_display_name.setToolTip( 'This is placed in every window title, with current version name. Rename if you want to personalise or differentiate.' )
self._confirm_client_exit = QW.QCheckBox( self._main_gui_panel )
self._activate_window_on_tag_search_page_activation = QW.QCheckBox( self._main_gui_panel )
tt = 'Middle-clicking one or more tags in a taglist will cause the creation of a new search page for those tags. If you do this from the media viewer or a child manage tags dialog, do you want to switch immediately to the main gui?'
self._activate_window_on_tag_search_page_activation.setToolTip( tt )
#
self._misc_panel = ClientGUICommon.StaticBox( self, 'misc' )
self._always_show_iso_time = QW.QCheckBox( self._misc_panel )
tt = 'In many places across the program (typically import status lists), the client will state a timestamp as "5 days ago". If you would prefer a standard ISO string, like "2018-03-01 12:40:23", check this.'
self._always_show_iso_time.setToolTip( tt )
self._human_bytes_sig_figs = QP.MakeQSpinBox( self._misc_panel, min = 1, max = 6 )
self._human_bytes_sig_figs.setToolTip( 'When the program presents a bytes size above 1KB, like 21.3KB or 4.11GB, how many total digits do we want in the number? 2 or 3 is best.' )
self._discord_dnd_fix = QW.QCheckBox( self._misc_panel )
self._discord_dnd_fix.setToolTip( 'This makes small file drag-and-drops a little laggier in exchange for discord support.' )
self._discord_dnd_filename_pattern = QW.QLineEdit( self._misc_panel )
self._discord_dnd_filename_pattern.setToolTip( 'When discord DnD is enabled, this export phrase will be used to rename your files. If no filename can be generated, the hash will be used instead.' )
self._secret_discord_dnd_fix = QW.QCheckBox( self._misc_panel )
self._secret_discord_dnd_fix.setToolTip( 'This saves the lag but is potentially dangerous, as it (may) treat the from-db-files-drag as a move rather than a copy and hence only works when the drop destination will not consume the files. It requires an additional secret Alternate key to unlock.' )
self._do_macos_debug_dialog_menus = QW.QCheckBox( self._misc_panel )
self._do_macos_debug_dialog_menus.setToolTip( 'There is a bug in Big Sur Qt regarding interacting with some menus in dialogs. The menus show but cannot be clicked. This shows the menu items in a debug dialog instead.' )
self._use_qt_file_dialogs = QW.QCheckBox( self._misc_panel )
self._use_qt_file_dialogs.setToolTip( 'If you get crashes opening file/directory dialogs, try this.' )
#
frame_locations_panel = ClientGUICommon.StaticBox( self, 'frame locations' )
self._frame_locations = ClientGUIListCtrl.BetterListCtrl( frame_locations_panel, CGLC.COLUMN_LIST_FRAME_LOCATIONS.ID, 15, data_to_tuples_func = lambda x: (self._GetPrettyFrameLocationInfo( x ), self._GetPrettyFrameLocationInfo( x )), activation_callback = self.EditFrameLocations )
self._frame_locations_edit_button = QW.QPushButton( 'edit', frame_locations_panel )
self._frame_locations_edit_button.clicked.connect( self.EditFrameLocations )
#
self._new_options = HG.client_controller.new_options
self._app_display_name.setText( self._new_options.GetString( 'app_display_name' ) )
self._confirm_client_exit.setChecked( HC.options[ 'confirm_client_exit' ] )
self._activate_window_on_tag_search_page_activation.setChecked( self._new_options.GetBoolean( 'activate_window_on_tag_search_page_activation' ) )
self._always_show_iso_time.setChecked( self._new_options.GetBoolean( 'always_show_iso_time' ) )
self._human_bytes_sig_figs.setValue( self._new_options.GetInteger( 'human_bytes_sig_figs' ) )
self._discord_dnd_fix.setChecked( self._new_options.GetBoolean( 'discord_dnd_fix' ) )
self._discord_dnd_filename_pattern.setText( self._new_options.GetString( 'discord_dnd_filename_pattern' ) )
self._secret_discord_dnd_fix.setChecked( self._new_options.GetBoolean( 'secret_discord_dnd_fix' ) )
self._do_macos_debug_dialog_menus.setChecked( self._new_options.GetBoolean( 'do_macos_debug_dialog_menus' ) )
self._use_qt_file_dialogs.setChecked( self._new_options.GetBoolean( 'use_qt_file_dialogs' ) )
for ( name, info ) in self._new_options.GetFrameLocations():
listctrl_list = QP.ListsToTuples( [ name ] + list( info ) )
self._frame_locations.AddDatas( ( listctrl_list, ) )
#self._frame_locations.SortListItems( col = 0 )
#
rows = []
rows.append( ( 'Application display name: ', self._app_display_name ) )
rows.append( ( 'Confirm client exit: ', self._confirm_client_exit ) )
rows.append( ( 'Switch to main window when opening tag search page from media viewer: ', self._activate_window_on_tag_search_page_activation ) )
gridbox = ClientGUICommon.WrapInGrid( self._main_gui_panel, rows )
self._main_gui_panel.Add( gridbox, CC.FLAGS_EXPAND_SIZER_PERPENDICULAR )
rows = []
rows.append( ( 'Prefer ISO time ("2018-03-01 12:40:23") to "5 days ago": ', self._always_show_iso_time ) )
rows.append( ( 'BUGFIX: Discord file drag-and-drop fix (works for <=25, <200MB file DnDs): ', self._discord_dnd_fix ) )
rows.append( ( 'Discord drag-and-drop filename pattern: ', self._discord_dnd_filename_pattern ) )
rows.append( ( 'Export pattern shortcuts: ', ClientGUICommon.ExportPatternButton( self ) ) )
rows.append( ( 'EXPERIMENTAL: Bytes strings >1KB pseudo significant figures: ', self._human_bytes_sig_figs ) )
rows.append( ( 'EXPERIMENTAL BUGFIX: Secret discord file drag-and-drop fix: ', self._secret_discord_dnd_fix ) )
rows.append( ( 'BUGFIX: If on macOS, show dialog menus in a debug menu: ', self._do_macos_debug_dialog_menus ) )
rows.append( ( 'ANTI-CRASH BUGFIX: Use Qt file/directory selection dialogs, rather than OS native: ', self._use_qt_file_dialogs ) )
gridbox = ClientGUICommon.WrapInGrid( self, rows )
self._misc_panel.Add( gridbox, CC.FLAGS_EXPAND_SIZER_PERPENDICULAR )
text = 'Here you can override the current and default values for many frame and dialog sizing and positioning variables.'
text += os.linesep
text += 'This is an advanced control. If you aren\'t confident of what you are doing here, come back later!'
frame_locations_panel.Add( QW.QLabel( text, frame_locations_panel ), CC.FLAGS_EXPAND_PERPENDICULAR )
frame_locations_panel.Add( self._frame_locations, CC.FLAGS_EXPAND_BOTH_WAYS )
frame_locations_panel.Add( self._frame_locations_edit_button, CC.FLAGS_ON_RIGHT )
vbox = QP.VBoxLayout()
QP.AddToLayout( vbox, self._main_gui_panel, CC.FLAGS_EXPAND_SIZER_PERPENDICULAR )
QP.AddToLayout( vbox, self._misc_panel, CC.FLAGS_EXPAND_SIZER_PERPENDICULAR )
QP.AddToLayout( vbox, frame_locations_panel, CC.FLAGS_EXPAND_BOTH_WAYS )
self.setLayout( vbox )
def _GetPrettyFrameLocationInfo( self, listctrl_list ):
pretty_listctrl_list = []
for item in listctrl_list:
pretty_listctrl_list.append( str( item ) )
return pretty_listctrl_list
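# Opens an edit dialog per selected row; each row is the 9-tuple ( name, remember_size, remember_position, last_size, last_position, default_gravity, default_position, maximised, fullscreen ) unpacked in UpdateOptions below.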
def EditFrameLocations( self ):
for listctrl_list in self._frame_locations.GetData( only_selected = True ):
title = 'set frame location information'
with ClientGUITopLevelWindowsPanels.DialogEdit( self, title ) as dlg:
panel = ClientGUIScrolledPanelsEdit.EditFrameLocationPanel( dlg, listctrl_list )
dlg.SetPanel( panel )
if dlg.exec() == QW.QDialog.Accepted:
new_listctrl_list = panel.GetValue()
self._frame_locations.ReplaceData( listctrl_list, new_listctrl_list )
def UpdateOptions( self ):
HC.options[ 'confirm_client_exit' ] = self._confirm_client_exit.isChecked()
self._new_options.SetBoolean( 'always_show_iso_time', self._always_show_iso_time.isChecked() )
self._new_options.SetInteger( 'human_bytes_sig_figs', self._human_bytes_sig_figs.value() )
self._new_options.SetBoolean( 'activate_window_on_tag_search_page_activation', self._activate_window_on_tag_search_page_activation.isChecked() )
app_display_name = self._app_display_name.text()
if app_display_name == '':
app_display_name = 'hydrus client'
self._new_options.SetString( 'app_display_name', app_display_name )
self._new_options.SetBoolean( 'discord_dnd_fix', self._discord_dnd_fix.isChecked() )
self._new_options.SetString( 'discord_dnd_filename_pattern', self._discord_dnd_filename_pattern.text() )
self._new_options.SetBoolean( 'secret_discord_dnd_fix', self._secret_discord_dnd_fix.isChecked() )
self._new_options.SetBoolean( 'do_macos_debug_dialog_menus', self._do_macos_debug_dialog_menus.isChecked() )
self._new_options.SetBoolean( 'use_qt_file_dialogs', self._use_qt_file_dialogs.isChecked() )
for listctrl_list in self._frame_locations.GetData():
( name, remember_size, remember_position, last_size, last_position, default_gravity, default_position, maximised, fullscreen ) = listctrl_list
self._new_options.SetFrameLocation( name, remember_size, remember_position, last_size, last_position, default_gravity, default_position, maximised, fullscreen )
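# Options page for gui sessions and page tabs: default session, autosave cadence, session backups, tab placement/alignment, and page-name display.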
class _GUIPagesPanel( QW.QWidget ):
def __init__( self, parent, new_options ):
QW.QWidget.__init__( self, parent )
self._new_options = new_options
self._sessions_panel = ClientGUICommon.StaticBox( self, 'sessions' )
self._default_gui_session = QW.QComboBox( self._sessions_panel )
self._last_session_save_period_minutes = QP.MakeQSpinBox( self._sessions_panel, min = 1, max = 1440 )
self._only_save_last_session_during_idle = QW.QCheckBox( self._sessions_panel )
self._only_save_last_session_during_idle.setToolTip( 'This is useful if you usually have a very large session (200,000+ files/import items open) and a client that is always on.' )
self._number_of_gui_session_backups = QP.MakeQSpinBox( self._sessions_panel, min = 1, max = 32 )
self._number_of_gui_session_backups.setToolTip( 'The client keeps multiple rolling backups of your gui sessions. If you have very large sessions, you might like to reduce this number.' )
self._show_session_size_warnings = QW.QCheckBox( self._sessions_panel )
self._show_session_size_warnings.setToolTip( 'This will give you a once-per-boot warning popup if your active session contains more than 10M weight.' )
#
self._pages_panel = ClientGUICommon.StaticBox( self, 'pages' )
self._default_new_page_goes = ClientGUICommon.BetterChoice( self._pages_panel )
for value in [ CC.NEW_PAGE_GOES_FAR_LEFT, CC.NEW_PAGE_GOES_LEFT_OF_CURRENT, CC.NEW_PAGE_GOES_RIGHT_OF_CURRENT, CC.NEW_PAGE_GOES_FAR_RIGHT ]:
self._default_new_page_goes.addItem( CC.new_page_goes_string_lookup[ value ], value )
self._notebook_tab_alignment = ClientGUICommon.BetterChoice( self._pages_panel )
for value in [ CC.DIRECTION_UP, CC.DIRECTION_LEFT, CC.DIRECTION_RIGHT, CC.DIRECTION_DOWN ]:
self._notebook_tab_alignment.addItem( CC.directions_alignment_string_lookup[ value ], value )
self._total_pages_warning = QP.MakeQSpinBox( self._pages_panel, min=5, max=500 )
self._reverse_page_shift_drag_behaviour = QW.QCheckBox( self._pages_panel )
self._reverse_page_shift_drag_behaviour.setToolTip( 'By default, holding down shift when you drop a page tab means the client will not \'chase\' the dropped tab. Check this to reverse the behaviour: no chasing by default, with shift-drop meaning to chase.' )
#
self._page_names_panel = ClientGUICommon.StaticBox( self._pages_panel, 'page tab names' )
self._max_page_name_chars = QP.MakeQSpinBox( self._page_names_panel, min=1, max=256 )
self._elide_page_tab_names = QW.QCheckBox( self._page_names_panel )
self._page_file_count_display = ClientGUICommon.BetterChoice( self._page_names_panel )
for display_type in ( CC.PAGE_FILE_COUNT_DISPLAY_ALL, CC.PAGE_FILE_COUNT_DISPLAY_ONLY_IMPORTERS, CC.PAGE_FILE_COUNT_DISPLAY_NONE ):
self._page_file_count_display.addItem( CC.page_file_count_display_string_lookup[ display_type ], display_type )
self._import_page_progress_display = QW.QCheckBox( self._page_names_panel )
#
self._controls_panel = ClientGUICommon.StaticBox( self, 'controls' )
self._set_search_focus_on_page_change = QW.QCheckBox( self._controls_panel )
self._hide_preview = QW.QCheckBox( self._controls_panel )
#
gui_session_names = HG.client_controller.Read( 'serialisable_names', HydrusSerialisable.SERIALISABLE_TYPE_GUI_SESSION_CONTAINER )
if CC.LAST_SESSION_SESSION_NAME not in gui_session_names:
gui_session_names.insert( 0, CC.LAST_SESSION_SESSION_NAME )
self._default_gui_session.addItem( 'just a blank page', None )
for name in gui_session_names:
self._default_gui_session.addItem( name, name )
try:
QP.SetStringSelection( self._default_gui_session, HC.options[ 'default_gui_session' ] )
except Exception:
self._default_gui_session.setCurrentIndex( 0 )
self._last_session_save_period_minutes.setValue( self._new_options.GetInteger( 'last_session_save_period_minutes' ) )
self._only_save_last_session_during_idle.setChecked( self._new_options.GetBoolean( 'only_save_last_session_during_idle' ) )
self._number_of_gui_session_backups.setValue( self._new_options.GetInteger( 'number_of_gui_session_backups' ) )
self._show_session_size_warnings.setChecked( self._new_options.GetBoolean( 'show_session_size_warnings' ) )
self._default_new_page_goes.SetValue( self._new_options.GetInteger( 'default_new_page_goes' ) )
self._notebook_tab_alignment.SetValue( self._new_options.GetInteger( 'notebook_tab_alignment' ) )
self._max_page_name_chars.setValue( self._new_options.GetInteger( 'max_page_name_chars' ) )
self._elide_page_tab_names.setChecked( self._new_options.GetBoolean( 'elide_page_tab_names' ) )
self._page_file_count_display.SetValue( self._new_options.GetInteger( 'page_file_count_display' ) )
self._import_page_progress_display.setChecked( self._new_options.GetBoolean( 'import_page_progress_display' ) )
self._total_pages_warning.setValue( self._new_options.GetInteger( 'total_pages_warning' ) )
self._reverse_page_shift_drag_behaviour.setChecked( self._new_options.GetBoolean( 'reverse_page_shift_drag_behaviour' ) )
self._set_search_focus_on_page_change.setChecked( self._new_options.GetBoolean( 'set_search_focus_on_page_change' ) )
self._hide_preview.setChecked( HC.options[ 'hide_preview' ] )
#
rows = []
rows.append( ( 'Default session on startup: ', self._default_gui_session ) )
rows.append( ( 'If \'last session\' above, autosave it how often (minutes)?', self._last_session_save_period_minutes ) )
rows.append( ( 'If \'last session\' above, only autosave during idle time?', self._only_save_last_session_during_idle ) )
rows.append( ( 'Number of session backups to keep: ', self._number_of_gui_session_backups ) )
rows.append( ( 'Show warning popup if session size exceeds 10,000,000: ', self._show_session_size_warnings ) )
sessions_gridbox = ClientGUICommon.WrapInGrid( self._sessions_panel, rows )
self._sessions_panel.Add( sessions_gridbox, CC.FLAGS_EXPAND_SIZER_BOTH_WAYS )
rows = []
rows.append( ( 'By default, put new page tabs on: ', self._default_new_page_goes ) )
rows.append( ( 'Notebook tab alignment: ', self._notebook_tab_alignment ) )
rows.append( ( 'Reverse page tab shift-drag behaviour: ', self._reverse_page_shift_drag_behaviour ) )
rows.append( ( 'Warn at this many total pages: ', self._total_pages_warning ) )
gridbox = ClientGUICommon.WrapInGrid( self._pages_panel, rows )
rows = []
rows.append( ( 'Max characters to display in a page name: ', self._max_page_name_chars ) )
rows.append( ( 'When there are too many tabs to fit, elide their names with \'...\' so they fit: ', self._elide_page_tab_names ) )
rows.append( ( 'Show page file count after its name: ', self._page_file_count_display ) )
rows.append( ( 'Show import page x/y progress after its name: ', self._import_page_progress_display ) )
page_names_gridbox = ClientGUICommon.WrapInGrid( self._page_names_panel, rows )
label = 'If you have enough pages in a row, left/right arrows will appear to navigate them back and forth.'
label += os.linesep
label += 'Due to an unfortunate Qt issue, the tab bar will scroll so the current tab is right-most visible whenever a page is renamed.'
label += os.linesep
label += 'Therefore, if you set pages to have current file count or import progress in their name (which will update from time to time), do not put import pages in a long row of tabs, as it will reset scroll position on every progress update.'
label += os.linesep
label += 'Just make some nested \'page of pages\' so they are not all in the same row.'
st = ClientGUICommon.BetterStaticText( self._page_names_panel, label )
st.setWordWrap( True )
self._page_names_panel.Add( st, CC.FLAGS_EXPAND_PERPENDICULAR )
self._page_names_panel.Add( page_names_gridbox, CC.FLAGS_EXPAND_SIZER_PERPENDICULAR )
self._pages_panel.Add( gridbox, CC.FLAGS_EXPAND_SIZER_PERPENDICULAR )
self._pages_panel.Add( self._page_names_panel, CC.FLAGS_EXPAND_SIZER_PERPENDICULAR )
rows = []
rows.append( ( 'When switching to a page, focus its text input field (if any): ', self._set_search_focus_on_page_change ) )
rows.append( ( 'Hide the bottom-left preview window: ', self._hide_preview ) )
gridbox = ClientGUICommon.WrapInGrid( self._controls_panel, rows )
self._controls_panel.Add( gridbox, CC.FLAGS_EXPAND_SIZER_BOTH_WAYS )
vbox = QP.VBoxLayout()
QP.AddToLayout( vbox, self._sessions_panel, CC.FLAGS_EXPAND_SIZER_PERPENDICULAR )
QP.AddToLayout( vbox, self._pages_panel, CC.FLAGS_EXPAND_PERPENDICULAR )
QP.AddToLayout( vbox, self._controls_panel, CC.FLAGS_EXPAND_PERPENDICULAR )
vbox.addStretch( 1 )
self.setLayout( vbox )
def UpdateOptions( self ):
HC.options[ 'default_gui_session' ] = self._default_gui_session.currentText()
self._new_options.SetInteger( 'notebook_tab_alignment', self._notebook_tab_alignment.GetValue() )
self._new_options.SetInteger( 'last_session_save_period_minutes', self._last_session_save_period_minutes.value() )
self._new_options.SetInteger( 'number_of_gui_session_backups', self._number_of_gui_session_backups.value() )
self._new_options.SetBoolean( 'show_session_size_warnings', self._show_session_size_warnings.isChecked() )
self._new_options.SetBoolean( 'only_save_last_session_during_idle', self._only_save_last_session_during_idle.isChecked() )
self._new_options.SetInteger( 'default_new_page_goes', self._default_new_page_goes.GetValue() )
self._new_options.SetInteger( 'max_page_name_chars', self._max_page_name_chars.value() )
self._new_options.SetBoolean( 'elide_page_tab_names', self._elide_page_tab_names.isChecked() )
self._new_options.SetInteger( 'page_file_count_display', self._page_file_count_display.GetValue() )
self._new_options.SetBoolean( 'import_page_progress_display', self._import_page_progress_display.isChecked() )
self._new_options.SetInteger( 'total_pages_warning', self._total_pages_warning.value() )
self._new_options.SetBoolean( 'reverse_page_shift_drag_behaviour', self._reverse_page_shift_drag_behaviour.isChecked() )
self._new_options.SetBoolean( 'set_search_focus_on_page_change', self._set_search_focus_on_page_change.isChecked() )
HC.options[ 'hide_preview' ] = self._hide_preview.isChecked()
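# Options page holding the two default file import options: 'quiet' for import folders and subscriptions, 'loud' for page-based importers.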
class _ImportingPanel( QW.QWidget ):
def __init__( self, parent, new_options ):
QW.QWidget.__init__( self, parent )
self._new_options = new_options
#
default_fios = ClientGUICommon.StaticBox( self, 'default file import options' )
show_downloader_options = True
quiet_file_import_options = self._new_options.GetDefaultFileImportOptions( 'quiet' )
self._quiet_fios = ClientGUIImport.FileImportOptionsButton( default_fios, quiet_file_import_options, show_downloader_options )
loud_file_import_options = self._new_options.GetDefaultFileImportOptions( 'loud' )
self._loud_fios = ClientGUIImport.FileImportOptionsButton( default_fios, loud_file_import_options, show_downloader_options )
#
rows = []
rows.append( ( 'For \'quiet\' import contexts like import folders and subscriptions:', self._quiet_fios ) )
rows.append( ( 'For import contexts that work on pages:', self._loud_fios ) )
gridbox = ClientGUICommon.WrapInGrid( default_fios, rows )
default_fios.Add( gridbox, CC.FLAGS_EXPAND_SIZER_PERPENDICULAR )
#
vbox = QP.VBoxLayout()
QP.AddToLayout( vbox, default_fios, CC.FLAGS_EXPAND_PERPENDICULAR )
vbox.addStretch( 1 )
self.setLayout( vbox )
def UpdateOptions( self ):
self._new_options.SetDefaultFileImportOptions( 'quiet', self._quiet_fios.GetValue() )
self._new_options.SetDefaultFileImportOptions( 'loud', self._loud_fios.GetValue() )
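# Options page controlling when high-cpu maintenance jobs may run (idle/shutdown rules, system-busy thresholds) and the file maintenance throttles.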
class _MaintenanceAndProcessingPanel( QW.QWidget ):
def __init__( self, parent ):
QW.QWidget.__init__( self, parent )
self._new_options = HG.client_controller.new_options
self._jobs_panel = ClientGUICommon.StaticBox( self, 'when to run high cpu jobs' )
self._file_maintenance_panel = ClientGUICommon.StaticBox( self, 'file maintenance' )
self._idle_panel = ClientGUICommon.StaticBox( self._jobs_panel, 'idle' )
self._shutdown_panel = ClientGUICommon.StaticBox( self._jobs_panel, 'shutdown' )
#
self._idle_normal = QW.QCheckBox( self._idle_panel )
self._idle_normal.clicked.connect( self._EnableDisableIdleNormal )
self._idle_period = ClientGUICommon.NoneableSpinCtrl( self._idle_panel, '', min = 1, max = 1000, multiplier = 60, unit = 'minutes', none_phrase = 'ignore normal browsing' )
self._idle_mouse_period = ClientGUICommon.NoneableSpinCtrl( self._idle_panel, '', min = 1, max = 1000, multiplier = 60, unit = 'minutes', none_phrase = 'ignore mouse movements' )
self._idle_mode_client_api_timeout = ClientGUICommon.NoneableSpinCtrl( self._idle_panel, '', min = 1, max = 1000, multiplier = 60, unit = 'minutes', none_phrase = 'ignore client api' )
self._system_busy_cpu_percent = QP.MakeQSpinBox( self._idle_panel, min = 5, max = 99 )
self._system_busy_cpu_count = ClientGUICommon.NoneableSpinCtrl( self._idle_panel, min = 1, max = 64, unit = 'cores', none_phrase = 'ignore cpu usage' )
#
self._idle_shutdown = ClientGUICommon.BetterChoice( self._shutdown_panel )
for idle_id in ( CC.IDLE_NOT_ON_SHUTDOWN, CC.IDLE_ON_SHUTDOWN, CC.IDLE_ON_SHUTDOWN_ASK_FIRST ):
self._idle_shutdown.addItem( CC.idle_string_lookup[ idle_id ], idle_id )
self._idle_shutdown.currentIndexChanged.connect( self._EnableDisableIdleShutdown )
self._idle_shutdown_max_minutes = QP.MakeQSpinBox( self._shutdown_panel, min=1, max=1440 )
self._shutdown_work_period = ClientGUITime.TimeDeltaButton( self._shutdown_panel, min = 60, days = True, hours = True, minutes = True )
#
min_unit_value = 1
max_unit_value = 1000
min_time_delta = 1
self._file_maintenance_during_idle = QW.QCheckBox( self._file_maintenance_panel )
self._file_maintenance_idle_throttle_velocity = ClientGUITime.VelocityCtrl( self._file_maintenance_panel, min_unit_value, max_unit_value, min_time_delta, minutes = True, seconds = True, per_phrase = 'every', unit = 'heavy work units' )
self._file_maintenance_during_active = QW.QCheckBox( self._file_maintenance_panel )
self._file_maintenance_active_throttle_velocity = ClientGUITime.VelocityCtrl( self._file_maintenance_panel, min_unit_value, max_unit_value, min_time_delta, minutes = True, seconds = True, per_phrase = 'every', unit = 'heavy work units' )
tt = 'Different jobs will count for more or less weight. A file metadata reparse will count as one work unit, but quicker jobs like checking for file presence will count as fractions of one and will work more frequently.'
tt += os.linesep * 2
tt += 'Please note that this throttle is not rigorous for long timescales, as file processing history is not currently saved on client exit. If you restart the client, the file manager thinks it has run 0 jobs and will be happy to run until the throttle kicks in again.'
self._file_maintenance_idle_throttle_velocity.setToolTip( tt )
self._file_maintenance_active_throttle_velocity.setToolTip( tt )
#
self._idle_normal.setChecked( HC.options[ 'idle_normal' ] )
self._idle_period.SetValue( HC.options[ 'idle_period' ] )
self._idle_mouse_period.SetValue( HC.options[ 'idle_mouse_period' ] )
self._idle_mode_client_api_timeout.SetValue( self._new_options.GetNoneableInteger( 'idle_mode_client_api_timeout' ) )
self._system_busy_cpu_percent.setValue( self._new_options.GetInteger( 'system_busy_cpu_percent' ) )
self._system_busy_cpu_count.SetValue( self._new_options.GetNoneableInteger( 'system_busy_cpu_count' ) )
self._idle_shutdown.SetValue( HC.options[ 'idle_shutdown' ] )
self._idle_shutdown_max_minutes.setValue( HC.options[ 'idle_shutdown_max_minutes' ] )
self._shutdown_work_period.SetValue( self._new_options.GetInteger( 'shutdown_work_period' ) )
self._file_maintenance_during_idle.setChecked( self._new_options.GetBoolean( 'file_maintenance_during_idle' ) )
file_maintenance_idle_throttle_files = self._new_options.GetInteger( 'file_maintenance_idle_throttle_files' )
file_maintenance_idle_throttle_time_delta = self._new_options.GetInteger( 'file_maintenance_idle_throttle_time_delta' )
file_maintenance_idle_throttle_velocity = ( file_maintenance_idle_throttle_files, file_maintenance_idle_throttle_time_delta )
self._file_maintenance_idle_throttle_velocity.SetValue( file_maintenance_idle_throttle_velocity )
self._file_maintenance_during_active.setChecked( self._new_options.GetBoolean( 'file_maintenance_during_active' ) )
file_maintenance_active_throttle_files = self._new_options.GetInteger( 'file_maintenance_active_throttle_files' )
file_maintenance_active_throttle_time_delta = self._new_options.GetInteger( 'file_maintenance_active_throttle_time_delta' )
file_maintenance_active_throttle_velocity = ( file_maintenance_active_throttle_files, file_maintenance_active_throttle_time_delta )
self._file_maintenance_active_throttle_velocity.SetValue( file_maintenance_active_throttle_velocity )
#
rows = []
rows.append( ( 'Run maintenance jobs when the client is idle and the system is not otherwise busy: ', self._idle_normal ) )
rows.append( ( 'Permit idle mode if no general browsing activity has occurred in the past: ', self._idle_period ) )
rows.append( ( 'Permit idle mode if the mouse has not been moved in the past: ', self._idle_mouse_period ) )
rows.append( ( 'Permit idle mode if no Client API requests in the past: ', self._idle_mode_client_api_timeout ) )
hbox = QP.HBoxLayout()
QP.AddToLayout( hbox, self._system_busy_cpu_percent, CC.FLAGS_CENTER )
QP.AddToLayout( hbox, ClientGUICommon.BetterStaticText( self._idle_panel, label = '% on ' ), CC.FLAGS_CENTER )
QP.AddToLayout( hbox, self._system_busy_cpu_count, CC.FLAGS_CENTER )
import psutil
num_cores = psutil.cpu_count()
QP.AddToLayout( hbox, ClientGUICommon.BetterStaticText( self._idle_panel, label = '(you appear to have {} cores)'.format( num_cores ) ), CC.FLAGS_CENTER )
rows.append( ( 'Consider the system busy if CPU usage is above: ', hbox ) )
gridbox = ClientGUICommon.WrapInGrid( self._idle_panel, rows )
self._idle_panel.Add( gridbox, CC.FLAGS_EXPAND_SIZER_PERPENDICULAR )
#
rows = []
rows.append( ( 'Run jobs on shutdown: ', self._idle_shutdown ) )
rows.append( ( 'Only run shutdown jobs once per: ', self._shutdown_work_period ) )
rows.append( ( 'Max number of minutes to run shutdown jobs: ', self._idle_shutdown_max_minutes ) )
gridbox = ClientGUICommon.WrapInGrid( self._shutdown_panel, rows )
self._shutdown_panel.Add( gridbox, CC.FLAGS_EXPAND_SIZER_PERPENDICULAR )
#
text = '***'
text += os.linesep
text += 'If you are a new user or do not completely understand these options, please do not touch them! Do not set the client to be idle all the time unless you know what you are doing or are testing something and are prepared for potential problems!'
text += os.linesep
text += '***'
text += os.linesep * 2
text += 'Sometimes, the client needs to do some heavy maintenance. This could be reformatting the database to keep it running fast or processing a large number of tags from a repository. Typically, these jobs will not allow you to use the gui while they run, and on slower computers--or those with not much memory--they can take a long time to complete.'
text += os.linesep * 2
text += 'You can set these jobs to run only when the client is idle, or only during shutdown, or neither, or both. If you leave the client on all the time in the background, focusing on \'idle time\' processing is often ideal. If you have a slow computer, relying on \'shutdown\' processing (which you can manually start when convenient), is often better.'
text += os.linesep * 2
text += 'If the client switches from idle to not idle during a job, it will try to abandon it and give you back control. This is not always possible, and even when it is, it will sometimes take several minutes, particularly on slower machines or those on HDDs rather than SSDs.'
text += os.linesep * 2
text += 'If the client believes the system is busy, it will generally not start jobs.'
st = ClientGUICommon.BetterStaticText( self._jobs_panel, label = text )
st.setWordWrap( True )
self._jobs_panel.Add( st, CC.FLAGS_EXPAND_PERPENDICULAR )
self._jobs_panel.Add( self._idle_panel, CC.FLAGS_EXPAND_PERPENDICULAR )
self._jobs_panel.Add( self._shutdown_panel, CC.FLAGS_EXPAND_PERPENDICULAR )
#
message = 'Scheduled jobs such as reparsing file metadata and regenerating thumbnails are performed in the background.'
self._file_maintenance_panel.Add( ClientGUICommon.BetterStaticText( self._file_maintenance_panel, label = message ), CC.FLAGS_EXPAND_PERPENDICULAR )
rows = []
rows.append( ( 'Run file maintenance during idle time: ', self._file_maintenance_during_idle ) )
rows.append( ( 'Idle throttle: ', self._file_maintenance_idle_throttle_velocity ) )
rows.append( ( 'Run file maintenance during normal time: ', self._file_maintenance_during_active ) )
rows.append( ( 'Normal throttle: ', self._file_maintenance_active_throttle_velocity ) )
gridbox = ClientGUICommon.WrapInGrid( self._file_maintenance_panel, rows )
self._file_maintenance_panel.Add( gridbox, CC.FLAGS_EXPAND_SIZER_PERPENDICULAR )
#
vbox = QP.VBoxLayout()
QP.AddToLayout( vbox, self._jobs_panel, CC.FLAGS_EXPAND_PERPENDICULAR )
QP.AddToLayout( vbox, self._file_maintenance_panel, CC.FLAGS_EXPAND_PERPENDICULAR )
vbox.addStretch( 1 )
self.setLayout( vbox )
self._EnableDisableIdleNormal()
self._EnableDisableIdleShutdown()
self._system_busy_cpu_count.valueChanged.connect( self._EnableDisableCPUPercent )
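# The cpu percent control only means anything when a core count is set, so it tracks both the enabled state and the value of the core count control.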
def _EnableDisableCPUPercent( self ):
enabled = self._system_busy_cpu_count.isEnabled() and self._system_busy_cpu_count.GetValue() is not None
self._system_busy_cpu_percent.setEnabled( enabled )
def _EnableDisableIdleNormal( self ):
enabled = self._idle_normal.isChecked()
self._idle_period.setEnabled( enabled )
self._idle_mouse_period.setEnabled( enabled )
self._idle_mode_client_api_timeout.setEnabled( enabled )
self._system_busy_cpu_count.setEnabled( enabled )
self._EnableDisableCPUPercent()
def _EnableDisableIdleShutdown( self ):
enabled = self._idle_shutdown.GetValue() != CC.IDLE_NOT_ON_SHUTDOWN
self._shutdown_work_period.setEnabled( enabled )
self._idle_shutdown_max_minutes.setEnabled( enabled )
def UpdateOptions( self ):
HC.options[ 'idle_normal' ] = self._idle_normal.isChecked()
HC.options[ 'idle_period' ] = self._idle_period.GetValue()
HC.options[ 'idle_mouse_period' ] = self._idle_mouse_period.GetValue()
self._new_options.SetNoneableInteger( 'idle_mode_client_api_timeout', self._idle_mode_client_api_timeout.GetValue() )
self._new_options.SetInteger( 'system_busy_cpu_percent', self._system_busy_cpu_percent.value() )
self._new_options.SetNoneableInteger( 'system_busy_cpu_count', self._system_busy_cpu_count.GetValue() )
HC.options[ 'idle_shutdown' ] = self._idle_shutdown.GetValue()
HC.options[ 'idle_shutdown_max_minutes' ] = self._idle_shutdown_max_minutes.value()
self._new_options.SetInteger( 'shutdown_work_period', self._shutdown_work_period.GetValue() )
self._new_options.SetBoolean( 'file_maintenance_during_idle', self._file_maintenance_during_idle.isChecked() )
file_maintenance_idle_throttle_velocity = self._file_maintenance_idle_throttle_velocity.GetValue()
( file_maintenance_idle_throttle_files, file_maintenance_idle_throttle_time_delta ) = file_maintenance_idle_throttle_velocity
self._new_options.SetInteger( 'file_maintenance_idle_throttle_files', file_maintenance_idle_throttle_files )
self._new_options.SetInteger( 'file_maintenance_idle_throttle_time_delta', file_maintenance_idle_throttle_time_delta )
self._new_options.SetBoolean( 'file_maintenance_during_active', self._file_maintenance_during_active.isChecked() )
file_maintenance_active_throttle_velocity = self._file_maintenance_active_throttle_velocity.GetValue()
( file_maintenance_active_throttle_files, file_maintenance_active_throttle_time_delta ) = file_maintenance_active_throttle_velocity
self._new_options.SetInteger( 'file_maintenance_active_throttle_files', file_maintenance_active_throttle_files )
self._new_options.SetInteger( 'file_maintenance_active_throttle_time_delta', file_maintenance_active_throttle_time_delta )
class _MediaPanel( QW.QWidget ):
def __init__( self, parent ):
QW.QWidget.__init__( self, parent )
self._new_options = HG.client_controller.new_options
self._animation_start_position = QP.MakeQSpinBox( self, min=0, max=100 )
self._disable_cv_for_gifs = QW.QCheckBox( self )
self._disable_cv_for_gifs.setToolTip( 'OpenCV is good at rendering gifs, but if you have problems with it and your graphics card, check this and the less reliable and slower PIL will be used instead. EDIT: OpenCV is much better these days--this is mostly not needed.' )
self._load_images_with_pil = QW.QCheckBox( self )
self._load_images_with_pil.setToolTip( 'OpenCV is much faster than PIL, but it is sometimes less reliable. Switch this on if you experience crashes or other unusual problems while importing or viewing certain images. EDIT: OpenCV is much better these days--this is mostly not needed.' )
self._use_system_ffmpeg = QW.QCheckBox( self )
self._use_system_ffmpeg.setToolTip( 'Check this to always default to the system ffmpeg in your path, rather than using the static ffmpeg in hydrus\'s bin directory. (requires restart)' )
self._always_loop_gifs = QW.QCheckBox( self )
self._always_loop_gifs.setToolTip( 'Some GIFs have metadata specifying how many times they should be played, usually 1. Uncheck this to obey that number.' )
self._media_viewer_cursor_autohide_time_ms = ClientGUICommon.NoneableSpinCtrl( self, none_phrase = 'do not autohide', min = 100, max = 100000, unit = 'ms' )
self._anchor_and_hide_canvas_drags = QW.QCheckBox( self )
self._touchscreen_canvas_drags_unanchor = QW.QCheckBox( self )
from hydrus.client.gui.canvas import ClientGUICanvas
self._media_viewer_zoom_center = ClientGUICommon.BetterChoice( self )
for zoom_centerpoint_type in ClientGUICanvas.ZOOM_CENTERPOINT_TYPES:
self._media_viewer_zoom_center.addItem( ClientGUICanvas.zoom_centerpoints_str_lookup[ zoom_centerpoint_type ], zoom_centerpoint_type )
tt = 'When you zoom in or out, there is a centerpoint about which the image zooms. This point \'stays still\' while the image expands or shrinks around it. Different centerpoints give different feels, especially if you drag images around a bit.'
self._media_viewer_zoom_center.setToolTip( tt )
self._media_zooms = QW.QLineEdit( self )
self._media_zooms.textChanged.connect( self.EventZoomsChanged )
self._mpv_conf_path = QP.FilePickerCtrl( self, starting_directory = os.path.join( HC.STATIC_DIR, 'mpv-conf' ) )
self._animated_scanbar_height = QP.MakeQSpinBox( self, min=1, max=255 )
self._animated_scanbar_nub_width = QP.MakeQSpinBox( self, min=1, max=63 )
self._media_viewer_panel = ClientGUICommon.StaticBox( self, 'media viewer mime handling' )
media_viewer_list_panel = ClientGUIListCtrl.BetterListCtrlPanel( self._media_viewer_panel )
self._media_viewer_options = ClientGUIListCtrl.BetterListCtrl( media_viewer_list_panel, CGLC.COLUMN_LIST_MEDIA_VIEWER_OPTIONS.ID, 20, data_to_tuples_func = self._GetListCtrlData, activation_callback = self.EditMediaViewerOptions, use_simple_delete = True )
media_viewer_list_panel.SetListCtrl( self._media_viewer_options )
media_viewer_list_panel.AddButton( 'add', self.AddMediaViewerOptions, enabled_check_func = self._CanAddMediaViewOption )
media_viewer_list_panel.AddButton( 'edit', self.EditMediaViewerOptions, enabled_only_on_selection = True )
media_viewer_list_panel.AddDeleteButton( enabled_check_func = self._CanDeleteMediaViewOptions )
#
self._animation_start_position.setValue( int( HC.options[ 'animation_start_position' ] * 100.0 ) )
self._disable_cv_for_gifs.setChecked( self._new_options.GetBoolean( 'disable_cv_for_gifs' ) )
self._load_images_with_pil.setChecked( self._new_options.GetBoolean( 'load_images_with_pil' ) )
self._use_system_ffmpeg.setChecked( self._new_options.GetBoolean( 'use_system_ffmpeg' ) )
self._always_loop_gifs.setChecked( self._new_options.GetBoolean( 'always_loop_gifs' ) )
self._media_viewer_cursor_autohide_time_ms.SetValue( self._new_options.GetNoneableInteger( 'media_viewer_cursor_autohide_time_ms' ) )
self._anchor_and_hide_canvas_drags.setChecked( self._new_options.GetBoolean( 'anchor_and_hide_canvas_drags' ) )
self._touchscreen_canvas_drags_unanchor.setChecked( self._new_options.GetBoolean( 'touchscreen_canvas_drags_unanchor' ) )
self._animated_scanbar_height.setValue( self._new_options.GetInteger( 'animated_scanbar_height' ) )
self._animated_scanbar_nub_width.setValue( self._new_options.GetInteger( 'animated_scanbar_nub_width' ) )
self._media_viewer_zoom_center.SetValue( self._new_options.GetInteger( 'media_viewer_zoom_center' ) )
media_zooms = self._new_options.GetMediaZooms()
self._media_zooms.setText( ','.join( ( str( media_zoom ) for media_zoom in media_zooms ) ) )
all_media_view_options = self._new_options.GetMediaViewOptions()
for ( mime, view_options ) in all_media_view_options.items():
data = QP.ListsToTuples( [ mime ] + list( view_options ) )
self._media_viewer_options.AddDatas( ( data, ) )
self._media_viewer_options.Sort()
#
vbox = QP.VBoxLayout()
text = 'Please be warned that hydrus does not currently zoom in very efficiently at high zooms!'
text += os.linesep
text += 'Just be careful at >400%, particularly for already large files--it can lag out and eat a chunk of memory.'
st = ClientGUICommon.BetterStaticText( self, text )
st.setObjectName( 'HydrusWarning' )
QP.AddToLayout( vbox, st )
rows = []
rows.append( ( 'Start animations this % in:', self._animation_start_position ) )
rows.append( ( 'Prefer system FFMPEG:', self._use_system_ffmpeg ) )
rows.append( ( 'Always loop GIFs:', self._always_loop_gifs ) )
rows.append( ( 'Centerpoint for media zooming:', self._media_viewer_zoom_center ) )
rows.append( ( 'Media zooms:', self._media_zooms ) )
rows.append( ( 'Set a new mpv.conf on dialog ok?:', self._mpv_conf_path ) )
rows.append( ( 'Animation scanbar height:', self._animated_scanbar_height ) )
rows.append( ( 'Animation scanbar nub width:', self._animated_scanbar_nub_width ) )
rows.append( ( 'Time until mouse cursor autohides on media viewer:', self._media_viewer_cursor_autohide_time_ms ) )
rows.append( ( 'RECOMMEND WINDOWS ONLY: Hide and anchor mouse cursor on media viewer drags:', self._anchor_and_hide_canvas_drags ) )
rows.append( ( 'RECOMMEND WINDOWS ONLY: If set to hide and anchor, undo on apparent touchscreen drag:', self._touchscreen_canvas_drags_unanchor ) )
rows.append( ( 'BUGFIX: Load images with PIL (slower):', self._load_images_with_pil ) )
rows.append( ( 'BUGFIX: Load gifs with PIL instead of OpenCV (slower, bad transparency):', self._disable_cv_for_gifs ) )
gridbox = ClientGUICommon.WrapInGrid( self, rows )
QP.AddToLayout( vbox, gridbox, CC.FLAGS_EXPAND_SIZER_PERPENDICULAR )
self._media_viewer_panel.Add( media_viewer_list_panel, CC.FLAGS_EXPAND_BOTH_WAYS )
QP.AddToLayout( vbox, self._media_viewer_panel, CC.FLAGS_EXPAND_BOTH_WAYS )
self.setLayout( vbox )
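# Enabled-check callbacks for the list panel buttons: adding needs at least one searchable mime without a specific override, and only specific (non-general) mimes may be deleted.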
def _CanAddMediaViewOption( self ):
return len( self._GetUnsetMediaViewFiletypes() ) > 0
def _CanDeleteMediaViewOptions( self ):
deletable_mimes = set( HC.SEARCHABLE_MIMES )
selected_mimes = set()
for ( mime, media_show_action, media_start_paused, media_start_with_embed, preview_show_action, preview_start_paused, preview_start_with_embed, zoom_info ) in self._media_viewer_options.GetData( only_selected = True ):
selected_mimes.add( mime )
if len( selected_mimes ) == 0:
return False
all_selected_are_deletable = selected_mimes.issubset( deletable_mimes )
return all_selected_are_deletable
def _GetCopyOfGeneralMediaViewOptions( self, desired_mime ):
general_mime_type = HC.mimes_to_general_mimetypes[ desired_mime ]
for ( mime, media_show_action, media_start_paused, media_start_with_embed, preview_show_action, preview_start_paused, preview_start_with_embed, zoom_info ) in self._media_viewer_options.GetData():
if mime == general_mime_type:
view_options = ( desired_mime, media_show_action, media_start_paused, media_start_with_embed, preview_show_action, preview_start_paused, preview_start_with_embed, zoom_info )
return view_options
def _GetUnsetMediaViewFiletypes( self ):
editable_mimes = set( HC.SEARCHABLE_MIMES )
set_mimes = set()
for ( mime, media_show_action, media_start_paused, media_start_with_embed, preview_show_action, preview_start_paused, preview_start_with_embed, zoom_info ) in self._media_viewer_options.GetData():
set_mimes.add( mime )
unset_mimes = editable_mimes.difference( set_mimes )
return unset_mimes
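# Builds display/sort tuples for one media view row; zoom info is only shown when the mime is actually rendered (natively or with mpv).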
def _GetListCtrlData( self, data ):
( mime, media_show_action, media_start_paused, media_start_with_embed, preview_show_action, preview_start_paused, preview_start_with_embed, zoom_info ) = data
pretty_mime = self._GetPrettyMime( mime )
pretty_media_show_action = CC.media_viewer_action_string_lookup[ media_show_action ]
if media_start_paused:
pretty_media_show_action += ', start paused'
if media_start_with_embed:
pretty_media_show_action += ', start with embed button'
pretty_preview_show_action = CC.media_viewer_action_string_lookup[ preview_show_action ]
if preview_start_paused:
pretty_preview_show_action += ', start paused'
if preview_start_with_embed:
pretty_preview_show_action += ', start with embed button'
no_show = { media_show_action, preview_show_action }.isdisjoint( { CC.MEDIA_VIEWER_ACTION_SHOW_WITH_NATIVE, CC.MEDIA_VIEWER_ACTION_SHOW_WITH_MPV } )
if no_show:
pretty_zoom_info = ''
else:
pretty_zoom_info = str( zoom_info )
display_tuple = ( pretty_mime, pretty_media_show_action, pretty_preview_show_action, pretty_zoom_info )
sort_tuple = ( pretty_mime, pretty_media_show_action, pretty_preview_show_action, pretty_zoom_info )
return ( display_tuple, sort_tuple )
def _GetPrettyMime( self, mime ):
pretty_mime = HC.mime_string_lookup[ mime ]
if mime not in HC.GENERAL_FILETYPES:
pretty_mime = '{}: {}'.format( HC.mime_string_lookup[ HC.mimes_to_general_mimetypes[ mime ] ], pretty_mime )
return pretty_mime
def AddMediaViewerOptions( self ):
unset_filetypes = self._GetUnsetMediaViewFiletypes()
if len( unset_filetypes ) == 0:
QW.QMessageBox.warning( self, 'Warning', 'You cannot add any more specific filetype options!' )
return
choice_tuples = [ ( self._GetPrettyMime( mime ), mime ) for mime in unset_filetypes ]
try:
mime = ClientGUIDialogsQuick.SelectFromList( self, 'select the filetype to add', choice_tuples, sort_tuples = True )
except HydrusExceptions.CancelledException:
return
data = self._GetCopyOfGeneralMediaViewOptions( mime )
title = 'add media view options information'
with ClientGUITopLevelWindowsPanels.DialogEdit( self, title ) as dlg:
panel = ClientGUIScrolledPanelsEdit.EditMediaViewOptionsPanel( dlg, data )
dlg.SetPanel( panel )
if dlg.exec() == QW.QDialog.Accepted:
new_data = panel.GetValue()
self._media_viewer_options.AddDatas( ( new_data, ) )
def EditMediaViewerOptions( self ):
for data in self._media_viewer_options.GetData( only_selected = True ):
title = 'edit media view options information'
with ClientGUITopLevelWindowsPanels.DialogEdit( self, title ) as dlg:
panel = ClientGUIScrolledPanelsEdit.EditMediaViewOptionsPanel( dlg, data )
dlg.SetPanel( panel )
if dlg.exec() == QW.QDialog.Accepted:
new_data = panel.GetValue()
self._media_viewer_options.ReplaceData( data, new_data )
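# Live validation only: the parsed zooms are discarded here, and the 'HydrusInvalid' object name lets the stylesheet flag a bad entry. The real parse-and-save happens in UpdateOptions.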
def EventZoomsChanged( self, text ):
try:
media_zooms = [ float( media_zoom ) for media_zoom in self._media_zooms.text().split( ',' ) ]
self._media_zooms.setObjectName( '' )
except ValueError:
self._media_zooms.setObjectName( 'HydrusInvalid' )
self._media_zooms.style().polish( self._media_zooms )
self._media_zooms.update()
def UpdateOptions( self ):
HC.options[ 'animation_start_position' ] = self._animation_start_position.value() / 100.0
self._new_options.SetBoolean( 'disable_cv_for_gifs', self._disable_cv_for_gifs.isChecked() )
self._new_options.SetBoolean( 'load_images_with_pil', self._load_images_with_pil.isChecked() )
self._new_options.SetBoolean( 'use_system_ffmpeg', self._use_system_ffmpeg.isChecked() )
self._new_options.SetBoolean( 'always_loop_gifs', self._always_loop_gifs.isChecked() )
self._new_options.SetBoolean( 'anchor_and_hide_canvas_drags', self._anchor_and_hide_canvas_drags.isChecked() )
self._new_options.SetBoolean( 'touchscreen_canvas_drags_unanchor', self._touchscreen_canvas_drags_unanchor.isChecked() )
self._new_options.SetNoneableInteger( 'media_viewer_cursor_autohide_time_ms', self._media_viewer_cursor_autohide_time_ms.GetValue() )
mpv_conf_path = self._mpv_conf_path.GetPath()
if mpv_conf_path is not None and mpv_conf_path != '' and os.path.exists( mpv_conf_path ) and os.path.isfile( mpv_conf_path ):
dest_mpv_conf_path = HG.client_controller.GetMPVConfPath()
try:
HydrusPaths.MirrorFile( mpv_conf_path, dest_mpv_conf_path )
except Exception as e:
HydrusData.ShowText( 'Could not set the mpv conf path "{}" to "{}"! Error follows!'.format( mpv_conf_path, dest_mpv_conf_path ) )
HydrusData.ShowException( e )
self._new_options.SetInteger( 'animated_scanbar_height', self._animated_scanbar_height.value() )
self._new_options.SetInteger( 'animated_scanbar_nub_width', self._animated_scanbar_nub_width.value() )
self._new_options.SetInteger( 'media_viewer_zoom_center', self._media_viewer_zoom_center.GetValue() )
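# re-parse the zooms on save, discarding non-positive values; unparseable text is reported rather than saved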
try:
media_zooms = [ float( media_zoom ) for media_zoom in self._media_zooms.text().split( ',' ) ]
media_zooms = [ media_zoom for media_zoom in media_zooms if media_zoom > 0.0 ]
if len( media_zooms ) > 0:
self._new_options.SetMediaZooms( media_zooms )
except ValueError:
HydrusData.ShowText( 'Could not parse those zooms, so they were not saved!' )
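# listctrl rows are ( mime, *options ) tuples; convert back to a dict keyed by mime for storage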
mimes_to_media_view_options = {}
for data in self._media_viewer_options.GetData():
data = list( data )
mime = data[0]
value = data[1:]
mimes_to_media_view_options[ mime ] = value
self._new_options.SetMediaViewOptions( mimes_to_media_view_options )
class _PopupPanel( QW.QWidget ):
def __init__( self, parent, new_options ):
QW.QWidget.__init__( self, parent )
self._new_options = new_options
#
self._popup_panel = ClientGUICommon.StaticBox( self, 'popup window toaster' )
self._popup_message_character_width = QP.MakeQSpinBox( self._popup_panel, min = 16, max = 256 )
self._popup_message_force_min_width = QW.QCheckBox( self._popup_panel )
self._freeze_message_manager_when_mouse_on_other_monitor = QW.QCheckBox( self._popup_panel )
self._freeze_message_manager_when_mouse_on_other_monitor.setToolTip( 'This is useful if you have a virtual desktop and find the popup manager restores strangely when you hop back to the hydrus display.' )
self._freeze_message_manager_when_main_gui_minimised = QW.QCheckBox( self._popup_panel )
self._freeze_message_manager_when_main_gui_minimised.setToolTip( 'This is useful if the popup toaster restores strangely after minimised changes.' )
self._hide_message_manager_on_gui_iconise = QW.QCheckBox( self._popup_panel )
self._hide_message_manager_on_gui_iconise.setToolTip( 'If your message manager does not automatically minimise with your main gui, try this. It can lead to unusual show and positioning behaviour on window managers that do not support it, however.' )
self._hide_message_manager_on_gui_deactive = QW.QCheckBox( self._popup_panel )
self._hide_message_manager_on_gui_deactive.setToolTip( 'If your message manager stays up after you minimise the program to the system tray using a custom window manager, try this out! It hides the popup messages as soon as the main gui loses focus.' )
self._notify_client_api_cookies = QW.QCheckBox( self._popup_panel )
self._notify_client_api_cookies.setToolTip( 'This will make a short-lived popup message every time you get new cookie information over the Client API.' )
#
self._popup_message_character_width.setValue( self._new_options.GetInteger( 'popup_message_character_width' ) )
self._popup_message_force_min_width.setChecked( self._new_options.GetBoolean( 'popup_message_force_min_width' ) )
self._freeze_message_manager_when_mouse_on_other_monitor.setChecked( self._new_options.GetBoolean( 'freeze_message_manager_when_mouse_on_other_monitor' ) )
self._freeze_message_manager_when_main_gui_minimised.setChecked( self._new_options.GetBoolean( 'freeze_message_manager_when_main_gui_minimised' ) )
self._hide_message_manager_on_gui_iconise.setChecked( self._new_options.GetBoolean( 'hide_message_manager_on_gui_iconise' ) )
self._hide_message_manager_on_gui_deactive.setChecked( self._new_options.GetBoolean( 'hide_message_manager_on_gui_deactive' ) )
self._notify_client_api_cookies.setChecked( self._new_options.GetBoolean( 'notify_client_api_cookies' ) )
#
rows = []
rows.append( ( 'Approximate max width of popup messages (in characters): ', self._popup_message_character_width ) )
rows.append( ( 'BUGFIX: Force this width as the minimum width for all popup messages: ', self._popup_message_force_min_width ) )
rows.append( ( 'Freeze the popup toaster when mouse is on another display: ', self._freeze_message_manager_when_mouse_on_other_monitor ) )
rows.append( ( 'Freeze the popup toaster when the main gui is minimised: ', self._freeze_message_manager_when_main_gui_minimised ) )
rows.append( ( 'BUGFIX: Hide the popup toaster when the main gui is minimised: ', self._hide_message_manager_on_gui_iconise ) )
rows.append( ( 'BUGFIX: Hide the popup toaster when the main gui loses focus: ', self._hide_message_manager_on_gui_deactive ) )
rows.append( ( 'Make a short-lived popup on cookie updates through the Client API: ', self._notify_client_api_cookies ) )
gridbox = ClientGUICommon.WrapInGrid( self._popup_panel, rows )
self._popup_panel.Add( gridbox, CC.FLAGS_EXPAND_SIZER_PERPENDICULAR )
vbox = QP.VBoxLayout()
QP.AddToLayout( vbox, self._popup_panel, CC.FLAGS_EXPAND_SIZER_PERPENDICULAR )
vbox.addStretch( 1 )
self.setLayout( vbox )
def UpdateOptions( self ):
self._new_options.SetInteger( 'popup_message_character_width', self._popup_message_character_width.value() )
self._new_options.SetBoolean( 'popup_message_force_min_width', self._popup_message_force_min_width.isChecked() )
self._new_options.SetBoolean( 'freeze_message_manager_when_mouse_on_other_monitor', self._freeze_message_manager_when_mouse_on_other_monitor.isChecked() )
self._new_options.SetBoolean( 'freeze_message_manager_when_main_gui_minimised', self._freeze_message_manager_when_main_gui_minimised.isChecked() )
self._new_options.SetBoolean( 'hide_message_manager_on_gui_iconise', self._hide_message_manager_on_gui_iconise.isChecked() )
self._new_options.SetBoolean( 'hide_message_manager_on_gui_deactive', self._hide_message_manager_on_gui_deactive.isChecked() )
self._new_options.SetBoolean( 'notify_client_api_cookies', self._notify_client_api_cookies.isChecked() )
class _RegexPanel( QW.QWidget ):
def __init__( self, parent ):
QW.QWidget.__init__( self, parent )
regex_favourites = HC.options[ 'regex_favourites' ]
self._regex_panel = ClientGUIScrolledPanelsEdit.EditRegexFavourites( self, regex_favourites )
vbox = QP.VBoxLayout()
QP.AddToLayout( vbox, self._regex_panel, CC.FLAGS_EXPAND_BOTH_WAYS )
self.setLayout( vbox )
def UpdateOptions( self ):
regex_favourites = self._regex_panel.GetValue()
HC.options[ 'regex_favourites' ] = regex_favourites
class _SearchPanel( QW.QWidget ):
def __init__( self, parent, new_options ):
QW.QWidget.__init__( self, parent )
self._new_options = new_options
#
self._autocomplete_panel = ClientGUICommon.StaticBox( self, 'autocomplete' )
self._autocomplete_float_main_gui = QW.QCheckBox( self._autocomplete_panel )
tt = 'The autocomplete dropdown can either \'float\' on top of the main window, or if that does not work well for you, it can embed into the parent panel.'
self._autocomplete_float_main_gui.setToolTip( tt )
self._autocomplete_float_frames = QW.QCheckBox( self._autocomplete_panel )
tt = 'The autocomplete dropdown can either \'float\' on top of dialogs like _manage tags_, or if that does not work well for you (it can sometimes annoyingly overlap the ok/cancel buttons), it can embed into the parent dialog panel.'
self._autocomplete_float_frames.setToolTip( tt )
self._ac_read_list_height_num_chars = QP.MakeQSpinBox( self._autocomplete_panel, min = 1, max = 128 )
tt = 'Read autocompletes are those in search pages, where you are looking through existing tags to find your files.'
self._ac_read_list_height_num_chars.setToolTip( tt )
self._ac_write_list_height_num_chars = QP.MakeQSpinBox( self._autocomplete_panel, min = 1, max = 128 )
tt = 'Write autocompletes are those in most dialogs, where you are adding new tags to files.'
self._ac_write_list_height_num_chars.setToolTip( tt )
self._always_show_system_everything = QW.QCheckBox( self._autocomplete_panel )
tt = 'After users get some experience with the program and a larger collection, they tend to have less use for system:everything.'
self._always_show_system_everything.setToolTip( tt )
self._filter_inbox_and_archive_predicates = QW.QCheckBox( self._autocomplete_panel )
tt = 'If everything is current in the inbox (or archive), then there is no use listing it or its opposite--it either does not change the search or it produces nothing. If you find it jarring though, turn it off here!'
self._filter_inbox_and_archive_predicates.setToolTip( tt )
#
self._autocomplete_float_main_gui.setChecked( self._new_options.GetBoolean( 'autocomplete_float_main_gui' ) )
self._autocomplete_float_frames.setChecked( self._new_options.GetBoolean( 'autocomplete_float_frames' ) )
self._ac_read_list_height_num_chars.setValue( self._new_options.GetInteger( 'ac_read_list_height_num_chars' ) )
self._ac_write_list_height_num_chars.setValue( self._new_options.GetInteger( 'ac_write_list_height_num_chars' ) )
self._always_show_system_everything.setChecked( self._new_options.GetBoolean( 'always_show_system_everything' ) )
self._filter_inbox_and_archive_predicates.setChecked( self._new_options.GetBoolean( 'filter_inbox_and_archive_predicates' ) )
#
vbox = QP.VBoxLayout()
message = 'The autocomplete dropdown list is the panel that hangs below the tag input text box on search pages.'
st = ClientGUICommon.BetterStaticText( self._autocomplete_panel, label = message )
self._autocomplete_panel.Add( st, CC.FLAGS_CENTER )
rows = []
#
rows.append( ( 'Autocomplete results float in main gui: ', self._autocomplete_float_main_gui ) )
rows.append( ( 'Autocomplete results float in other windows: ', self._autocomplete_float_frames ) )
rows.append( ( '\'Read\' autocomplete list height: ', self._ac_read_list_height_num_chars ) )
rows.append( ( '\'Write\' autocomplete list height: ', self._ac_write_list_height_num_chars ) )
rows.append( ( 'show system:everything even if total files is over 10,000: ', self._always_show_system_everything ) )
rows.append( ( 'hide inbox and archive system predicates if either has no files: ', self._filter_inbox_and_archive_predicates ) )
gridbox = ClientGUICommon.WrapInGrid( self._autocomplete_panel, rows )
self._autocomplete_panel.Add( gridbox, CC.FLAGS_EXPAND_SIZER_BOTH_WAYS )
#
QP.AddToLayout( vbox, self._autocomplete_panel, CC.FLAGS_EXPAND_PERPENDICULAR )
vbox.addStretch( 1 )
self.setLayout( vbox )
def UpdateOptions( self ):
self._new_options.SetBoolean( 'autocomplete_float_main_gui', self._autocomplete_float_main_gui.isChecked() )
self._new_options.SetBoolean( 'autocomplete_float_frames', self._autocomplete_float_frames.isChecked() )
self._new_options.SetInteger( 'ac_read_list_height_num_chars', self._ac_read_list_height_num_chars.value() )
self._new_options.SetInteger( 'ac_write_list_height_num_chars', self._ac_write_list_height_num_chars.value() )
self._new_options.SetBoolean( 'always_show_system_everything', self._always_show_system_everything.isChecked() )
self._new_options.SetBoolean( 'filter_inbox_and_archive_predicates', self._filter_inbox_and_archive_predicates.isChecked() )
class _SortCollectPanel( QW.QWidget ):
def __init__( self, parent ):
QW.QWidget.__init__( self, parent )
self._default_media_sort = ClientGUIResultsSortCollect.MediaSortControl( self )
self._fallback_media_sort = ClientGUIResultsSortCollect.MediaSortControl( self )
self._save_page_sort_on_change = QW.QCheckBox( self )
self._default_media_collect = ClientGUIResultsSortCollect.MediaCollectControl( self, silent = True )
namespace_sorting_box = ClientGUICommon.StaticBox( self, 'namespace sorting' )
self._namespace_sort_by = ClientGUIListBoxes.QueueListBox( namespace_sorting_box, 8, self._ConvertNamespaceTupleToSortString, self._AddNamespaceSort, self._EditNamespaceSort )
#
self._new_options = HG.client_controller.new_options
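# stored sorts can fail to deserialise (e.g. after an options format change), so fall back to safe defaults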
try:
self._default_media_sort.SetSort( self._new_options.GetDefaultSort() )
except Exception:
media_sort = ClientMedia.MediaSort( ( 'system', CC.SORT_FILES_BY_FILESIZE ), CC.SORT_ASC )
self._default_media_sort.SetSort( media_sort )
try:
self._fallback_media_sort.SetSort( self._new_options.GetFallbackSort() )
except Exception:
media_sort = ClientMedia.MediaSort( ( 'system', CC.SORT_FILES_BY_IMPORT_TIME ), CC.SORT_ASC )
self._fallback_media_sort.SetSort( media_sort )
self._namespace_sort_by.AddDatas( [ media_sort.sort_type[1] for media_sort in HG.client_controller.new_options.GetDefaultNamespaceSorts() ] )
self._save_page_sort_on_change.setChecked( self._new_options.GetBoolean( 'save_page_sort_on_change' ) )
#
sort_by_text = 'You can manage your namespace sorting schemes here.'
sort_by_text += os.linesep
sort_by_text += 'The client will sort media by comparing their namespaces, moving from left to right until an inequality is found.'
sort_by_text += os.linesep
sort_by_text += 'Any namespaces here will also appear in your collect-by dropdowns.'
namespace_sorting_box.Add( ClientGUICommon.BetterStaticText( namespace_sorting_box, sort_by_text ), CC.FLAGS_EXPAND_PERPENDICULAR )
namespace_sorting_box.Add( self._namespace_sort_by, CC.FLAGS_EXPAND_BOTH_WAYS )
rows = []
rows.append( ( 'Default sort: ', self._default_media_sort ) )
rows.append( ( 'Secondary sort (when primary gives two equal values): ', self._fallback_media_sort ) )
rows.append( ( 'Update default sort every time a new sort is manually chosen: ', self._save_page_sort_on_change ) )
rows.append( ( 'Default collect: ', self._default_media_collect ) )
gridbox = ClientGUICommon.WrapInGrid( self, rows )
vbox = QP.VBoxLayout()
QP.AddToLayout( vbox, gridbox, CC.FLAGS_EXPAND_SIZER_PERPENDICULAR )
QP.AddToLayout( vbox, namespace_sorting_box, CC.FLAGS_EXPAND_BOTH_WAYS )
self.setLayout( vbox )
def _AddNamespaceSort( self ):
default = ( ( 'creator', 'series', 'page' ), ClientTags.TAG_DISPLAY_ACTUAL )
return self._EditNamespaceSort( default )
def _ConvertNamespaceTupleToSortString( self, sort_data ):
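# only the namespaces appear in the summary string; the tag display type is not shown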
( namespaces, tag_display_type ) = sort_data
return '-'.join( namespaces )
def _EditNamespaceSort( self, sort_data ):
return ClientGUITags.EditNamespaceSort( self, sort_data )
def UpdateOptions( self ):
self._new_options.SetDefaultSort( self._default_media_sort.GetSort() )
self._new_options.SetFallbackSort( self._fallback_media_sort.GetSort() )
self._new_options.SetBoolean( 'save_page_sort_on_change', self._save_page_sort_on_change.isChecked() )
self._new_options.SetDefaultCollect( self._default_media_collect.GetValue() )
namespace_sorts = [ ClientMedia.MediaSort( sort_type = ( 'namespaces', sort_data ) ) for sort_data in self._namespace_sort_by.GetData() ]
self._new_options.SetDefaultNamespaceSorts( namespace_sorts )
class _SpeedAndMemoryPanel( QW.QWidget ):
def __init__( self, parent, new_options ):
QW.QWidget.__init__( self, parent )
self._new_options = new_options
#
thumbnail_cache_panel = ClientGUICommon.StaticBox( self, 'thumbnail cache' )
self._thumbnail_cache_size = QP.MakeQSpinBox( thumbnail_cache_panel, min=5, max=3000 )
self._thumbnail_cache_size.valueChanged.connect( self.EventThumbnailsUpdate )
self._estimated_number_thumbnails = QW.QLabel( '', thumbnail_cache_panel )
self._thumbnail_cache_timeout = ClientGUITime.TimeDeltaButton( thumbnail_cache_panel, min = 300, days = True, hours = True, minutes = True )
self._thumbnail_cache_timeout.setToolTip( 'The amount of time after which a thumbnail in the cache will naturally be removed, if it is not shunted out due to a new member exceeding the size limit.' )
image_cache_panel = ClientGUICommon.StaticBox( self, 'image cache' )
self._fullscreen_cache_size = QP.MakeQSpinBox( image_cache_panel, min=25, max=8192 )
self._fullscreen_cache_size.valueChanged.connect( self.EventImageCacheUpdate )
self._estimated_number_fullscreens = QW.QLabel( '', image_cache_panel )
self._image_cache_timeout = ClientGUITime.TimeDeltaButton( image_cache_panel, min = 300, days = True, hours = True, minutes = True )
self._image_cache_timeout.setToolTip( 'The amount of time after which a rendered image in the cache will naturally be removed, if it is not shunted out due to a new member exceeding the size limit.' )
self._media_viewer_prefetch_delay_base_ms = QP.MakeQSpinBox( image_cache_panel, min = 0, max = 2000 )
tt = 'How long to wait, after the current image is rendered, to start rendering neighbours. Does not matter so much any more, but if you have CPU lag, you can try boosting it a bit.'
self._media_viewer_prefetch_delay_base_ms.setToolTip( tt )
self._media_viewer_prefetch_num_previous = QP.MakeQSpinBox( image_cache_panel, min = 0, max = 5 )
self._media_viewer_prefetch_num_next = QP.MakeQSpinBox( image_cache_panel, min = 0, max = 5 )
self._image_cache_storage_limit_percentage = QP.MakeQSpinBox( image_cache_panel, min = 20, max = 50 )
self._image_cache_storage_limit_percentage_st = ClientGUICommon.BetterStaticText( image_cache_panel, label = '' )
self._image_cache_prefetch_limit_percentage = QP.MakeQSpinBox( image_cache_panel, min = 5, max = 20 )
self._image_cache_prefetch_limit_percentage_st = ClientGUICommon.BetterStaticText( image_cache_panel, label = '' )
image_tile_cache_panel = ClientGUICommon.StaticBox( self, 'image tile cache' )
self._image_tile_cache_size = ClientGUIControls.BytesControl( image_tile_cache_panel )
self._image_tile_cache_size.valueChanged.connect( self.EventImageTilesUpdate )
self._estimated_number_image_tiles = QW.QLabel( '', image_tile_cache_panel )
self._image_tile_cache_timeout = ClientGUITime.TimeDeltaButton( image_tile_cache_panel, min = 300, hours = True, minutes = True )
self._image_tile_cache_timeout.setToolTip( 'The amount of time after which a rendered image tile in the cache will naturally be removed, if it is not shunted out due to a new member exceeding the size limit.' )
self._ideal_tile_dimension = QP.MakeQSpinBox( image_tile_cache_panel, min = 256, max = 4096 )
self._ideal_tile_dimension.setToolTip( 'This is the square size the system will aim for. Smaller tiles are more memory efficient but prone to warping and other artifacts. Extreme values may waste CPU.' )
#
buffer_panel = ClientGUICommon.StaticBox( self, 'video buffer' )
self._video_buffer_size_mb = QP.MakeQSpinBox( buffer_panel, min=48, max=16*1024 )
self._video_buffer_size_mb.valueChanged.connect( self.EventVideoBufferUpdate )
self._estimated_number_video_frames = QW.QLabel( '', buffer_panel )
#
misc_panel = ClientGUICommon.StaticBox( self, 'misc' )
self._forced_search_limit = ClientGUICommon.NoneableSpinCtrl( misc_panel, '', min = 1, max = 100000 )
#
self._thumbnail_cache_size.setValue( int( HC.options['thumbnail_cache_size'] // 1048576 ) )
self._fullscreen_cache_size.setValue( int( HC.options['fullscreen_cache_size'] // 1048576 ) )
self._image_tile_cache_size.SetValue( self._new_options.GetInteger( 'image_tile_cache_size' ) )
self._thumbnail_cache_timeout.SetValue( self._new_options.GetInteger( 'thumbnail_cache_timeout' ) )
self._image_cache_timeout.SetValue( self._new_options.GetInteger( 'image_cache_timeout' ) )
self._image_tile_cache_timeout.SetValue( self._new_options.GetInteger( 'image_tile_cache_timeout' ) )
self._ideal_tile_dimension.setValue( self._new_options.GetInteger( 'ideal_tile_dimension' ) )
self._video_buffer_size_mb.setValue( self._new_options.GetInteger( 'video_buffer_size_mb' ) )
self._forced_search_limit.SetValue( self._new_options.GetNoneableInteger( 'forced_search_limit' ) )
self._media_viewer_prefetch_delay_base_ms.setValue( self._new_options.GetInteger( 'media_viewer_prefetch_delay_base_ms' ) )
self._media_viewer_prefetch_num_previous.setValue( self._new_options.GetInteger( 'media_viewer_prefetch_num_previous' ) )
self._media_viewer_prefetch_num_next.setValue( self._new_options.GetInteger( 'media_viewer_prefetch_num_next' ) )
self._image_cache_storage_limit_percentage.setValue( self._new_options.GetInteger( 'image_cache_storage_limit_percentage' ) )
self._image_cache_prefetch_limit_percentage.setValue( self._new_options.GetInteger( 'image_cache_prefetch_limit_percentage' ) )
#
vbox = QP.VBoxLayout()
text = 'These options are advanced! PROTIP: Do not go crazy here.'
st = ClientGUICommon.BetterStaticText( self, text )
QP.AddToLayout( vbox, st, CC.FLAGS_CENTER )
#
thumbnails_sizer = QP.HBoxLayout()
QP.AddToLayout( thumbnails_sizer, self._thumbnail_cache_size, CC.FLAGS_CENTER_PERPENDICULAR )
QP.AddToLayout( thumbnails_sizer, self._estimated_number_thumbnails, CC.FLAGS_CENTER_PERPENDICULAR )
fullscreens_sizer = QP.HBoxLayout()
QP.AddToLayout( fullscreens_sizer, self._fullscreen_cache_size, CC.FLAGS_CENTER_PERPENDICULAR )
QP.AddToLayout( fullscreens_sizer, self._estimated_number_fullscreens, CC.FLAGS_CENTER_PERPENDICULAR )
image_tiles_sizer = QP.HBoxLayout()
QP.AddToLayout( image_tiles_sizer, self._image_tile_cache_size, CC.FLAGS_CENTER_PERPENDICULAR )
QP.AddToLayout( image_tiles_sizer, self._estimated_number_image_tiles, CC.FLAGS_CENTER_PERPENDICULAR )
image_cache_storage_sizer = QP.HBoxLayout()
QP.AddToLayout( image_cache_storage_sizer, self._image_cache_storage_limit_percentage, CC.FLAGS_CENTER_PERPENDICULAR )
QP.AddToLayout( image_cache_storage_sizer, self._image_cache_storage_limit_percentage_st, CC.FLAGS_CENTER_PERPENDICULAR )
image_cache_prefetch_sizer = QP.HBoxLayout()
QP.AddToLayout( image_cache_prefetch_sizer, self._image_cache_prefetch_limit_percentage, CC.FLAGS_CENTER_PERPENDICULAR )
QP.AddToLayout( image_cache_prefetch_sizer, self._image_cache_prefetch_limit_percentage_st, CC.FLAGS_CENTER_PERPENDICULAR )
video_buffer_sizer = QP.HBoxLayout()
QP.AddToLayout( video_buffer_sizer, self._video_buffer_size_mb, CC.FLAGS_CENTER_PERPENDICULAR )
QP.AddToLayout( video_buffer_sizer, self._estimated_number_video_frames, CC.FLAGS_CENTER_PERPENDICULAR )
#
text = 'Does not change much, thumbs are cheap.'
st = ClientGUICommon.BetterStaticText( thumbnail_cache_panel, text )
thumbnail_cache_panel.Add( st, CC.FLAGS_EXPAND_PERPENDICULAR )
rows = []
rows.append( ( 'MB memory reserved for thumbnail cache:', thumbnails_sizer ) )
rows.append( ( 'Thumbnail cache timeout:', self._thumbnail_cache_timeout ) )
gridbox = ClientGUICommon.WrapInGrid( thumbnail_cache_panel, rows )
thumbnail_cache_panel.Add( gridbox, CC.FLAGS_EXPAND_SIZER_PERPENDICULAR )
QP.AddToLayout( vbox, thumbnail_cache_panel, CC.FLAGS_EXPAND_PERPENDICULAR )
#
text = 'Important if you want smooth navigation between different images in the media viewer. If you deal with huge images, bump up cache size and max size that can be cached or prefetched, but be prepared to pay the memory price.'
text += os.linesep * 2
text += 'Allowing more prefetch is great, but it needs CPU.'
st = ClientGUICommon.BetterStaticText( image_cache_panel, text )
st.setWordWrap( True )
image_cache_panel.Add( st, CC.FLAGS_EXPAND_PERPENDICULAR )
rows = []
rows.append( ( 'MB memory reserved for image cache:', fullscreens_sizer ) )
rows.append( ( 'Image cache timeout:', self._image_cache_timeout ) )
rows.append( ( 'Maximum image size (in % of cache) that can be cached:', image_cache_storage_sizer ) )
rows.append( ( 'Maximum image size (in % of cache) that will be prefetched:', image_cache_prefetch_sizer ) )
rows.append( ( 'Base ms delay for media viewer neighbour render prefetch:', self._media_viewer_prefetch_delay_base_ms ) )
rows.append( ( 'Num previous to prefetch:', self._media_viewer_prefetch_num_previous ) )
rows.append( ( 'Num next to prefetch:', self._media_viewer_prefetch_num_next ) )
gridbox = ClientGUICommon.WrapInGrid( image_cache_panel, rows )
image_cache_panel.Add( gridbox, CC.FLAGS_EXPAND_SIZER_PERPENDICULAR )
QP.AddToLayout( vbox, image_cache_panel, CC.FLAGS_EXPAND_PERPENDICULAR )
#
text = 'Important if you do a lot of zooming in and out on the same image or a small number of comparison images.'
st = ClientGUICommon.BetterStaticText( image_tile_cache_panel, text )
image_tile_cache_panel.Add( st, CC.FLAGS_EXPAND_PERPENDICULAR )
rows = []
rows.append( ( 'MB memory reserved for image tile cache:', image_tiles_sizer ) )
rows.append( ( 'Image tile cache timeout:', self._image_tile_cache_timeout ) )
rows.append( ( 'Ideal tile width/height px:', self._ideal_tile_dimension ) )
gridbox = ClientGUICommon.WrapInGrid( image_tile_cache_panel, rows )
image_tile_cache_panel.Add( gridbox, CC.FLAGS_EXPAND_SIZER_PERPENDICULAR )
QP.AddToLayout( vbox, image_tile_cache_panel, CC.FLAGS_EXPAND_PERPENDICULAR )
#
text = 'This old option does not apply to mpv! It only applies to the native hydrus animation renderer!'
text += os.linesep
text += 'Hydrus video rendering is CPU intensive.'
text += os.linesep
text += 'If you have a lot of memory, you can set a generous potential video buffer to compensate.'
text += os.linesep
text += 'If the video buffer can hold an entire video, it only needs to be rendered once and will play and loop very smoothly.'
text += os.linesep
text += 'PROTIP: Do not go crazy here.'
st = ClientGUICommon.BetterStaticText( buffer_panel, text )
st.setWordWrap( True )
buffer_panel.Add( st, CC.FLAGS_EXPAND_PERPENDICULAR )
rows = []
rows.append( ( 'MB memory for video buffer: ', video_buffer_sizer ) )
gridbox = ClientGUICommon.WrapInGrid( buffer_panel, rows )
buffer_panel.Add( gridbox, CC.FLAGS_EXPAND_SIZER_PERPENDICULAR )
QP.AddToLayout( vbox, buffer_panel, CC.FLAGS_EXPAND_PERPENDICULAR )
#
rows = []
rows.append( ( 'Forced system:limit for all searches: ', self._forced_search_limit ) )
gridbox = ClientGUICommon.WrapInGrid( misc_panel, rows )
misc_panel.Add( gridbox, CC.FLAGS_EXPAND_SIZER_PERPENDICULAR )
QP.AddToLayout( vbox, misc_panel, CC.FLAGS_EXPAND_PERPENDICULAR )
#
vbox.addStretch( 1 )
self.setLayout( vbox )
#
self._image_cache_storage_limit_percentage.valueChanged.connect( self.EventImageCacheUpdate )
self._image_cache_prefetch_limit_percentage.valueChanged.connect( self.EventImageCacheUpdate )
self.EventImageCacheUpdate()
self.EventThumbnailsUpdate( self._thumbnail_cache_size.value() )
self.EventImageTilesUpdate()
self.EventVideoBufferUpdate( self._video_buffer_size_mb.value() )
def EventImageCacheUpdate( self ):
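# estimate capacity at 3 bytes per pixel (24-bit RGB) of the current display,
# then express each percentage limit as the largest 16:9 image that would fit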
cache_size = self._fullscreen_cache_size.value() * 1048576
display_size = ClientGUIFunctions.GetDisplaySize( self )
estimated_bytes_per_fullscreen = 3 * display_size.width() * display_size.height()
estimate = cache_size // estimated_bytes_per_fullscreen
self._estimated_number_fullscreens.setText( '(about {}-{} images the size of your screen)'.format( HydrusData.ToHumanInt( estimate // 2 ), HydrusData.ToHumanInt( estimate * 2 ) ) )
num_pixels = cache_size * ( self._image_cache_storage_limit_percentage.value() / 100 ) / 3
unit_square = num_pixels / ( 16 * 9 )
unit_length = unit_square ** 0.5
resolution = ( int( 16 * unit_length ), int( 9 * unit_length ) )
self._image_cache_storage_limit_percentage_st.setText( 'about a {} image'.format( HydrusData.ConvertResolutionToPrettyString( resolution ) ) )
num_pixels = cache_size * ( self._image_cache_prefetch_limit_percentage.value() / 100 ) / 3
unit_square = num_pixels / ( 16 * 9 )
unit_length = unit_square ** 0.5
resolution = ( int( 16 * unit_length ), int( 9 * unit_length ) )
self._image_cache_prefetch_limit_percentage_st.setText( 'about a {} image'.format( HydrusData.ConvertResolutionToPrettyString( resolution ) ) )
def EventImageTilesUpdate( self ):
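# same 3-bytes-per-pixel estimate, expressed as how many screenfuls of tiles fit in the cache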
value = self._image_tile_cache_size.GetValue()
display_size = ClientGUIFunctions.GetDisplaySize( self )
estimated_bytes_per_fullscreen = 3 * display_size.width() * display_size.height()
estimate = value // estimated_bytes_per_fullscreen
self._estimated_number_image_tiles.setText( '(about {} fullscreens)'.format( HydrusData.ToHumanInt( estimate ) ) )
def EventThumbnailsUpdate( self, value ):
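# value is in MB; estimate how many thumbnails fit at the current thumbnail dimensions, 3 bytes per pixel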
( thumbnail_width, thumbnail_height ) = HC.options[ 'thumbnail_dimensions' ]
res_string = HydrusData.ConvertResolutionToPrettyString( ( thumbnail_width, thumbnail_height ) )
estimated_bytes_per_thumb = 3 * thumbnail_width * thumbnail_height
estimated_thumbs = ( value * 1024 * 1024 ) // estimated_bytes_per_thumb
self._estimated_number_thumbnails.setText( '(at '+res_string+', about '+HydrusData.ToHumanInt(estimated_thumbs)+' thumbnails)' )
def EventVideoBufferUpdate( self, value ):
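# value is in MB; a 720p RGB frame is 1280 x 720 x 3 bytes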
estimated_720p_frames = int( ( value * 1024 * 1024 ) // ( 1280 * 720 * 3 ) )
self._estimated_number_video_frames.setText( '(about '+HydrusData.ToHumanInt(estimated_720p_frames)+' frames of 720p video)' )
def UpdateOptions( self ):
HC.options[ 'thumbnail_cache_size' ] = self._thumbnail_cache_size.value() * 1048576
HC.options[ 'fullscreen_cache_size' ] = self._fullscreen_cache_size.value() * 1048576
self._new_options.SetInteger( 'image_tile_cache_size', self._image_tile_cache_size.GetValue() )
self._new_options.SetInteger( 'thumbnail_cache_timeout', self._thumbnail_cache_timeout.GetValue() )
self._new_options.SetInteger( 'image_cache_timeout', self._image_cache_timeout.GetValue() )
self._new_options.SetInteger( 'image_tile_cache_timeout', self._image_tile_cache_timeout.GetValue() )
self._new_options.SetInteger( 'ideal_tile_dimension', self._ideal_tile_dimension.value() )
self._new_options.SetInteger( 'media_viewer_prefetch_delay_base_ms', self._media_viewer_prefetch_delay_base_ms.value() )
self._new_options.SetInteger( 'media_viewer_prefetch_num_previous', self._media_viewer_prefetch_num_previous.value() )
self._new_options.SetInteger( 'media_viewer_prefetch_num_next', self._media_viewer_prefetch_num_next.value() )
self._new_options.SetInteger( 'image_cache_storage_limit_percentage', self._image_cache_storage_limit_percentage.value() )
self._new_options.SetInteger( 'image_cache_prefetch_limit_percentage', self._image_cache_prefetch_limit_percentage.value() )
self._new_options.SetInteger( 'video_buffer_size_mb', self._video_buffer_size_mb.value() )
self._new_options.SetNoneableInteger( 'forced_search_limit', self._forced_search_limit.GetValue() )
class _StylePanel( QW.QWidget ):
def __init__( self, parent, new_options ):
QW.QWidget.__init__( self, parent )
self._new_options = new_options
#
self._qt_style_name = ClientGUICommon.BetterChoice( self )
self._qt_stylesheet_name = ClientGUICommon.BetterChoice( self )
self._qt_style_name.addItem( 'use default ("{}")'.format( ClientGUIStyle.ORIGINAL_STYLE_NAME ), None )
try:
for name in ClientGUIStyle.GetAvailableStyles():
self._qt_style_name.addItem( name, name )
except HydrusExceptions.DataMissing as e:
HydrusData.ShowException( e )
self._qt_stylesheet_name.addItem( 'use default', None )
try:
for name in ClientGUIStyle.GetAvailableStylesheets():
self._qt_stylesheet_name.addItem( name, name )
except HydrusExceptions.DataMissing as e:
HydrusData.ShowException( e )
#
self._qt_style_name.SetValue( self._new_options.GetNoneableString( 'qt_style_name' ) )
self._qt_stylesheet_name.SetValue( self._new_options.GetNoneableString( 'qt_stylesheet_name' ) )
#
vbox = QP.VBoxLayout()
#
text = 'The current styles are what your Qt has available; the stylesheets are the .css and .qss files currently in install_dir/static/qss.'
st = ClientGUICommon.BetterStaticText( self, label = text )
QP.AddToLayout( vbox, st, CC.FLAGS_EXPAND_PERPENDICULAR )
rows = []
rows.append( ( 'Qt style:', self._qt_style_name ) )
rows.append( ( 'Qt stylesheet:', self._qt_stylesheet_name ) )
gridbox = ClientGUICommon.WrapInGrid( self, rows )
QP.AddToLayout( vbox, gridbox, CC.FLAGS_EXPAND_SIZER_PERPENDICULAR )
self.setLayout( vbox )
self._qt_style_name.currentIndexChanged.connect( self.StyleChanged )
self._qt_stylesheet_name.currentIndexChanged.connect( self.StyleChanged )
def StyleChanged( self ):
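# apply the choice immediately as a live preview; failures are reported but leave the dialog usable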
qt_style_name = self._qt_style_name.GetValue()
qt_stylesheet_name = self._qt_stylesheet_name.GetValue()
try:
if qt_style_name is None:
ClientGUIStyle.SetStyleFromName( ClientGUIStyle.ORIGINAL_STYLE_NAME )
else:
ClientGUIStyle.SetStyleFromName( qt_style_name )
except Exception as e:
QW.QMessageBox.critical( self, 'Critical', 'Could not apply style: {}'.format( str( e ) ) )
try:
if qt_stylesheet_name is None:
ClientGUIStyle.ClearStylesheet()
else:
ClientGUIStyle.SetStylesheetFromPath( qt_stylesheet_name )
except Exception as e:
QW.QMessageBox.critical( self, 'Critical', 'Could not apply stylesheet: {}'.format( str( e ) ) )
def UpdateOptions( self ):
self._new_options.SetNoneableString( 'qt_style_name', self._qt_style_name.GetValue() )
self._new_options.SetNoneableString( 'qt_stylesheet_name', self._qt_stylesheet_name.GetValue() )
class _SystemPanel( QW.QWidget ):
def __init__( self, parent, new_options ):
QW.QWidget.__init__( self, parent )
self._new_options = new_options
#
sleep_panel = ClientGUICommon.StaticBox( self, 'system sleep' )
self._wake_delay_period = QP.MakeQSpinBox( sleep_panel, min = 0, max = 60 )
tt = 'It sometimes takes a few seconds for your network adapter to reconnect after a wake. This adds a grace period after a detected wake-from-sleep to allow your OS to sort that out before Hydrus starts making requests.'
self._wake_delay_period.setToolTip( tt )
self._file_system_waits_on_wakeup = QW.QCheckBox( sleep_panel )
self._file_system_waits_on_wakeup.setToolTip( 'This is useful if your hydrus is stored on a NAS that takes a few seconds to get going after your machine resumes from sleep.' )
#
self._wake_delay_period.setValue( self._new_options.GetInteger( 'wake_delay_period' ) )
self._file_system_waits_on_wakeup.setChecked( self._new_options.GetBoolean( 'file_system_waits_on_wakeup' ) )
#
rows = []
rows.append( ( 'After a wake from system sleep, wait this many seconds before allowing new network access:', self._wake_delay_period ) )
rows.append( ( 'Include the file system in this wait: ', self._file_system_waits_on_wakeup ) )
gridbox = ClientGUICommon.WrapInGrid( sleep_panel, rows )
sleep_panel.Add( gridbox, CC.FLAGS_EXPAND_SIZER_PERPENDICULAR )
#
vbox = QP.VBoxLayout()
QP.AddToLayout( vbox, sleep_panel, CC.FLAGS_EXPAND_PERPENDICULAR )
vbox.addStretch( 1 )
self.setLayout( vbox )
def UpdateOptions( self ):
self._new_options.SetInteger( 'wake_delay_period', self._wake_delay_period.value() )
self._new_options.SetBoolean( 'file_system_waits_on_wakeup', self._file_system_waits_on_wakeup.isChecked() )
class _SystemTrayPanel( QW.QWidget ):
def __init__( self, parent, new_options ):
QW.QWidget.__init__( self, parent )
self._new_options = new_options
self._always_show_system_tray_icon = QW.QCheckBox( self )
self._minimise_client_to_system_tray = QW.QCheckBox( self )
self._close_client_to_system_tray = QW.QCheckBox( self )
self._start_client_in_system_tray = QW.QCheckBox( self )
#
self._always_show_system_tray_icon.setChecked( self._new_options.GetBoolean( 'always_show_system_tray_icon' ) )
self._minimise_client_to_system_tray.setChecked( self._new_options.GetBoolean( 'minimise_client_to_system_tray' ) )
self._close_client_to_system_tray.setChecked( self._new_options.GetBoolean( 'close_client_to_system_tray' ) )
self._start_client_in_system_tray.setChecked( self._new_options.GetBoolean( 'start_client_in_system_tray' ) )
#
vbox = QP.VBoxLayout()
rows = []
rows.append( ( 'Always show the hydrus system tray icon: ', self._always_show_system_tray_icon ) )
rows.append( ( 'Minimise the main window to system tray: ', self._minimise_client_to_system_tray ) )
rows.append( ( 'Close the main window to system tray: ', self._close_client_to_system_tray ) )
rows.append( ( 'Start the client minimised to system tray: ', self._start_client_in_system_tray ) )
gridbox = ClientGUICommon.WrapInGrid( self, rows )
from hydrus.client.gui import ClientGUISystemTray
if not ClientGUISystemTray.SystemTrayAvailable():
QP.AddToLayout( vbox, ClientGUICommon.BetterStaticText( self, 'Unfortunately, your system does not seem to have a supported system tray.' ), CC.FLAGS_EXPAND_PERPENDICULAR )
self._always_show_system_tray_icon.setEnabled( False )
self._minimise_client_to_system_tray.setEnabled( False )
self._close_client_to_system_tray.setEnabled( False )
self._start_client_in_system_tray.setEnabled( False )
elif not HC.PLATFORM_WINDOWS:
if not HG.client_controller.new_options.GetBoolean( 'advanced_mode' ):
label = 'This is turned off for non-advanced non-Windows users for now.'
self._always_show_system_tray_icon.setEnabled( False )
self._minimise_client_to_system_tray.setEnabled( False )
self._close_client_to_system_tray.setEnabled( False )
self._start_client_in_system_tray.setEnabled( False )
else:
label = 'This can be buggy/crashy on non-Windows, hydev will keep working on this.'
QP.AddToLayout( vbox, ClientGUICommon.BetterStaticText( self, label ), CC.FLAGS_EXPAND_PERPENDICULAR )
QP.AddToLayout( vbox, gridbox, CC.FLAGS_EXPAND_PERPENDICULAR )
vbox.addStretch( 1 )
self.setLayout( vbox )
def UpdateOptions( self ):
self._new_options.SetBoolean( 'always_show_system_tray_icon', self._always_show_system_tray_icon.isChecked() )
self._new_options.SetBoolean( 'minimise_client_to_system_tray', self._minimise_client_to_system_tray.isChecked() )
self._new_options.SetBoolean( 'close_client_to_system_tray', self._close_client_to_system_tray.isChecked() )
self._new_options.SetBoolean( 'start_client_in_system_tray', self._start_client_in_system_tray.isChecked() )
class _TagsPanel( QW.QWidget ):
def __init__( self, parent, new_options ):
QW.QWidget.__init__( self, parent )
self._new_options = new_options
#
general_panel = ClientGUICommon.StaticBox( self, 'general tag options' )
self._default_tag_sort = ClientGUITagSorting.TagSortControl( general_panel, self._new_options.GetDefaultTagSort(), show_siblings = True )
self._default_tag_repository = ClientGUICommon.BetterChoice( general_panel )
self._default_tag_service_search_page = ClientGUICommon.BetterChoice( general_panel )
self._expand_parents_on_storage_taglists = QW.QCheckBox( general_panel )
self._expand_parents_on_storage_autocomplete_taglists = QW.QCheckBox( general_panel )
self._ac_select_first_with_count = QW.QCheckBox( general_panel )
#
favourites_panel = ClientGUICommon.StaticBox( self, 'favourite tags' )
desc = 'These tags will appear in your tag autocomplete results area, under the \'favourites\' tab.'
favourites_st = ClientGUICommon.BetterStaticText( favourites_panel, desc )
favourites_st.setWordWrap( True )
self._favourites = ClientGUIListBoxes.ListBoxTagsStringsAddRemove( favourites_panel, CC.COMBINED_TAG_SERVICE_KEY, ClientTags.TAG_DISPLAY_STORAGE )
self._favourites_input = ClientGUIACDropdown.AutoCompleteDropdownTagsWrite( favourites_panel, self._favourites.AddTags, CC.LOCAL_FILE_SERVICE_KEY, CC.COMBINED_TAG_SERVICE_KEY, show_paste_button = True )
#
self._default_tag_service_search_page.addItem( 'all known tags', CC.COMBINED_TAG_SERVICE_KEY )
services = HG.client_controller.services_manager.GetServices( HC.REAL_TAG_SERVICES )
for service in services:
self._default_tag_repository.addItem( service.GetName(), service.GetServiceKey() )
self._default_tag_service_search_page.addItem( service.GetName(), service.GetServiceKey() )
default_tag_repository_key = HC.options[ 'default_tag_repository' ]
self._default_tag_repository.SetValue( default_tag_repository_key )
self._default_tag_service_search_page.SetValue( new_options.GetKey( 'default_tag_service_search_page' ) )
self._expand_parents_on_storage_taglists.setChecked( self._new_options.GetBoolean( 'expand_parents_on_storage_taglists' ) )
self._expand_parents_on_storage_taglists.setToolTip( 'This affects taglists in places like the manage tags dialog, where you edit tags as they actually are, and implied parents hang below tags.' )
self._expand_parents_on_storage_autocomplete_taglists.setChecked( self._new_options.GetBoolean( 'expand_parents_on_storage_autocomplete_taglists' ) )
self._expand_parents_on_storage_autocomplete_taglists.setToolTip( 'This affects the autocomplete results taglist.' )
self._ac_select_first_with_count.setChecked( self._new_options.GetBoolean( 'ac_select_first_with_count' ) )
#
self._favourites.SetTags( new_options.GetStringList( 'favourite_tags' ) )
#
vbox = QP.VBoxLayout()
rows = []
rows.append( ( 'Default tag service in manage tag dialogs: ', self._default_tag_repository ) )
rows.append( ( 'Default tag service in search pages: ', self._default_tag_service_search_page ) )
rows.append( ( 'Default tag sort: ', self._default_tag_sort ) )
rows.append( ( 'Show parents expanded by default on edit/write taglists: ', self._expand_parents_on_storage_taglists ) )
rows.append( ( 'Show parents expanded by default on edit/write autocomplete taglists: ', self._expand_parents_on_storage_autocomplete_taglists ) )
rows.append( ( 'By default, select the first tag result with actual count in write-autocomplete: ', self._ac_select_first_with_count ) )
gridbox = ClientGUICommon.WrapInGrid( general_panel, rows )
general_panel.Add( gridbox, CC.FLAGS_EXPAND_SIZER_PERPENDICULAR )
QP.AddToLayout( vbox, general_panel, CC.FLAGS_EXPAND_PERPENDICULAR )
#
favourites_panel.Add( favourites_st, CC.FLAGS_EXPAND_PERPENDICULAR )
favourites_panel.Add( self._favourites, CC.FLAGS_EXPAND_BOTH_WAYS )
favourites_panel.Add( self._favourites_input )
QP.AddToLayout( vbox, favourites_panel, CC.FLAGS_EXPAND_BOTH_WAYS )
#
self.setLayout( vbox )
def UpdateOptions( self ):
HC.options[ 'default_tag_repository' ] = self._default_tag_repository.GetValue()
self._new_options.SetDefaultTagSort( self._default_tag_sort.GetValue() )
self._new_options.SetBoolean( 'expand_parents_on_storage_taglists', self._expand_parents_on_storage_taglists.isChecked() )
self._new_options.SetBoolean( 'expand_parents_on_storage_autocomplete_taglists', self._expand_parents_on_storage_autocomplete_taglists.isChecked() )
self._new_options.SetBoolean( 'ac_select_first_with_count', self._ac_select_first_with_count.isChecked() )
self._new_options.SetKey( 'default_tag_service_search_page', self._default_tag_service_search_page.GetValue() )
#
self._new_options.SetStringList( 'favourite_tags', list( self._favourites.GetTags() ) )
class _TagPresentationPanel( QW.QWidget ):
def __init__( self, parent, new_options ):
QW.QWidget.__init__( self, parent )
self._new_options = new_options
#
tag_summary_generator = self._new_options.GetTagSummaryGenerator( 'thumbnail_top' )
self._thumbnail_top = ClientGUITags.TagSummaryGeneratorButton( self, tag_summary_generator )
tag_summary_generator = self._new_options.GetTagSummaryGenerator( 'thumbnail_bottom_right' )
self._thumbnail_bottom_right = ClientGUITags.TagSummaryGeneratorButton( self, tag_summary_generator )
tag_summary_generator = self._new_options.GetTagSummaryGenerator( 'media_viewer_top' )
self._media_viewer_top = ClientGUITags.TagSummaryGeneratorButton( self, tag_summary_generator )
#
render_panel = ClientGUICommon.StaticBox( self, 'namespace rendering' )
render_st = ClientGUICommon.BetterStaticText( render_panel, label = 'Namespaced tags are stored and directly edited in hydrus as "namespace:subtag", but most presentation windows can display them differently.' )
self._show_namespaces = QW.QCheckBox( render_panel )
self._namespace_connector = QW.QLineEdit( render_panel )
self._replace_tag_underscores_with_spaces = QW.QCheckBox( render_panel )
#
namespace_colours_panel = ClientGUICommon.StaticBox( self, 'namespace colours' )
self._namespace_colours = ClientGUIListBoxes.ListBoxTagsColourOptions( namespace_colours_panel, HC.options[ 'namespace_colours' ] )
self._edit_namespace_colour = QW.QPushButton( 'edit selected', namespace_colours_panel )
self._edit_namespace_colour.clicked.connect( self.EventEditNamespaceColour )
self._new_namespace_colour = QW.QLineEdit( namespace_colours_panel )
self._new_namespace_colour.installEventFilter( ClientGUICommon.TextCatchEnterEventFilter( self._new_namespace_colour, self.AddNamespaceColour ) )
#
self._show_namespaces.setChecked( new_options.GetBoolean( 'show_namespaces' ) )
self._namespace_connector.setText( new_options.GetString( 'namespace_connector' ) )
self._replace_tag_underscores_with_spaces.setChecked( new_options.GetBoolean( 'replace_tag_underscores_with_spaces' ) )
#
namespace_colours_panel.Add( self._namespace_colours, CC.FLAGS_EXPAND_BOTH_WAYS )
namespace_colours_panel.Add( self._new_namespace_colour, CC.FLAGS_EXPAND_PERPENDICULAR )
namespace_colours_panel.Add( self._edit_namespace_colour, CC.FLAGS_EXPAND_PERPENDICULAR )
#
vbox = QP.VBoxLayout()
#
rows = []
rows.append( ( 'On thumbnail top:', self._thumbnail_top ) )
rows.append( ( 'On thumbnail bottom-right:', self._thumbnail_bottom_right ) )
rows.append( ( 'On media viewer top:', self._media_viewer_top ) )
gridbox = ClientGUICommon.WrapInGrid( self, rows )
QP.AddToLayout( vbox, gridbox, CC.FLAGS_EXPAND_SIZER_PERPENDICULAR )
#
rows = []
rows.append( ( 'Show namespaces: ', self._show_namespaces ) )
rows.append( ( 'If shown, namespace connecting string: ', self._namespace_connector ) )
rows.append( ( 'EXPERIMENTAL: Replace all underscores with spaces: ', self._replace_tag_underscores_with_spaces ) )
gridbox = ClientGUICommon.WrapInGrid( render_panel, rows )
render_panel.Add( render_st, CC.FLAGS_EXPAND_PERPENDICULAR )
render_panel.Add( gridbox, CC.FLAGS_EXPAND_SIZER_PERPENDICULAR )
QP.AddToLayout( vbox, render_panel, CC.FLAGS_EXPAND_PERPENDICULAR )
#
QP.AddToLayout( vbox, namespace_colours_panel, CC.FLAGS_EXPAND_BOTH_WAYS )
#
self.setLayout( vbox )
def EventEditNamespaceColour( self ):
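# launch a colour picker for each selected namespace in turn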
results = self._namespace_colours.GetSelectedNamespaceColours()
for ( namespace, ( r, g, b ) ) in list( results.items() ):
colour = QG.QColor( r, g, b )
colour = QW.QColorDialog.getColor( colour, self, 'Namespace colour', QW.QColorDialog.ShowAlphaChannel )
if colour.isValid():
self._namespace_colours.SetNamespaceColour( namespace, colour )
def AddNamespaceColour( self ):
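# a new namespace starts with a random colour; the user can edit it afterwards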
namespace = self._new_namespace_colour.text()
if namespace != '':
self._namespace_colours.SetNamespaceColour( namespace, QG.QColor( random.randint(0,255), random.randint(0,255), random.randint(0,255) ) )
self._new_namespace_colour.clear()
def UpdateOptions( self ):
self._new_options.SetTagSummaryGenerator( 'thumbnail_top', self._thumbnail_top.GetValue() )
self._new_options.SetTagSummaryGenerator( 'thumbnail_bottom_right', self._thumbnail_bottom_right.GetValue() )
self._new_options.SetTagSummaryGenerator( 'media_viewer_top', self._media_viewer_top.GetValue() )
self._new_options.SetBoolean( 'show_namespaces', self._show_namespaces.isChecked() )
self._new_options.SetString( 'namespace_connector', self._namespace_connector.text() )
self._new_options.SetBoolean( 'replace_tag_underscores_with_spaces', self._replace_tag_underscores_with_spaces.isChecked() )
HC.options[ 'namespace_colours' ] = self._namespace_colours.GetNamespaceColours()
class _TagSuggestionsPanel( QW.QWidget ):
def __init__( self, parent, new_options ):
QW.QWidget.__init__( self, parent )
self._new_options = new_options
suggested_tags_panel = ClientGUICommon.StaticBox( self, 'suggested tags' )
self._suggested_tags_width = QP.MakeQSpinBox( suggested_tags_panel, min=20, max=65535 )
self._suggested_tags_layout = ClientGUICommon.BetterChoice( suggested_tags_panel )
self._suggested_tags_layout.addItem( 'notebook', 'notebook' )
self._suggested_tags_layout.addItem( 'side-by-side', 'columns' )
suggest_tags_panel_notebook = QW.QTabWidget( suggested_tags_panel )
#
suggested_tags_favourites_panel = QW.QWidget( suggest_tags_panel_notebook )
suggested_tags_favourites_panel.setMinimumWidth( 400 )
self._suggested_favourites_services = ClientGUICommon.BetterChoice( suggested_tags_favourites_panel )
tag_services = HG.client_controller.services_manager.GetServices( HC.REAL_TAG_SERVICES )
for tag_service in tag_services:
self._suggested_favourites_services.addItem( tag_service.GetName(), tag_service.GetServiceKey() )
self._suggested_favourites = ClientGUIListBoxes.ListBoxTagsStringsAddRemove( suggested_tags_favourites_panel, CC.COMBINED_TAG_SERVICE_KEY, ClientTags.TAG_DISPLAY_STORAGE )
self._current_suggested_favourites_service = None
self._suggested_favourites_dict = {}
self._suggested_favourites_input = ClientGUIACDropdown.AutoCompleteDropdownTagsWrite( suggested_tags_favourites_panel, self._suggested_favourites.AddTags, CC.LOCAL_FILE_SERVICE_KEY, CC.COMBINED_TAG_SERVICE_KEY, show_paste_button = True )
#
suggested_tags_related_panel = QW.QWidget( suggest_tags_panel_notebook )
self._show_related_tags = QW.QCheckBox( suggested_tags_related_panel )
self._related_tags_search_1_duration_ms = QP.MakeQSpinBox( suggested_tags_related_panel, min=50, max=60000 )
self._related_tags_search_2_duration_ms = QP.MakeQSpinBox( suggested_tags_related_panel, min=50, max=60000 )
self._related_tags_search_3_duration_ms = QP.MakeQSpinBox( suggested_tags_related_panel, min=50, max=60000 )
#
suggested_tags_file_lookup_script_panel = QW.QWidget( suggest_tags_panel_notebook )
self._show_file_lookup_script_tags = QW.QCheckBox( suggested_tags_file_lookup_script_panel )
self._favourite_file_lookup_script = ClientGUICommon.BetterChoice( suggested_tags_file_lookup_script_panel )
script_names = sorted( HG.client_controller.Read( 'serialisable_names', HydrusSerialisable.SERIALISABLE_TYPE_PARSE_ROOT_FILE_LOOKUP ) )
for name in script_names:
self._favourite_file_lookup_script.addItem( name, name )
#
suggested_tags_recent_panel = QW.QWidget( suggest_tags_panel_notebook )
self._num_recent_tags = ClientGUICommon.NoneableSpinCtrl( suggested_tags_recent_panel, 'number of recent tags to show', min = 1, none_phrase = 'do not show' )
#
self._suggested_tags_width.setValue( self._new_options.GetInteger( 'suggested_tags_width' ) )
self._suggested_tags_layout.SetValue( self._new_options.GetNoneableString( 'suggested_tags_layout' ) )
self._show_related_tags.setChecked( self._new_options.GetBoolean( 'show_related_tags' ) )
self._related_tags_search_1_duration_ms.setValue( self._new_options.GetInteger( 'related_tags_search_1_duration_ms' ) )
self._related_tags_search_2_duration_ms.setValue( self._new_options.GetInteger( 'related_tags_search_2_duration_ms' ) )
self._related_tags_search_3_duration_ms.setValue( self._new_options.GetInteger( 'related_tags_search_3_duration_ms' ) )
self._show_file_lookup_script_tags.setChecked( self._new_options.GetBoolean( 'show_file_lookup_script_tags' ) )
self._favourite_file_lookup_script.SetValue( self._new_options.GetNoneableString( 'favourite_file_lookup_script' ) )
self._num_recent_tags.SetValue( self._new_options.GetNoneableInteger( 'num_recent_tags' ) )
#
panel_vbox = QP.VBoxLayout()
QP.AddToLayout( panel_vbox, self._suggested_favourites_services, CC.FLAGS_EXPAND_PERPENDICULAR )
QP.AddToLayout( panel_vbox, self._suggested_favourites, CC.FLAGS_EXPAND_BOTH_WAYS )
QP.AddToLayout( panel_vbox, self._suggested_favourites_input, CC.FLAGS_EXPAND_PERPENDICULAR )
suggested_tags_favourites_panel.setLayout( panel_vbox )
#
panel_vbox = QP.VBoxLayout()
rows = []
rows.append( ( 'Show related tags on single-file manage tags windows: ', self._show_related_tags ) )
rows.append( ( 'Initial search duration (ms): ', self._related_tags_search_1_duration_ms ) )
rows.append( ( 'Medium search duration (ms): ', self._related_tags_search_2_duration_ms ) )
rows.append( ( 'Thorough search duration (ms): ', self._related_tags_search_3_duration_ms ) )
gridbox = ClientGUICommon.WrapInGrid( suggested_tags_related_panel, rows )
desc = 'This will search the database for statistically related tags based on what your focused file already has.'
QP.AddToLayout( panel_vbox, ClientGUICommon.BetterStaticText(suggested_tags_related_panel,desc), CC.FLAGS_EXPAND_PERPENDICULAR )
QP.AddToLayout( panel_vbox, gridbox, CC.FLAGS_EXPAND_SIZER_PERPENDICULAR )
suggested_tags_related_panel.setLayout( panel_vbox )
#
panel_vbox = QP.VBoxLayout()
rows = []
rows.append( ( 'Show file lookup scripts on single-file manage tags windows: ', self._show_file_lookup_script_tags ) )
rows.append( ( 'Favourite file lookup script: ', self._favourite_file_lookup_script ) )
gridbox = ClientGUICommon.WrapInGrid( suggested_tags_file_lookup_script_panel, rows )
QP.AddToLayout( panel_vbox, gridbox, CC.FLAGS_EXPAND_SIZER_PERPENDICULAR )
suggested_tags_file_lookup_script_panel.setLayout( panel_vbox )
#
panel_vbox = QP.VBoxLayout()
QP.AddToLayout( panel_vbox, self._num_recent_tags, CC.FLAGS_EXPAND_PERPENDICULAR )
panel_vbox.addStretch( 1 )
suggested_tags_recent_panel.setLayout( panel_vbox )
#
suggest_tags_panel_notebook.addTab( suggested_tags_favourites_panel, 'favourites' )
suggest_tags_panel_notebook.addTab( suggested_tags_related_panel, 'related' )
suggest_tags_panel_notebook.addTab( suggested_tags_file_lookup_script_panel, 'file lookup scripts' )
suggest_tags_panel_notebook.addTab( suggested_tags_recent_panel, 'recent' )
#
rows = []
rows.append( ( 'Width of suggested tags columns: ', self._suggested_tags_width ) )
rows.append( ( 'Column layout: ', self._suggested_tags_layout ) )
gridbox = ClientGUICommon.WrapInGrid( suggested_tags_panel, rows )
desc = 'The manage tags dialog can provide several kinds of tag suggestions. For simplicity, most are turned off by default.'
suggested_tags_panel.Add( ClientGUICommon.BetterStaticText( suggested_tags_panel, desc ), CC.FLAGS_EXPAND_PERPENDICULAR )
suggested_tags_panel.Add( gridbox, CC.FLAGS_EXPAND_SIZER_PERPENDICULAR )
suggested_tags_panel.Add( suggest_tags_panel_notebook, CC.FLAGS_EXPAND_BOTH_WAYS )
#
vbox = QP.VBoxLayout()
QP.AddToLayout( vbox, suggested_tags_panel, CC.FLAGS_EXPAND_BOTH_WAYS )
self.setLayout( vbox )
#
self._suggested_favourites_services.currentIndexChanged.connect( self.EventSuggestedFavouritesService )
self.EventSuggestedFavouritesService( None )
def _SaveCurrentSuggestedFavourites( self ):
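# stash the visible list against the current service so switching services does not lose unsaved edits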
if self._current_suggested_favourites_service is not None:
self._suggested_favourites_dict[ self._current_suggested_favourites_service ] = self._suggested_favourites.GetTags()
def EventSuggestedFavouritesService( self, index ):
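# save the outgoing service's list, then load the new one's favourites from the stash or, failing that, from options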
self._SaveCurrentSuggestedFavourites()
self._current_suggested_favourites_service = self._suggested_favourites_services.GetValue()
if self._current_suggested_favourites_service in self._suggested_favourites_dict:
favourites = self._suggested_favourites_dict[ self._current_suggested_favourites_service ]
else:
favourites = self._new_options.GetSuggestedTagsFavourites( self._current_suggested_favourites_service )
self._suggested_favourites.SetTagServiceKey( self._current_suggested_favourites_service )
self._suggested_favourites.SetTags( favourites )
self._suggested_favourites_input.SetTagServiceKey( self._current_suggested_favourites_service )
self._suggested_favourites_input.SetDisplayTagServiceKey( self._current_suggested_favourites_service )
def UpdateOptions( self ):
self._new_options.SetInteger( 'suggested_tags_width', self._suggested_tags_width.value() )
self._new_options.SetNoneableString( 'suggested_tags_layout', self._suggested_tags_layout.GetValue() )
self._SaveCurrentSuggestedFavourites()
for ( service_key, favourites ) in list(self._suggested_favourites_dict.items()):
self._new_options.SetSuggestedTagsFavourites( service_key, favourites )
self._new_options.SetBoolean( 'show_related_tags', self._show_related_tags.isChecked() )
self._new_options.SetInteger( 'related_tags_search_1_duration_ms', self._related_tags_search_1_duration_ms.value() )
self._new_options.SetInteger( 'related_tags_search_2_duration_ms', self._related_tags_search_2_duration_ms.value() )
self._new_options.SetInteger( 'related_tags_search_3_duration_ms', self._related_tags_search_3_duration_ms.value() )
self._new_options.SetBoolean( 'show_file_lookup_script_tags', self._show_file_lookup_script_tags.isChecked() )
self._new_options.SetNoneableString( 'favourite_file_lookup_script', self._favourite_file_lookup_script.GetValue() )
self._new_options.SetNoneableInteger( 'num_recent_tags', self._num_recent_tags.GetValue() )
class _ThumbnailsPanel( QW.QWidget ):
def __init__( self, parent, new_options ):
QW.QWidget.__init__( self, parent )
self._new_options = new_options
self._thumbnail_width = QP.MakeQSpinBox( self, min=20, max=2048 )
self._thumbnail_height = QP.MakeQSpinBox( self, min=20, max=2048 )
self._thumbnail_border = QP.MakeQSpinBox( self, min=0, max=20 )
self._thumbnail_margin = QP.MakeQSpinBox( self, min=0, max=20 )
self._video_thumbnail_percentage_in = QP.MakeQSpinBox( self, min=0, max=100 )
self._thumbnail_scroll_rate = QW.QLineEdit( self )
self._thumbnail_fill = QW.QCheckBox( self )
self._thumbnail_visibility_scroll_percent = QP.MakeQSpinBox( self, min=1, max=99 )
self._thumbnail_visibility_scroll_percent.setToolTip( 'Lower numbers will cause fewer scrolls, higher numbers more.' )
self._media_background_bmp_path = QP.FilePickerCtrl( self )
#
( thumbnail_width, thumbnail_height ) = HC.options[ 'thumbnail_dimensions' ]
self._thumbnail_width.setValue( thumbnail_width )
self._thumbnail_height.setValue( thumbnail_height )
self._thumbnail_border.setValue( self._new_options.GetInteger( 'thumbnail_border' ) )
self._thumbnail_margin.setValue( self._new_options.GetInteger( 'thumbnail_margin' ) )
self._video_thumbnail_percentage_in.setValue( self._new_options.GetInteger( 'video_thumbnail_percentage_in' ) )
self._thumbnail_scroll_rate.setText( self._new_options.GetString( 'thumbnail_scroll_rate' ) )
self._thumbnail_fill.setChecked( self._new_options.GetBoolean( 'thumbnail_fill' ) )
self._thumbnail_visibility_scroll_percent.setValue( self._new_options.GetInteger( 'thumbnail_visibility_scroll_percent' ) )
media_background_bmp_path = self._new_options.GetNoneableString( 'media_background_bmp_path' )
if media_background_bmp_path is not None:
self._media_background_bmp_path.SetPath( media_background_bmp_path )
#
rows = []
rows.append( ( 'Thumbnail width: ', self._thumbnail_width ) )
rows.append( ( 'Thumbnail height: ', self._thumbnail_height ) )
rows.append( ( 'Thumbnail border: ', self._thumbnail_border ) )
rows.append( ( 'Thumbnail margin: ', self._thumbnail_margin ) )
rows.append( ( 'Generate video thumbnails this % in: ', self._video_thumbnail_percentage_in ) )
rows.append( ( 'Do not scroll down on key navigation if thumbnail at least this % visible: ', self._thumbnail_visibility_scroll_percent ) )
rows.append( ( 'EXPERIMENTAL: Scroll thumbnails at this rate per scroll tick: ', self._thumbnail_scroll_rate ) )
rows.append( ( 'EXPERIMENTAL: Zoom thumbnails so they \'fill\' their space: ', self._thumbnail_fill ) )
rows.append( ( 'EXPERIMENTAL: Image path for thumbnail panel background image (set blank to clear): ', self._media_background_bmp_path ) )
gridbox = ClientGUICommon.WrapInGrid( self, rows )
vbox = QP.VBoxLayout()
QP.AddToLayout( vbox, gridbox, CC.FLAGS_EXPAND_SIZER_PERPENDICULAR )
self.setLayout( vbox )
def UpdateOptions( self ):
new_thumbnail_dimensions = [self._thumbnail_width.value(), self._thumbnail_height.value()]
HC.options[ 'thumbnail_dimensions' ] = new_thumbnail_dimensions
self._new_options.SetInteger( 'thumbnail_border', self._thumbnail_border.value() )
self._new_options.SetInteger( 'thumbnail_margin', self._thumbnail_margin.value() )
self._new_options.SetInteger( 'video_thumbnail_percentage_in', self._video_thumbnail_percentage_in.value() )
try:
thumbnail_scroll_rate = self._thumbnail_scroll_rate.text()
float( thumbnail_scroll_rate ) # validate that the text parses as a number
self._new_options.SetString( 'thumbnail_scroll_rate', thumbnail_scroll_rate )
except ValueError:
pass # not a valid rate--leave the stored option unchanged
self._new_options.SetBoolean( 'thumbnail_fill', self._thumbnail_fill.isChecked() )
self._new_options.SetInteger( 'thumbnail_visibility_scroll_percent', self._thumbnail_visibility_scroll_percent.value() )
media_background_bmp_path = self._media_background_bmp_path.GetPath()
if media_background_bmp_path == '':
media_background_bmp_path = None
self._new_options.SetNoneableString( 'media_background_bmp_path', media_background_bmp_path )
def CommitChanges( self ):
for page in self._listbook.GetActivePages():
page.UpdateOptions()
try:
HG.client_controller.WriteSynchronous( 'save_options', HC.options )
HG.client_controller.WriteSynchronous( 'serialisable', self._new_options )
except:
QW.QMessageBox.critical( self, 'Error', traceback.format_exc() )
class ManageURLsPanel( ClientGUIScrolledPanels.ManagePanel ):
def __init__( self, parent, media ):
ClientGUIScrolledPanels.ManagePanel.__init__( self, parent )
media = ClientMedia.FlattenMedia( media )
self._current_media = [ m.Duplicate() for m in media ]
self._multiple_files_warning = ClientGUICommon.BetterStaticText( self, label = 'Warning: you are editing URLs for multiple files!\nBe very careful about adding URLs here, as they will apply to everything.\nAdding the same URL to multiple files is only appropriate for gallery-type URLs!' )
self._multiple_files_warning.setObjectName( 'HydrusWarning' )
if len( self._current_media ) == 1:
self._multiple_files_warning.hide()
self._urls_listbox = QW.QListWidget( self )
self._urls_listbox.setSortingEnabled( True )
self._urls_listbox.setSelectionMode( QW.QAbstractItemView.ExtendedSelection )
self._urls_listbox.itemDoubleClicked.connect( self.EventListDoubleClick )
self._listbox_event_filter = QP.WidgetEventFilter( self._urls_listbox )
self._listbox_event_filter.EVT_KEY_DOWN( self.EventListKeyDown )
( width, height ) = ClientGUIFunctions.ConvertTextToPixels( self._urls_listbox, ( 120, 10 ) )
self._urls_listbox.setMinimumWidth( width )
self._urls_listbox.setMinimumHeight( height )
self._url_input = QW.QLineEdit( self )
self._url_input.installEventFilter( ClientGUICommon.TextCatchEnterEventFilter( self._url_input, self.AddURL ) )
self._copy_button = ClientGUICommon.BetterButton( self, 'copy all', self._Copy )
self._paste_button = ClientGUICommon.BetterButton( self, 'paste', self._Paste )
self._urls_to_add = set()
self._urls_to_remove = set()
#
self._pending_content_updates = []
self._current_urls_count = collections.Counter()
self._UpdateList()
#
hbox = QP.HBoxLayout()
QP.AddToLayout( hbox, self._copy_button, CC.FLAGS_CENTER_PERPENDICULAR )
QP.AddToLayout( hbox, self._paste_button, CC.FLAGS_CENTER_PERPENDICULAR )
vbox = QP.VBoxLayout()
QP.AddToLayout( vbox, self._multiple_files_warning, CC.FLAGS_EXPAND_PERPENDICULAR )
QP.AddToLayout( vbox, self._urls_listbox, CC.FLAGS_EXPAND_BOTH_WAYS )
QP.AddToLayout( vbox, self._url_input, CC.FLAGS_EXPAND_PERPENDICULAR )
QP.AddToLayout( vbox, hbox, CC.FLAGS_ON_RIGHT )
self.widget().setLayout( vbox )
self._my_shortcut_handler = ClientGUIShortcuts.ShortcutsHandler( self, [ 'global', 'media', 'main_gui' ] )
ClientGUIFunctions.SetFocusLater( self._url_input )
def _Copy( self ):
urls = sorted( self._current_urls_count.keys() )
text = os.linesep.join( urls )
HG.client_controller.pub( 'clipboard', 'text', text )
def _EnterURL( self, url, only_add = False ):
normalised_url = HG.client_controller.network_engine.domain_manager.NormaliseURL( url )
addee_media = set()
for m in self._current_media:
locations_manager = m.GetLocationsManager()
if normalised_url not in locations_manager.GetURLs():
addee_media.add( m )
if len( addee_media ) > 0:
addee_hashes = { m.GetHash() for m in addee_media }
content_update = HydrusData.ContentUpdate( HC.CONTENT_TYPE_URLS, HC.CONTENT_UPDATE_ADD, ( ( normalised_url, ), addee_hashes ) )
for m in addee_media:
m.GetMediaResult().ProcessContentUpdate( CC.COMBINED_LOCAL_FILE_SERVICE_KEY, content_update )
self._pending_content_updates.append( content_update )
#
self._UpdateList()
def _Paste( self ):
try:
raw_text = HG.client_controller.GetClipboardText()
except HydrusExceptions.DataMissing as e:
QW.QMessageBox.warning( self, 'Warning', str(e) )
return
try:
for url in HydrusText.DeserialiseNewlinedTexts( raw_text ):
if url != '':
self._EnterURL( url, only_add = True )
except Exception as e:
QW.QMessageBox.warning( self, 'Warning', 'I could not understand what was in the clipboard: {}'.format( e ) )
def _RemoveURL( self, url ):
removee_media = set()
for m in self._current_media:
locations_manager = m.GetLocationsManager()
if url in locations_manager.GetURLs():
removee_media.add( m )
if len( removee_media ) > 0:
removee_hashes = { m.GetHash() for m in removee_media }
content_update = HydrusData.ContentUpdate( HC.CONTENT_TYPE_URLS, HC.CONTENT_UPDATE_DELETE, ( ( url, ), removee_hashes ) )
for m in removee_media:
m.GetMediaResult().ProcessContentUpdate( CC.COMBINED_LOCAL_FILE_SERVICE_KEY, content_update )
self._pending_content_updates.append( content_update )
#
self._UpdateList()
def _SetSearchFocus( self ):
self._url_input.setFocus( QC.Qt.OtherFocusReason )
def _UpdateList( self ):
self._urls_listbox.clear()
self._current_urls_count = collections.Counter()
for m in self._current_media:
locations_manager = m.GetLocationsManager()
for url in locations_manager.GetURLs():
self._current_urls_count[ url ] += 1
for ( url, count ) in self._current_urls_count.items():
if len( self._current_media ) == 1:
label = url
else:
label = '{} ({})'.format( url, count )
item = QW.QListWidgetItem()
item.setText( label )
item.setData( QC.Qt.UserRole, url )
self._urls_listbox.addItem( item )
def EventListDoubleClick( self, item ):
urls = [ QP.GetClientData( self._urls_listbox, selection.row() ) for selection in list( self._urls_listbox.selectedIndexes() ) ]
for url in urls:
self._RemoveURL( url )
if len( urls ) == 1:
url = urls[0]
self._url_input.setText( url )
def EventListKeyDown( self, event ):
( modifier, key ) = ClientGUIShortcuts.ConvertKeyEventToSimpleTuple( event )
if key in ClientGUIShortcuts.DELETE_KEYS_QT:
urls = [ QP.GetClientData( self._urls_listbox, selection.row() ) for selection in list( self._urls_listbox.selectedIndexes() ) ]
for url in urls:
self._RemoveURL( url )
else:
return True # not a delete key--let the event propagate (was event.ignore())
def AddURL( self ):
url = self._url_input.text()
if url == '':
self.parentWidget().DoOK()
else:
try:
self._EnterURL( url )
self._url_input.clear()
except Exception as e:
QW.QMessageBox.warning( self, 'Warning', 'I could not add that URL: {}'.format( e ) )
def CommitChanges( self ):
if len( self._pending_content_updates ) > 0:
service_keys_to_content_updates = { CC.COMBINED_LOCAL_FILE_SERVICE_KEY : self._pending_content_updates }
HG.client_controller.WriteSynchronous( 'content_updates', service_keys_to_content_updates )
def ProcessApplicationCommand( self, command: CAC.ApplicationCommand ):
command_processed = True
if command.IsSimpleCommand():
action = command.GetSimpleAction()
if action == CAC.SIMPLE_MANAGE_FILE_URLS:
self._OKParent()
elif action == CAC.SIMPLE_SET_SEARCH_FOCUS:
self._SetSearchFocus()
else:
command_processed = False
else:
command_processed = False
return command_processed
class RepairFileSystemPanel( ClientGUIScrolledPanels.ManagePanel ):
def __init__( self, parent, missing_locations ):
ClientGUIScrolledPanels.ManagePanel.__init__( self, parent )
self._only_thumbs = True
self._incorrect_locations = {}
self._correct_locations = {}
for ( incorrect_location, prefix ) in missing_locations:
self._incorrect_locations[ prefix ] = incorrect_location
if prefix.startswith( 'f' ):
self._only_thumbs = False
text = 'This dialog has launched because some expected file storage directories were not found. This is a serious error. You have two options:'
text += os.linesep * 2
text += '1) If you know what these should be (e.g. you recently remapped their external drive to another location), update the paths here manually. For most users, this will be clicking _add a possibly correct location_ and then selecting the new folder where the subdirectories all went. You can repeat this if your folders are missing in multiple locations. Check everything reports _ok!_'
text += os.linesep * 2
text += 'Although it is best if you can find everything, you only _have_ to fix the subdirectories starting with \'f\', which store your original files. Those starting with \'t\' and \'r\' are for your thumbnails, which can be regenerated with a bit of work.'
text += os.linesep * 2
text += 'Then hit \'apply\', and the client will launch. You should double-check all your locations under database->migrate database immediately.'
text += os.linesep * 2
text += '2) If the locations are not available, or you do not know what they should be, or you wish to fix this outside of the program, hit \'cancel\' to gracefully cancel client boot. Feel free to contact hydrus dev for help.'
if self._only_thumbs:
text += os.linesep * 2
text += 'SPECIAL NOTE FOR YOUR SITUATION: The only paths missing are thumbnail paths. If you cannot recover these folders, you can hit apply to create empty paths at the original or corrected locations and then run a maintenance routine to regenerate the thumbnails from their originals.'
st = ClientGUICommon.BetterStaticText( self, text )
st.setWordWrap( True )
self._locations = ClientGUIListCtrl.BetterListCtrl( self, CGLC.COLUMN_LIST_REPAIR_LOCATIONS.ID, 12, self._ConvertPrefixToListCtrlTuples, activation_callback = self._SetLocations )
self._set_button = ClientGUICommon.BetterButton( self, 'set correct location', self._SetLocations )
self._add_button = ClientGUICommon.BetterButton( self, 'add a possibly correct location (let the client figure out what it contains)', self._AddLocation )
# add a button here for 'try to fill them in for me'. you give it a dir, and it tries to figure out and fill in the prefixes for you
#
self._locations.AddDatas( [ prefix for ( incorrect_location, prefix ) in missing_locations ] )
self._locations.Sort()
#
vbox = QP.VBoxLayout()
QP.AddToLayout( vbox, st, CC.FLAGS_EXPAND_PERPENDICULAR )
QP.AddToLayout( vbox, self._locations, CC.FLAGS_EXPAND_BOTH_WAYS )
QP.AddToLayout( vbox, self._set_button, CC.FLAGS_ON_RIGHT )
QP.AddToLayout( vbox, self._add_button, CC.FLAGS_ON_RIGHT )
self.widget().setLayout( vbox )
def _AddLocation( self ):
with QP.DirDialog( self, 'Select the potential correct location.' ) as dlg:
if dlg.exec() == QW.QDialog.Accepted:
path = dlg.GetPath()
for prefix in self._locations.GetData():
ok = os.path.exists( os.path.join( path, prefix ) )
if ok:
self._correct_locations[ prefix ] = ( path, ok )
self._locations.UpdateDatas()
def _ConvertPrefixToListCtrlTuples( self, prefix ):
incorrect_location = self._incorrect_locations[ prefix ]
if prefix in self._correct_locations:
( correct_location, ok ) = self._correct_locations[ prefix ]
if ok:
pretty_ok = 'ok!'
else:
pretty_ok = 'not found'
else:
correct_location = ''
ok = None
pretty_ok = ''
pretty_incorrect_location = incorrect_location
pretty_prefix = prefix
pretty_correct_location = correct_location
display_tuple = ( pretty_incorrect_location, pretty_prefix, pretty_correct_location, pretty_ok )
sort_tuple = ( incorrect_location, prefix, correct_location, ok )
return ( display_tuple, sort_tuple )
def _GetValue( self ):
correct_rows = []
thumb_problems = False
for prefix in self._locations.GetData():
incorrect_location = self._incorrect_locations[ prefix ]
if prefix not in self._correct_locations:
if prefix.startswith( 'f' ):
raise HydrusExceptions.VetoException( 'You did not correct all the file locations!' )
else:
thumb_problems = True
correct_location = incorrect_location
else:
( correct_location, ok ) = self._correct_locations[ prefix ]
if not ok:
if prefix.startswith( 'f' ):
raise HydrusExceptions.VetoException( 'You did not find all the correct file locations!' )
else:
thumb_problems = True
correct_rows.append( ( prefix, correct_location ) )
return ( correct_rows, thumb_problems )
def _SetLocations( self ):
prefixes = self._locations.GetData( only_selected = True )
if len( prefixes ) > 0:
with QP.DirDialog( self, 'Select correct location.' ) as dlg:
if dlg.exec() == QW.QDialog.Accepted:
path = dlg.GetPath()
for prefix in prefixes:
ok = os.path.exists( os.path.join( path, prefix ) )
self._correct_locations[ prefix ] = ( path, ok )
self._locations.UpdateDatas()
def CheckValid( self ):
# raises veto if invalid
self._GetValue()
def CommitChanges( self ):
( correct_rows, thumb_problems ) = self._GetValue()
HG.client_controller.WriteSynchronous( 'repair_client_files', correct_rows )
def UserIsOKToOK( self ):
( correct_rows, thumb_problems ) = self._GetValue()
if thumb_problems:
message = 'Some or all of your incorrect paths have not been corrected, but they are all thumbnail paths.'
message += os.linesep * 2
message += 'Would you like instead to create new empty subdirectories at the previous (or corrected, if you have entered them) locations?'
message += os.linesep * 2
message += 'You can run database->regenerate->thumbnails to fill them up again.'
result = ClientGUIDialogsQuick.GetYesNo( self, message )
if result != QW.QDialog.Accepted:
return False
return True
| EditMediaViewerOptions |
api_file.go | /*
* Copyright 2020 zhaoyunxing.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package dingtalk
import (
"net/http"
"net/url"
"github.com/zhaoyunxing92/dingtalk/v2/constant"
"github.com/zhaoyunxing92/dingtalk/v2/request"
"github.com/zhaoyunxing92/dingtalk/v2/response"
)
| func (ding *DingTalk) MediaUpload(req request.UploadFile) (media response.MediaUpload, err error) {
query := url.Values{}
query.Add("type", req.Genre)
return media, ding.Request(http.MethodPost, constant.MediaUploadKey, query, req, &media)
} | // MediaUpload uploads a media file |
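// A minimal call sketch (illustrative, not from the source): only the Genre
// field of request.UploadFile is visible in this file; the client setup and
// any other request fields are assumptions.
//
//	media, err := ding.MediaUpload(request.UploadFile{Genre: "image"})
//	if err != nil {
//		// handle the upload failure
//	}
//	_ = media // inspect the returned response.MediaUpload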
organisation_model.py | from peewee import *
import datetime
from config import *
database = PostgresqlDatabase(POSTGRES_DATABASE, user=POSTGRES_USER, password=POSTGRES_PASSWORD, host=POSTGRES_HOST)
class TblOrganisation(Model):
id = PrimaryKeyField()
identifier = CharField()
type = IntegerField()
country = CharField()
is_org_file = BooleanField(default=False)
is_publisher = BooleanField(default=False)
last_updated = DateTimeField(null=True, default=lambda: datetime.datetime.now().strftime('%Y-%m-%d')) # callable, so the default is computed per row rather than once at import time
class Meta:
|
class TblName(Model):
organisation = ForeignKeyField(TblOrganisation, to_field="id", related_name='names')
name = TextField()
is_primary = BooleanField(default=True)
language = CharField()
class Meta:
db_table = "names"
database = database
def getLanguages(row):
knownheader = ["name", "identifier", "type", "country", "countrycode", "is_org_file", "is_publisher", "last_updated"]
languages = []
for key in row.keys():
key = key.strip()
if not key in knownheader and not key in languages:
languages.append(key)
return languages
| db_table = "organisations"
database = database |
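# A minimal usage sketch (illustrative; assumes the POSTGRES_* settings in
# config.py point at a reachable database):
if __name__ == '__main__':
    database.connect()
    database.create_tables([TblOrganisation, TblName], safe=True)
    publisher_count = TblOrganisation.select().where(TblOrganisation.is_publisher == True).count()
    print('publishers:', publisher_count)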
spacesBucketObject.go | // *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
// *** Do not edit by hand unless you're certain you know what you are doing! ***
package digitalocean
import (
"context"
"reflect"
"github.com/pkg/errors"
"github.com/pulumi/pulumi/sdk/v3/go/pulumi"
)
// Provides a bucket object resource for Spaces, DigitalOcean's object storage product.
// The `SpacesBucketObject` resource allows the provider to upload content
// to Spaces.
//
// The [Spaces API](https://docs.digitalocean.com/reference/api/spaces-api/) was
// designed to be interoperable with Amazon's AWS S3 API. This allows users to
// interact with the service while using the tools they already know. Spaces
// mirrors S3's authentication framework and requests to Spaces require a key pair
// similar to Amazon's Access ID and Secret Key.
//
// The authentication requirement can be met by either setting the
// `SPACES_ACCESS_KEY_ID` and `SPACES_SECRET_ACCESS_KEY` environment variables or
// the provider's `spacesAccessId` and `spacesSecretKey` arguments to the
// access ID and secret you generate via the DigitalOcean control panel. For
// example:
//
// ```go
// package main
//
// import (
// "github.com/pulumi/pulumi-digitalocean/sdk/v4/go/digitalocean"
// "github.com/pulumi/pulumi/sdk/v3/go/pulumi"
// )
//
// func main() {
// pulumi.Run(func(ctx *pulumi.Context) error {
// _, err := digitalocean.NewSpacesBucket(ctx, "static_assets", nil)
// if err != nil {
// return err
// }
// return nil
// })
// }
// ```
//
// For more information, See [An Introduction to DigitalOcean Spaces](https://www.digitalocean.com/community/tutorials/an-introduction-to-digitalocean-spaces)
//
// ## Example Usage
// ### Create a Key in a Spaces Bucket
//
// ```go
// package main
//
// import (
// "github.com/pulumi/pulumi-digitalocean/sdk/v4/go/digitalocean"
// "github.com/pulumi/pulumi/sdk/v3/go/pulumi"
// )
//
// func main() {
// pulumi.Run(func(ctx *pulumi.Context) error {
// foobar, err := digitalocean.NewSpacesBucket(ctx, "foobar", &digitalocean.SpacesBucketArgs{
// Region: pulumi.String("nyc3"),
// })
// if err != nil {
// return err
// }
// _, err = digitalocean.NewSpacesBucketObject(ctx, "index", &digitalocean.SpacesBucketObjectArgs{
// Region: foobar.Region,
// Bucket: foobar.Name,
// Key: pulumi.String("index.html"),
// Content: pulumi.String("<html><body><p>This page is empty.</p></body></html>"),
// ContentType: pulumi.String("text/html"),
// })
// if err != nil {
// return err
// }
// return nil
// })
// }
// ```
//
// ## Import
//
// Importing this resource is not supported.
type SpacesBucketObject struct {
pulumi.CustomResourceState
// The canned ACL to apply. DigitalOcean supports "private" and "public-read". (Defaults to "private".)
Acl pulumi.StringPtrOutput `pulumi:"acl"`
// The name of the bucket to put the file in.
Bucket pulumi.StringOutput `pulumi:"bucket"`
// Specifies caching behavior along the request/reply chain. Read [w3c cacheControl](http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9) for further details.
CacheControl pulumi.StringPtrOutput `pulumi:"cacheControl"`
// Literal string value to use as the object content, which will be uploaded as UTF-8-encoded text.
Content pulumi.StringPtrOutput `pulumi:"content"`
// Base64-encoded data that will be decoded and uploaded as raw bytes for the object content. This allows safely uploading non-UTF8 binary data, but is recommended only for small content such as the result of the `gzipbase64` function with small text strings. For larger objects, use `source` to stream the content from a disk file.
ContentBase64 pulumi.StringPtrOutput `pulumi:"contentBase64"`
// Specifies presentational information for the object. Read [w3c contentDisposition](http://www.w3.org/Protocols/rfc2616/rfc2616-sec19.html#sec19.5.1) for further information.
ContentDisposition pulumi.StringPtrOutput `pulumi:"contentDisposition"`
// Specifies what content encodings have been applied to the object and thus what decoding mechanisms must be applied to obtain the media-type referenced by the Content-Type header field. Read [w3c content encoding](http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.11) for further information.
ContentEncoding pulumi.StringPtrOutput `pulumi:"contentEncoding"`
// The language the content is in e.g. en-US or en-GB.
ContentLanguage pulumi.StringPtrOutput `pulumi:"contentLanguage"`
// A standard MIME type describing the format of the object data, e.g. application/octet-stream. All Valid MIME Types are valid for this input.
ContentType pulumi.StringOutput `pulumi:"contentType"`
// Used to trigger updates.
Etag pulumi.StringOutput `pulumi:"etag"`
// Allow the object to be deleted by removing any legal hold on any object version.
// Default is `false`. This value should be set to `true` only if the bucket has S3 object lock enabled.
ForceDestroy pulumi.BoolPtrOutput `pulumi:"forceDestroy"`
// The name of the object once it is in the bucket.
Key pulumi.StringOutput `pulumi:"key"`
// A mapping of keys/values to provision metadata (will be automatically prefixed by `x-amz-meta-`, note that only lowercase labels are currently supported by the AWS Go API).
Metadata pulumi.StringMapOutput `pulumi:"metadata"`
// The region where the bucket resides (Defaults to `nyc3`)
Region pulumi.StringOutput `pulumi:"region"`
// The path to a file that will be read and uploaded as raw bytes for the object content.
Source pulumi.StringPtrOutput `pulumi:"source"`
// A unique version ID value for the object, if bucket versioning is enabled.
VersionId pulumi.StringOutput `pulumi:"versionId"`
// Specifies a target URL for [website redirect](http://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html).
WebsiteRedirect pulumi.StringPtrOutput `pulumi:"websiteRedirect"`
}
// NewSpacesBucketObject registers a new resource with the given unique name, arguments, and options.
func NewSpacesBucketObject(ctx *pulumi.Context,
name string, args *SpacesBucketObjectArgs, opts ...pulumi.ResourceOption) (*SpacesBucketObject, error) {
if args == nil {
return nil, errors.New("missing one or more required arguments")
}
if args.Bucket == nil {
return nil, errors.New("invalid value for required argument 'Bucket'")
}
if args.Key == nil {
return nil, errors.New("invalid value for required argument 'Key'")
}
if args.Region == nil {
return nil, errors.New("invalid value for required argument 'Region'")
}
var resource SpacesBucketObject
err := ctx.RegisterResource("digitalocean:index/spacesBucketObject:SpacesBucketObject", name, args, &resource, opts...)
if err != nil |
return &resource, nil
}
// GetSpacesBucketObject gets an existing SpacesBucketObject resource's state with the given name, ID, and optional
// state properties that are used to uniquely qualify the lookup (nil if not required).
func GetSpacesBucketObject(ctx *pulumi.Context,
name string, id pulumi.IDInput, state *SpacesBucketObjectState, opts ...pulumi.ResourceOption) (*SpacesBucketObject, error) {
var resource SpacesBucketObject
err := ctx.ReadResource("digitalocean:index/spacesBucketObject:SpacesBucketObject", name, id, state, &resource, opts...)
if err != nil {
return nil, err
}
return &resource, nil
}
// Input properties used for looking up and filtering SpacesBucketObject resources.
type spacesBucketObjectState struct {
// The canned ACL to apply. DigitalOcean supports "private" and "public-read". (Defaults to "private".)
Acl *string `pulumi:"acl"`
// The name of the bucket to put the file in.
Bucket *string `pulumi:"bucket"`
// Specifies caching behavior along the request/reply chain. Read [w3c cacheControl](http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9) for further details.
CacheControl *string `pulumi:"cacheControl"`
// Literal string value to use as the object content, which will be uploaded as UTF-8-encoded text.
Content *string `pulumi:"content"`
// Base64-encoded data that will be decoded and uploaded as raw bytes for the object content. This allows safely uploading non-UTF8 binary data, but is recommended only for small content such as the result of the `gzipbase64` function with small text strings. For larger objects, use `source` to stream the content from a disk file.
ContentBase64 *string `pulumi:"contentBase64"`
// Specifies presentational information for the object. Read [w3c contentDisposition](http://www.w3.org/Protocols/rfc2616/rfc2616-sec19.html#sec19.5.1) for further information.
ContentDisposition *string `pulumi:"contentDisposition"`
// Specifies what content encodings have been applied to the object and thus what decoding mechanisms must be applied to obtain the media-type referenced by the Content-Type header field. Read [w3c content encoding](http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.11) for further information.
ContentEncoding *string `pulumi:"contentEncoding"`
// The language the content is in e.g. en-US or en-GB.
ContentLanguage *string `pulumi:"contentLanguage"`
// A standard MIME type describing the format of the object data, e.g. application/octet-stream. All Valid MIME Types are valid for this input.
ContentType *string `pulumi:"contentType"`
// Used to trigger updates.
Etag *string `pulumi:"etag"`
// Allow the object to be deleted by removing any legal hold on any object version.
// Default is `false`. This value should be set to `true` only if the bucket has S3 object lock enabled.
ForceDestroy *bool `pulumi:"forceDestroy"`
// The name of the object once it is in the bucket.
Key *string `pulumi:"key"`
// A mapping of keys/values to provision metadata (will be automatically prefixed by `x-amz-meta-`, note that only lowercase labels are currently supported by the AWS Go API).
Metadata map[string]string `pulumi:"metadata"`
// The region where the bucket resides (Defaults to `nyc3`)
Region *string `pulumi:"region"`
// The path to a file that will be read and uploaded as raw bytes for the object content.
Source *string `pulumi:"source"`
// A unique version ID value for the object, if bucket versioning is enabled.
VersionId *string `pulumi:"versionId"`
// Specifies a target URL for [website redirect](http://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html).
WebsiteRedirect *string `pulumi:"websiteRedirect"`
}
type SpacesBucketObjectState struct {
// The canned ACL to apply. DigitalOcean supports "private" and "public-read". (Defaults to "private".)
Acl pulumi.StringPtrInput
// The name of the bucket to put the file in.
Bucket pulumi.StringPtrInput
// Specifies caching behavior along the request/reply chain. Read [w3c cacheControl](http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9) for further details.
CacheControl pulumi.StringPtrInput
// Literal string value to use as the object content, which will be uploaded as UTF-8-encoded text.
Content pulumi.StringPtrInput
// Base64-encoded data that will be decoded and uploaded as raw bytes for the object content. This allows safely uploading non-UTF8 binary data, but is recommended only for small content such as the result of the `gzipbase64` function with small text strings. For larger objects, use `source` to stream the content from a disk file.
ContentBase64 pulumi.StringPtrInput
// Specifies presentational information for the object. Read [w3c contentDisposition](http://www.w3.org/Protocols/rfc2616/rfc2616-sec19.html#sec19.5.1) for further information.
ContentDisposition pulumi.StringPtrInput
// Specifies what content encodings have been applied to the object and thus what decoding mechanisms must be applied to obtain the media-type referenced by the Content-Type header field. Read [w3c content encoding](http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.11) for further information.
ContentEncoding pulumi.StringPtrInput
// The language the content is in e.g. en-US or en-GB.
ContentLanguage pulumi.StringPtrInput
// A standard MIME type describing the format of the object data, e.g. application/octet-stream. All Valid MIME Types are valid for this input.
ContentType pulumi.StringPtrInput
// Used to trigger updates.
Etag pulumi.StringPtrInput
// Allow the object to be deleted by removing any legal hold on any object version.
// Default is `false`. This value should be set to `true` only if the bucket has S3 object lock enabled.
ForceDestroy pulumi.BoolPtrInput
// The name of the object once it is in the bucket.
Key pulumi.StringPtrInput
// A mapping of keys/values to provision metadata (will be automatically prefixed by `x-amz-meta-`, note that only lowercase labels are currently supported by the AWS Go API).
Metadata pulumi.StringMapInput
// The region where the bucket resides (Defaults to `nyc3`)
Region pulumi.StringPtrInput
// The path to a file that will be read and uploaded as raw bytes for the object content.
Source pulumi.StringPtrInput
// A unique version ID value for the object, if bucket versioning is enabled.
VersionId pulumi.StringPtrInput
// Specifies a target URL for [website redirect](http://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html).
WebsiteRedirect pulumi.StringPtrInput
}
func (SpacesBucketObjectState) ElementType() reflect.Type {
return reflect.TypeOf((*spacesBucketObjectState)(nil)).Elem()
}
type spacesBucketObjectArgs struct {
// The canned ACL to apply. DigitalOcean supports "private" and "public-read". (Defaults to "private".)
Acl *string `pulumi:"acl"`
// The name of the bucket to put the file in.
Bucket string `pulumi:"bucket"`
// Specifies caching behavior along the request/reply chain. Read [w3c cacheControl](http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9) for further details.
CacheControl *string `pulumi:"cacheControl"`
// Literal string value to use as the object content, which will be uploaded as UTF-8-encoded text.
Content *string `pulumi:"content"`
// Base64-encoded data that will be decoded and uploaded as raw bytes for the object content. This allows safely uploading non-UTF8 binary data, but is recommended only for small content such as the result of the `gzipbase64` function with small text strings. For larger objects, use `source` to stream the content from a disk file.
ContentBase64 *string `pulumi:"contentBase64"`
// Specifies presentational information for the object. Read [w3c contentDisposition](http://www.w3.org/Protocols/rfc2616/rfc2616-sec19.html#sec19.5.1) for further information.
ContentDisposition *string `pulumi:"contentDisposition"`
// Specifies what content encodings have been applied to the object and thus what decoding mechanisms must be applied to obtain the media-type referenced by the Content-Type header field. Read [w3c content encoding](http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.11) for further information.
ContentEncoding *string `pulumi:"contentEncoding"`
// The language the content is in e.g. en-US or en-GB.
ContentLanguage *string `pulumi:"contentLanguage"`
// A standard MIME type describing the format of the object data, e.g. application/octet-stream. All Valid MIME Types are valid for this input.
ContentType *string `pulumi:"contentType"`
// Used to trigger updates.
Etag *string `pulumi:"etag"`
// Allow the object to be deleted by removing any legal hold on any object version.
// Default is `false`. This value should be set to `true` only if the bucket has S3 object lock enabled.
ForceDestroy *bool `pulumi:"forceDestroy"`
// The name of the object once it is in the bucket.
Key string `pulumi:"key"`
// A mapping of keys/values to provision metadata (will be automatically prefixed by `x-amz-meta-`, note that only lowercase labels are currently supported by the AWS Go API).
Metadata map[string]string `pulumi:"metadata"`
// The region where the bucket resides (Defaults to `nyc3`)
Region string `pulumi:"region"`
// The path to a file that will be read and uploaded as raw bytes for the object content.
Source *string `pulumi:"source"`
// Specifies a target URL for [website redirect](http://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html).
WebsiteRedirect *string `pulumi:"websiteRedirect"`
}
// The set of arguments for constructing a SpacesBucketObject resource.
type SpacesBucketObjectArgs struct {
// The canned ACL to apply. DigitalOcean supports "private" and "public-read". (Defaults to "private".)
Acl pulumi.StringPtrInput
// The name of the bucket to put the file in.
Bucket pulumi.StringInput
// Specifies caching behavior along the request/reply chain. Read [w3c cacheControl](http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9) for further details.
CacheControl pulumi.StringPtrInput
// Literal string value to use as the object content, which will be uploaded as UTF-8-encoded text.
Content pulumi.StringPtrInput
// Base64-encoded data that will be decoded and uploaded as raw bytes for the object content. This allows safely uploading non-UTF8 binary data, but is recommended only for small content such as the result of the `gzipbase64` function with small text strings. For larger objects, use `source` to stream the content from a disk file.
ContentBase64 pulumi.StringPtrInput
// Specifies presentational information for the object. Read [w3c contentDisposition](http://www.w3.org/Protocols/rfc2616/rfc2616-sec19.html#sec19.5.1) for further information.
ContentDisposition pulumi.StringPtrInput
// Specifies what content encodings have been applied to the object and thus what decoding mechanisms must be applied to obtain the media-type referenced by the Content-Type header field. Read [w3c content encoding](http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.11) for further information.
ContentEncoding pulumi.StringPtrInput
// The language the content is in e.g. en-US or en-GB.
ContentLanguage pulumi.StringPtrInput
// A standard MIME type describing the format of the object data, e.g. application/octet-stream. All Valid MIME Types are valid for this input.
ContentType pulumi.StringPtrInput
// Used to trigger updates.
Etag pulumi.StringPtrInput
// Allow the object to be deleted by removing any legal hold on any object version.
// Default is `false`. This value should be set to `true` only if the bucket has S3 object lock enabled.
ForceDestroy pulumi.BoolPtrInput
// The name of the object once it is in the bucket.
Key pulumi.StringInput
// A mapping of keys/values to provision metadata (will be automatically prefixed by `x-amz-meta-`, note that only lowercase labels are currently supported by the AWS Go API).
Metadata pulumi.StringMapInput
// The region where the bucket resides (Defaults to `nyc3`)
Region pulumi.StringInput
// The path to a file that will be read and uploaded as raw bytes for the object content.
Source pulumi.StringPtrInput
// Specifies a target URL for [website redirect](http://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html).
WebsiteRedirect pulumi.StringPtrInput
}
func (SpacesBucketObjectArgs) ElementType() reflect.Type {
return reflect.TypeOf((*spacesBucketObjectArgs)(nil)).Elem()
}
type SpacesBucketObjectInput interface {
pulumi.Input
ToSpacesBucketObjectOutput() SpacesBucketObjectOutput
ToSpacesBucketObjectOutputWithContext(ctx context.Context) SpacesBucketObjectOutput
}
func (*SpacesBucketObject) ElementType() reflect.Type {
return reflect.TypeOf((*SpacesBucketObject)(nil))
}
func (i *SpacesBucketObject) ToSpacesBucketObjectOutput() SpacesBucketObjectOutput {
return i.ToSpacesBucketObjectOutputWithContext(context.Background())
}
func (i *SpacesBucketObject) ToSpacesBucketObjectOutputWithContext(ctx context.Context) SpacesBucketObjectOutput {
return pulumi.ToOutputWithContext(ctx, i).(SpacesBucketObjectOutput)
}
func (i *SpacesBucketObject) ToSpacesBucketObjectPtrOutput() SpacesBucketObjectPtrOutput {
return i.ToSpacesBucketObjectPtrOutputWithContext(context.Background())
}
func (i *SpacesBucketObject) ToSpacesBucketObjectPtrOutputWithContext(ctx context.Context) SpacesBucketObjectPtrOutput {
return pulumi.ToOutputWithContext(ctx, i).(SpacesBucketObjectPtrOutput)
}
type SpacesBucketObjectPtrInput interface {
pulumi.Input
ToSpacesBucketObjectPtrOutput() SpacesBucketObjectPtrOutput
ToSpacesBucketObjectPtrOutputWithContext(ctx context.Context) SpacesBucketObjectPtrOutput
}
type spacesBucketObjectPtrType SpacesBucketObjectArgs
func (*spacesBucketObjectPtrType) ElementType() reflect.Type {
return reflect.TypeOf((**SpacesBucketObject)(nil))
}
func (i *spacesBucketObjectPtrType) ToSpacesBucketObjectPtrOutput() SpacesBucketObjectPtrOutput {
return i.ToSpacesBucketObjectPtrOutputWithContext(context.Background())
}
func (i *spacesBucketObjectPtrType) ToSpacesBucketObjectPtrOutputWithContext(ctx context.Context) SpacesBucketObjectPtrOutput {
return pulumi.ToOutputWithContext(ctx, i).(SpacesBucketObjectPtrOutput)
}
// SpacesBucketObjectArrayInput is an input type that accepts SpacesBucketObjectArray and SpacesBucketObjectArrayOutput values.
// You can construct a concrete instance of `SpacesBucketObjectArrayInput` via:
//
// SpacesBucketObjectArray{ SpacesBucketObjectArgs{...} }
type SpacesBucketObjectArrayInput interface {
pulumi.Input
ToSpacesBucketObjectArrayOutput() SpacesBucketObjectArrayOutput
ToSpacesBucketObjectArrayOutputWithContext(context.Context) SpacesBucketObjectArrayOutput
}
type SpacesBucketObjectArray []SpacesBucketObjectInput
func (SpacesBucketObjectArray) ElementType() reflect.Type {
return reflect.TypeOf(([]*SpacesBucketObject)(nil))
}
func (i SpacesBucketObjectArray) ToSpacesBucketObjectArrayOutput() SpacesBucketObjectArrayOutput {
return i.ToSpacesBucketObjectArrayOutputWithContext(context.Background())
}
func (i SpacesBucketObjectArray) ToSpacesBucketObjectArrayOutputWithContext(ctx context.Context) SpacesBucketObjectArrayOutput {
return pulumi.ToOutputWithContext(ctx, i).(SpacesBucketObjectArrayOutput)
}
// SpacesBucketObjectMapInput is an input type that accepts SpacesBucketObjectMap and SpacesBucketObjectMapOutput values.
// You can construct a concrete instance of `SpacesBucketObjectMapInput` via:
//
// SpacesBucketObjectMap{ "key": SpacesBucketObjectArgs{...} }
type SpacesBucketObjectMapInput interface {
pulumi.Input
ToSpacesBucketObjectMapOutput() SpacesBucketObjectMapOutput
ToSpacesBucketObjectMapOutputWithContext(context.Context) SpacesBucketObjectMapOutput
}
type SpacesBucketObjectMap map[string]SpacesBucketObjectInput
func (SpacesBucketObjectMap) ElementType() reflect.Type {
return reflect.TypeOf((map[string]*SpacesBucketObject)(nil))
}
func (i SpacesBucketObjectMap) ToSpacesBucketObjectMapOutput() SpacesBucketObjectMapOutput {
return i.ToSpacesBucketObjectMapOutputWithContext(context.Background())
}
func (i SpacesBucketObjectMap) ToSpacesBucketObjectMapOutputWithContext(ctx context.Context) SpacesBucketObjectMapOutput {
return pulumi.ToOutputWithContext(ctx, i).(SpacesBucketObjectMapOutput)
}
type SpacesBucketObjectOutput struct {
*pulumi.OutputState
}
func (SpacesBucketObjectOutput) ElementType() reflect.Type {
return reflect.TypeOf((*SpacesBucketObject)(nil))
}
func (o SpacesBucketObjectOutput) ToSpacesBucketObjectOutput() SpacesBucketObjectOutput {
return o
}
func (o SpacesBucketObjectOutput) ToSpacesBucketObjectOutputWithContext(ctx context.Context) SpacesBucketObjectOutput {
return o
}
func (o SpacesBucketObjectOutput) ToSpacesBucketObjectPtrOutput() SpacesBucketObjectPtrOutput {
return o.ToSpacesBucketObjectPtrOutputWithContext(context.Background())
}
func (o SpacesBucketObjectOutput) ToSpacesBucketObjectPtrOutputWithContext(ctx context.Context) SpacesBucketObjectPtrOutput {
return o.ApplyT(func(v SpacesBucketObject) *SpacesBucketObject {
return &v
}).(SpacesBucketObjectPtrOutput)
}
type SpacesBucketObjectPtrOutput struct {
*pulumi.OutputState
}
func (SpacesBucketObjectPtrOutput) ElementType() reflect.Type {
return reflect.TypeOf((**SpacesBucketObject)(nil))
}
func (o SpacesBucketObjectPtrOutput) ToSpacesBucketObjectPtrOutput() SpacesBucketObjectPtrOutput {
return o
}
func (o SpacesBucketObjectPtrOutput) ToSpacesBucketObjectPtrOutputWithContext(ctx context.Context) SpacesBucketObjectPtrOutput {
return o
}
type SpacesBucketObjectArrayOutput struct{ *pulumi.OutputState }
func (SpacesBucketObjectArrayOutput) ElementType() reflect.Type {
return reflect.TypeOf((*[]SpacesBucketObject)(nil))
}
func (o SpacesBucketObjectArrayOutput) ToSpacesBucketObjectArrayOutput() SpacesBucketObjectArrayOutput {
return o
}
func (o SpacesBucketObjectArrayOutput) ToSpacesBucketObjectArrayOutputWithContext(ctx context.Context) SpacesBucketObjectArrayOutput {
return o
}
func (o SpacesBucketObjectArrayOutput) Index(i pulumi.IntInput) SpacesBucketObjectOutput {
return pulumi.All(o, i).ApplyT(func(vs []interface{}) SpacesBucketObject {
return vs[0].([]SpacesBucketObject)[vs[1].(int)]
}).(SpacesBucketObjectOutput)
}
type SpacesBucketObjectMapOutput struct{ *pulumi.OutputState }
func (SpacesBucketObjectMapOutput) ElementType() reflect.Type {
return reflect.TypeOf((*map[string]SpacesBucketObject)(nil))
}
func (o SpacesBucketObjectMapOutput) ToSpacesBucketObjectMapOutput() SpacesBucketObjectMapOutput {
return o
}
func (o SpacesBucketObjectMapOutput) ToSpacesBucketObjectMapOutputWithContext(ctx context.Context) SpacesBucketObjectMapOutput {
return o
}
func (o SpacesBucketObjectMapOutput) MapIndex(k pulumi.StringInput) SpacesBucketObjectOutput {
return pulumi.All(o, k).ApplyT(func(vs []interface{}) SpacesBucketObject {
return vs[0].(map[string]SpacesBucketObject)[vs[1].(string)]
}).(SpacesBucketObjectOutput)
}
func init() {
pulumi.RegisterOutputType(SpacesBucketObjectOutput{})
pulumi.RegisterOutputType(SpacesBucketObjectPtrOutput{})
pulumi.RegisterOutputType(SpacesBucketObjectArrayOutput{})
pulumi.RegisterOutputType(SpacesBucketObjectMapOutput{})
}
| {
return nil, err
} |
trans-boat-with-nine.e8b346c1269c1e2163f4.js | !function(e){var t={};function | (r){if(t[r])return t[r].exports;var o=t[r]={i:r,l:!1,exports:{}};return e[r].call(o.exports,o,o.exports,n),o.l=!0,o.exports}n.m=e,n.c=t,n.d=function(e,t,r){n.o(e,t)||Object.defineProperty(e,t,{enumerable:!0,get:r})},n.r=function(e){"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(e,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(e,"__esModule",{value:!0})},n.t=function(e,t){if(1&t&&(e=n(e)),8&t)return e;if(4&t&&"object"==typeof e&&e&&e.__esModule)return e;var r=Object.create(null);if(n.r(r),Object.defineProperty(r,"default",{enumerable:!0,value:e}),2&t&&"string"!=typeof e)for(var o in e)n.d(r,o,function(t){return e[t]}.bind(null,o));return r},n.n=function(e){var t=e&&e.__esModule?function(){return e.default}:function(){return e};return n.d(t,"a",t),t},n.o=function(e,t){return Object.prototype.hasOwnProperty.call(e,t)},n.p="",n(n.s=334)}({334:function(e,t,n){"use strict";t.__esModule=!0,t.default={title:"Trans-boat with nine",life:[[0,0,0,0,0,0,0,0],[0,1,1,0,0,0,0,0],[0,1,0,1,0,0,0,0],[0,0,1,0,0,0,0,0],[0,0,0,1,1,1,0,0],[0,0,0,0,0,0,1,0],[0,0,0,0,0,1,1,0],[0,0,0,0,0,0,0,0]]}}}); | n |
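// Readability note (a reconstruction of the minified bundle above, which is a
// standard webpack runtime wrapped around module 334): the module simply
// exports a Game of Life pattern object, roughly:
//
//   export default { title: 'Trans-boat with nine', life: [ /* the 8x8 grid of 0s and 1s embedded above */ ] }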
service.go | package http_service
import (
"context"
"github.com/capella-pw/queue/cluster"
"github.com/capella-pw/queue/cluster/cap"
"github.com/valyala/fasthttp"
)
// FastHTTPHandler - fasthttp fast http handler
func FastHTTPHandler(sc *cluster.ClusterService,
prepareCtxGenerator func() (ctx context.Context, doOnCompete func()),
addFunc []cluster.AdditionalCallFuncInClusterFunc,
) func(ctx *fasthttp.RequestCtx) {
return func(ctx *fasthttp.RequestCtx) {
decompressAlg := ""
ctx.Request.Header.VisitAll(func(key []byte, value []byte) {
if string(key) == cap.CompressTypeHeader |
})
ctxPrep, doOnCompete := prepareCtxGenerator()
defer doOnCompete()
bodyOut, outContentType, htmlCode := sc.Call(ctxPrep, decompressAlg, ctx.Request.Body(), addFunc)
ctx.Response.SetStatusCode(htmlCode)
ctx.Response.Header.Add(cap.CompressTypeHeader, outContentType)
ctx.Response.SetBody(bodyOut)
}
}
| {
decompressAlg = string(value)
} |
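// A minimal wiring sketch (illustrative; only FastHTTPHandler's signature
// comes from this file, the server address and the no-op context generator
// are assumptions):
//
//	handler := FastHTTPHandler(sc,
//		func() (context.Context, func()) { return context.Background(), func() {} },
//		nil)
//	fasthttp.ListenAndServe(":8080", handler)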
timer.py | import time
class | :
"""Simple Timer"""
def __init__(self):
self.start = time.perf_counter()
def end(self, precision: int = 3) -> str:
return f'{time.perf_counter() - self.start:.{precision}f}'
| Timer |
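# A minimal usage sketch (illustrative): time a piece of work and print the
# elapsed seconds at the default three-digit precision.
if __name__ == '__main__':
    t = Timer()
    total = sum(i * i for i in range(1_000_000))
    print(f'work took {t.end()}s (total={total})')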
index.tsx | /*
* Copyright © 2019 Cask Data, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
import React from 'react';
import { objectQuery } from 'services/helpers';
import ToggleSwitch from 'components/ToggleSwitch';
import { IWidgetProps } from 'components/AbstractWidget';
import { WIDGET_PROPTYPES } from 'components/AbstractWidget/constants';
import withStyles, { WithStyles, StyleRules } from '@material-ui/core/styles/withStyles';
import ThemeWrapper from 'components/ThemeWrapper';
const styles = (): StyleRules => {
return {
root: {
paddingTop: '7px',
},
};
};
interface IToggle {
label: string;
value: string;
}
interface IToggleWidgetProps {
on: IToggle;
off: IToggle;
default?: string;
}
interface IToggleToggleSwitchProps
extends IWidgetProps<IToggleWidgetProps>,
WithStyles<typeof styles> {}
const ToggleSwitchWidgetView: React.FC<IToggleToggleSwitchProps> = ({
widgetProps,
value,
onChange,
disabled,
classes,
dataCy,
}) => {
const onValue = objectQuery(widgetProps, 'on', 'value') || 'on';
const offValue = objectQuery(widgetProps, 'off', 'value') || 'off';
const onLabel = objectQuery(widgetProps, 'on', 'label') || 'On';
const offLabel = objectQuery(widgetProps, 'off', 'label') || 'Off';
const isOn = value === onValue;
function toggleSwitch() {
onChange(isOn ? offValue : onValue);
}
return (
<div className={classes.root}>
<ToggleSwitch
isOn={isOn}
onToggle={toggleSwitch}
disabled={disabled}
onLabel={onLabel}
offLabel={offLabel}
dataCy={dataCy}
/>
</div>
);
};
const StyledToggleSwitchWidget = withStyles(styles)(ToggleSwitchWidgetView);
function T | props) {
return (
<ThemeWrapper>
<StyledToggleSwitchWidget {...props} />
</ThemeWrapper>
);
}
export default ToggleSwitchWidget;
(ToggleSwitchWidget as any).propTypes = WIDGET_PROPTYPES;
| oggleSwitchWidget( |
t100.go | // Copyright 2022 Elapse and contributors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tlv
import (
"github.com/elap5e/penguin/pkg/bytes"
)
type T100 struct {
*TLV
appID uint64
subAppID uint64
acVer uint32
sigMap uint32
ssoVer uint32
}
func NewT100(appID, subAppID uint64, acVer, sigMap, ssoVer uint32) *T100 |
func (t *T100) ReadFrom(b *bytes.Buffer) error {
if err := t.TLV.ReadFrom(b); err != nil {
return err
}
v, err := t.TLV.GetValue()
if err != nil {
return err
}
if _, err = v.ReadUint16(); err != nil {
return err
}
if _, err = v.ReadUint32(); err != nil {
return err
}
appID, err := v.ReadUint32()
if err != nil {
return err
}
t.appID = uint64(appID)
subAppID, err := v.ReadUint32()
if err != nil {
return err
}
t.subAppID = uint64(subAppID)
if t.acVer, err = v.ReadUint32(); err != nil {
return err
}
if t.sigMap, err = v.ReadUint32(); err != nil {
return err
}
return nil
}
func (t *T100) WriteTo(b *bytes.Buffer) error {
v := bytes.NewBuffer([]byte{})
v.WriteUint16(0x0001)
v.WriteUint32(t.ssoVer)
v.WriteUint32(uint32(t.appID))
v.WriteUint32(uint32(t.subAppID))
v.WriteUint32(t.acVer)
v.WriteUint32(t.sigMap)
t.TLV.SetValue(v)
return t.TLV.WriteTo(b)
}
| {
return &T100{
TLV: NewTLV(0x0100, 0x0000, nil),
appID: appID,
subAppID: subAppID,
acVer: acVer,
sigMap: sigMap,
ssoVer: ssoVer,
}
} |
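// A round-trip sketch (illustrative; the constructor arguments are made-up
// values, and bytes here is the repo's own buffer package, not the standard
// library one):
//
//	b := bytes.NewBuffer([]byte{})
//	if err := NewT100(16, 537066738, 1, 0x10f0, 6).WriteTo(b); err != nil {
//		// handle the encode error
//	}
//	t := NewT100(0, 0, 0, 0, 0)
//	if err := t.ReadFrom(b); err != nil {
//		// handle the decode error
//	}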
dna.rs | use std::fs;
fn count(bases: String) -> Box<[usize; 4]> {
let mut counts = [0usize; 4];
bases.trim().chars().for_each(|c| match c {
'A' => counts[0] += 1,
'C' => counts[1] += 1,
'G' => counts[2] += 1,
'T' => counts[3] += 1,
_ => panic!("Unrecognized nucleotide {}", c),
});
Box::new(counts)
}
pub fn solve() -> std::io::Result<()> {
let input = fs::read_to_string("inputs/rosalind_dna.txt")?;
let output = *count(input);
println!("{:?}", output);
Ok(())
}
| let output = [20, 12, 17, 21];
assert_eq!(*count(input), output);
} | #[test]
fn sample() {
let input = "AGCTTTTCATTCTGACTGCAACGGGCAATATGTCTCTGTGTGGATTAAAAAAAGAGTGTCTGATAGCAGC".to_string(); |
autogrow.ts | export function autogrow() {
document.querySelectorAll('[data-autogrow]').forEach(function (element: any) {
element.style.boxSizing = 'border-box'
const offset = element.offsetHeight - element.clientHeight
element.addEventListener('input', function (event) {
event.target.style.height = 'auto' | element.removeAttribute('data-autogrow')
})
} | event.target.style.height = event.target.scrollHeight + offset + 'px'
}) |
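// A usage sketch (illustrative markup; only the data-autogrow attribute and
// the input-driven resize come from the function above):
//
//   <textarea data-autogrow></textarea>
//
//   document.addEventListener('DOMContentLoaded', () => autogrow())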
main.rs | // Copyright (c) 2014, 2015 Robert Clipsham <[email protected]>
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/// This example shows a basic packet logger using libpnet
extern crate pnet;
use std::env;
use std::net::IpAddr;
use pnet::packet::Packet;
use pnet::packet::ethernet::{EthernetPacket, EtherTypes};
use pnet::packet::ip::{IpNextHeaderProtocol, IpNextHeaderProtocols};
use pnet::packet::ipv4::Ipv4Packet;
use pnet::packet::ipv6::Ipv6Packet;
use pnet::packet::udp::UdpPacket;
use pnet::packet::tcp::TcpPacket;
use pnet::packet::arp::ArpPacket;
use pnet::packet::icmp::{IcmpPacket, echo_reply, echo_request, icmp_types};
use pnet::datalink::{self, NetworkInterface};
fn handle_udp_packet(interface_name: &str, source: IpAddr, destination: IpAddr, packet: &[u8]) {
let udp = UdpPacket::new(packet);
if let Some(udp) = udp {
println!("[{}]: UDP Packet: {}:{} > {}:{}; length: {}", interface_name, source,
udp.get_source(), destination, udp.get_destination(), udp.get_length());
} else {
println!("[{}]: Malformed UDP Packet", interface_name);
}
}
fn handle_icmp_packet(interface_name: &str, source: IpAddr, destination: IpAddr, packet: &[u8]) {
let icmp_packet = IcmpPacket::new(packet);
if let Some(icmp_packet) = icmp_packet {
match icmp_packet.get_icmp_type() {
icmp_types::EchoReply => {
let echo_reply_packet = echo_reply::EchoReplyPacket::new(packet).unwrap();
println!("[{}]: ICMP echo reply {} -> {} (seq={:?}, id={:?})",
interface_name,
source,
destination,
echo_reply_packet.get_sequence_number(),
echo_reply_packet.get_identifier());
},
icmp_types::EchoRequest => {
let echo_request_packet = echo_request::EchoRequestPacket::new(packet).unwrap();
println!("[{}]: ICMP echo request {} -> {} (seq={:?}, id={:?})",
interface_name,
source,
destination,
echo_request_packet.get_sequence_number(),
echo_request_packet.get_identifier());
},
_ => println!("[{}]: ICMP packet {} -> {} (type={:?})",
interface_name,
source,
destination,
icmp_packet.get_icmp_type()),
}
} else {
println!("[{}]: Malformed ICMP Packet", interface_name);
}
}
fn handle_tcp_packet(interface_name: &str, source: IpAddr, destination: IpAddr, packet: &[u8]) |
fn handle_transport_protocol(interface_name: &str, source: IpAddr, destination: IpAddr,
protocol: IpNextHeaderProtocol, packet: &[u8]) {
match protocol {
IpNextHeaderProtocols::Udp => handle_udp_packet(interface_name, source, destination, packet),
IpNextHeaderProtocols::Tcp => handle_tcp_packet(interface_name, source, destination, packet),
IpNextHeaderProtocols::Icmp => handle_icmp_packet(interface_name, source, destination, packet),
_ => println!("[{}]: Unknown {} packet: {} > {}; protocol: {:?} length: {}",
interface_name,
match source { IpAddr::V4(..) => "IPv4", _ => "IPv6" },
source,
destination,
protocol,
packet.len())
}
}
fn handle_ipv4_packet(interface_name: &str, ethernet: &EthernetPacket) {
let header = Ipv4Packet::new(ethernet.payload());
if let Some(header) = header {
handle_transport_protocol(interface_name,
IpAddr::V4(header.get_source()),
IpAddr::V4(header.get_destination()),
header.get_next_level_protocol(),
header.payload());
} else {
println!("[{}]: Malformed IPv4 Packet", interface_name);
}
}
fn handle_ipv6_packet(interface_name: &str, ethernet: &EthernetPacket) {
let header = Ipv6Packet::new(ethernet.payload());
if let Some(header) = header {
handle_transport_protocol(interface_name,
IpAddr::V6(header.get_source()),
IpAddr::V6(header.get_destination()),
header.get_next_header(),
header.payload());
} else {
println!("[{}]: Malformed IPv6 Packet", interface_name);
}
}
fn handle_arp_packet(interface_name: &str, ethernet: &EthernetPacket) {
let header = ArpPacket::new(ethernet.payload());
if let Some(header) = header {
println!("[{}]: ARP packet: {}({}) > {}({}); operation: {:?}",
interface_name,
ethernet.get_source(),
header.get_sender_proto_addr(),
ethernet.get_destination(),
header.get_target_proto_addr(),
header.get_operation());
} else {
println!("[{}]: Malformed ARP Packet", interface_name);
}
}
fn handle_packet(interface_name: &str, ethernet: &EthernetPacket) {
match ethernet.get_ethertype() {
EtherTypes::Ipv4 => handle_ipv4_packet(interface_name, ethernet),
EtherTypes::Ipv6 => handle_ipv6_packet(interface_name, ethernet),
EtherTypes::Arp => handle_arp_packet(interface_name, ethernet),
_ => println!("[{}]: Unknown packet: {} > {}; ethertype: {:?} length: {}",
interface_name,
ethernet.get_source(),
ethernet.get_destination(),
ethernet.get_ethertype(),
ethernet.packet().len())
}
}
fn main() {
use pnet::datalink::Channel::Ethernet;
let iface_name = env::args().nth(1).unwrap();
let interface_names_match = |iface: &NetworkInterface| iface.name == iface_name;
// Find the network interface with the provided name
let interfaces = datalink::interfaces();
let interface = interfaces.into_iter()
.find(interface_names_match)
.unwrap();
// Create a channel to receive on
let (_, mut rx) = match datalink::channel(&interface, Default::default()) {
Ok(Ethernet(tx, rx)) => (tx, rx),
Ok(_) => panic!("packetdump: unhandled channel type: {}"),
Err(e) => panic!("packetdump: unable to create channel: {}", e),
};
let mut iter = rx.iter();
loop {
match iter.next() {
Ok(packet) => handle_packet(&interface.name[..], &packet),
Err(e) => panic!("packetdump: unable to receive packet: {}", e)
}
}
}
| {
let tcp = TcpPacket::new(packet);
if let Some(tcp) = tcp {
println!("[{}]: TCP Packet: {}:{} > {}:{}; length: {}", interface_name, source,
tcp.get_source(), destination, tcp.get_destination(), packet.len());
} else {
println!("[{}]: Malformed TCP Packet", interface_name);
}
} |
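// Run sketch (illustrative invocation; the interface name is taken from the
// first CLI argument, and opening a datalink channel usually needs elevated
// privileges):
//
//     $ sudo ./packetdump eth0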
base.go | package client
import (
"errors"
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute"
"github.com/Azure/azure-sdk-for-go/services/containerservice/mgmt/2017-09-30/containerservice"
"github.com/Azure/azure-sdk-for-go/services/resources/mgmt/2016-06-01/subscriptions"
"github.com/banzaicloud/azure-aks-client/cluster"
"github.com/banzaicloud/banzai-types/components/azure"
"github.com/banzaicloud/banzai-types/constants"
"net/http"
"time"
)
type ClusterManager interface {
CreateOrUpdate(request *cluster.CreateClusterRequest, managedCluster *containerservice.ManagedCluster) (*containerservice.ManagedCluster, error)
Delete(resourceGroup, name string) (*http.Response, error)
Get(resourceGroup, name string) (containerservice.ManagedCluster, error)
List() ([]containerservice.ManagedCluster, error)
GetAccessProfiles(resourceGroup, name, roleName string) (containerservice.ManagedClusterAccessProfile, error)
ListLocations() (subscriptions.LocationListResult, error)
ListVmSizes(location string) (result compute.VirtualMachineSizeListResult, err error)
ListVersions(locations, resourceType string) (result containerservice.OrchestratorVersionProfileListResult, err error)
GetClientId() string
GetClientSecret() string
LogDebug(args ...interface{})
LogInfo(args ...interface{})
LogWarn(args ...interface{})
LogError(args ...interface{})
LogFatal(args ...interface{})
LogPanic(args ...interface{})
LogDebugf(format string, args ...interface{})
LogInfof(format string, args ...interface{})
LogWarnf(format string, args ...interface{})
LogErrorf(format string, args ...interface{})
LogFatalf(format string, args ...interface{})
LogPanicf(format string, args ...interface{})
}
// CreateUpdateCluster creates or updates a managed cluster with the specified configuration for agents and Kubernetes
// version.
func CreateUpdateCluster(manager ClusterManager, request *cluster.CreateClusterRequest) (*azure.ResponseWithValue, error) {
if request == nil {
return nil, errors.New("empty request")
}
manager.LogInfo("Start create/update cluster")
manager.LogDebugf("CreateRequest: %v", request)
manager.LogInfo("Validate cluster create/update request")
if err := request.Validate(); err != nil {
return nil, err
}
manager.LogInfo("Validate passed")
managedCluster := cluster.GetManagedCluster(request, manager.GetClientId(), manager.GetClientSecret())
manager.LogDebugf("Created managed cluster model - %#v", &managedCluster)
manager.LogDebug("Send request to azure")
result, err := manager.CreateOrUpdate(request, managedCluster)
if err != nil {
return nil, err
}
manager.LogInfo("Create response model")
return &azure.ResponseWithValue{
StatusCode: result.Response.StatusCode,
Value: *convertManagedClusterToValue(result),
}, nil
}
// DeleteCluster deletes the managed cluster with a specified resource group and name.
func DeleteCluster(manager ClusterManager, name string, resourceGroup string) error {
manager.LogInfof("Start deleting cluster %s in %s resource group", name, resourceGroup)
manager.LogDebug("Send request to azure")
response, err := manager.Delete(resourceGroup, name)
if err != nil {
return err
}
manager.LogInfof("Status code: %d", response.StatusCode)
return nil
}
// PollingCluster polls until the cluster is ready or an error occurs
func PollingCluster(manager ClusterManager, name string, resourceGroup string) (*azure.ResponseWithValue, error) {
const stageSuccess = "Succeeded"
const stageFailed = "Failed"
const waitInSeconds = 10
manager.LogInfof("Start polling cluster: %s [%s]", name, resourceGroup)
manager.LogDebug("Start loop")
result := azure.ResponseWithValue{}
for isReady := false; !isReady; {
manager.LogDebug("Send request to azure")
managedCluster, err := manager.Get(resourceGroup, name)
if err != nil {
return nil, err
}
statusCode := managedCluster.StatusCode
manager.LogInfof("Cluster polling status code: %d", statusCode)
switch statusCode {
case http.StatusOK:
response := convertManagedClusterToValue(&managedCluster)
stage := *managedCluster.ProvisioningState
manager.LogInfof("Cluster stage is %s", stage)
switch stage {
case stageSuccess:
isReady = true
result.Update(http.StatusCreated, *response)
case stageFailed:
return nil, constants.ErrorAzureCLusterStageFailed
default:
manager.LogInfo("Waiting for cluster ready...")
time.Sleep(waitInSeconds * time.Second)
}
default:
return nil, errors.New("status code is not OK")
}
}
return &result, nil
}
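// A minimal usage sketch (the manager wiring is assumed, not shown here):
//
// resp, err := PollingCluster(manager, "my-cluster", "my-resource-group")
// if err == nil {
// manager.LogInfof("cluster ready, status: %d", resp.StatusCode)
// }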
// GetCluster gets the details of the managed cluster with a specified resource group and name.
func GetCluster(manager ClusterManager, name string, resourceGroup string) (*azure.ResponseWithValue, error) {
manager.LogInfof("Start getting aks cluster: %s [%s]", name, resourceGroup)
managedCluster, err := manager.Get(resourceGroup, name)
if err != nil {
return nil, err
}
manager.LogInfof("Status code: %d", managedCluster.StatusCode)
return &azure.ResponseWithValue{
StatusCode: managedCluster.StatusCode,
Value: *convertManagedClusterToValue(&managedCluster),
}, nil
}
// ListClusters gets a list of managed clusters in the specified subscription. The operation returns properties of each managed
// cluster.
func ListClusters(manager ClusterManager) (*azure.ListResponse, error) {
manager.LogInfo("Start listing clusters")
managedClusters, err := manager.List()
if err != nil {
return nil, err
}
manager.LogInfo("Create response model")
response := azure.ListResponse{StatusCode: http.StatusOK, Value: azure.Values{
Value: convertManagedClustersToValues(managedClusters),
}}
return &response, nil
}
// GetClusterConfig gets the given cluster kubeconfig
func GetClusterConfig(manager ClusterManager, name, resourceGroup, roleName string) (*azure.Config, error) {
manager.LogInfof("Start getting %s cluster's config in %s, role name: %s", name, resourceGroup, roleName)
manager.LogDebug("Send request to azure")
profile, err := manager.GetAccessProfiles(resourceGroup, name, roleName)
if err != nil {
return nil, err
}
manager.LogInfof("Status code: %d", profile.StatusCode)
manager.LogInfo("Create response model")
return &azure.Config{
Location: *profile.Location,
Name: *profile.Name,
Properties: struct {
KubeConfig string `json:"kubeConfig"`
}{
KubeConfig: string(*profile.KubeConfig),
},
}, nil
}
// GetLocations returns all the locations that are available for resource providers
func | (manager ClusterManager) ([]string, error) {
manager.LogInfo("Start listing locations")
resp, err := manager.ListLocations()
if err != nil {
return nil, err
}
var locations []string
for _, loc := range *resp.Value {
locations = append(locations, *loc.Name)
}
return locations, nil
}
// GetVmSizes lists all available virtual machine sizes for a subscription in a location.
func GetVmSizes(manager ClusterManager, location string) ([]string, error) {
manager.LogInfo("Start listing vm sizes")
resp, err := manager.ListVmSizes(location)
if err != nil {
return nil, err
}
var sizes []string
for _, vm := range *resp.Value {
sizes = append(sizes, *vm.Name)
}
return sizes, nil
}
// GetKubernetesVersions returns a list of supported kubernetes version in the specified subscription
func GetKubernetesVersions(manager ClusterManager, location string) ([]string, error) {
manager.LogInfo("Start listing Kubernetes versions")
resp, err := manager.ListVersions(location, string(compute.Kubernetes))
if err != nil {
return nil, err
}
var versions []string
for _, v := range *resp.OrchestratorVersionProfileProperties.Orchestrators {
versions = append(versions, *v.OrchestratorVersion)
}
return versions, nil
}
// convertManagedClustersToValues returns []Value with the managed clusters properties
func convertManagedClustersToValues(managedCluster []containerservice.ManagedCluster) []azure.Value {
var values []azure.Value
for _, mc := range managedCluster {
values = append(values, *convertManagedClusterToValue(&mc))
}
return values
}
// convertManagedClusterToValue returns Value with the ManagedCluster properties
func convertManagedClusterToValue(managedCluster *containerservice.ManagedCluster) *azure.Value {
var profiles []azure.Profile
if managedCluster.AgentPoolProfiles != nil {
for _, p := range *managedCluster.AgentPoolProfiles {
profiles = append(profiles, azure.Profile{
Name: *p.Name,
Count: int(*p.Count),
})
}
}
return &azure.Value{
Id: *managedCluster.ID,
Location: *managedCluster.Location,
Name: *managedCluster.Name,
Properties: azure.Properties{
ProvisioningState: *managedCluster.ProvisioningState,
AgentPoolProfiles: profiles,
Fqdn: *managedCluster.Fqdn,
},
}
}
| GetLocations |
board.module.ts | import { UsersModule } from './../users/users.module';
import { Module } from '@nestjs/common';
import { BoardGateway } from './board.gateway';
@Module({
providers: [BoardGateway],
imports: [UsersModule],
})
export class | {}
| BoardModule |
CSSCrypt.py | """
Clyde's Simple Shuffler Encryption
@Desc
This encryption algorithm is designed for users to use their own keys to build
a unique encrypted output. It is called a shuffler as it uses the inputted key
to shuffle each character in the message, thus making it harder to crack.
I highly advise you not to use this for passwords. Passwords are secured by
hashing and not through encryption. Hashed values can't be decrypted, whereas
encrypted values can. Feel free to encrypt stuff for fun and use this as a
learning tool.
If you use this to encrypt something sensitive, use it at your own discretion. I am
not responsible for messages you've created that get cracked.
@author
Clyde Smets <[email protected]>
@license
MIT
"""
import re
from pathlib import Path
class encryption:
# Character values within the list is used to encode the message.
# Default file 'key/encoding.txt' uses base64, change it to whatever.
__encodingValues = []
# Pad identifier. Padding is used in encoding to fit the bit block size
__pad = ''
# The bit size helps determine the encoding index value by x num of binary
# bits. For example base64 is 6 - it grabs 6 bits to create a decimal for
# assigning that index value to a character. 010011 => 19 => T
# The default value is assigned at the top of the file 'key/encoding.txt'
__bitSize = 0
def __init__ (self):
# Check if encoding file exists
encodingFilePath = 'key/encoding.txt'
encodingFile = Path(encodingFilePath)
if encodingFile.is_file():
lines = self.__readByLine(encodingFilePath)
self.__encodingValues = lines[1:-1]
self.__bitSize = int(lines[0])
self.__pad = lines[-1]
else:
raise Exception('encoding.txt is not found')
def encrypt (self, message, key):
# Encode the message
encoded = self.__encode(message)
# count number of encoding pads
padNum = encoded.count(self.__pad)
# remove and store the encoding padded values
pads = encoded[-padNum:] if padNum else ''
encoded = encoded[:-padNum] if padNum else encoded
# Extend the key to cover the length of the encoding values
key = self.__resize(key, len(encoded))
encrypted = ''
# Shift the encoded values according to the key.
# Values can only shift from 0-9.
for i in range(len(encoded)):
shift = self.__shift(encoded[i], int(key[i]))
encrypted = encrypted + shift
# reattached padding to the encrypted output
encrypted = encrypted + pads
return encrypted
def decrypt (self, encrypted, key):
# Resize the key to the length of the encrypted message
key = self.__resize(key, len(encrypted))
# Count number of encoding pads
padNum = encrypted.count(self.__pad)
# Remove and store the encoding padded values
pads = encrypted[-padNum:] if padNum else ''
encrypted = encrypted[:-padNum] if padNum else encrypted
decrypted = ''
# unshift the encrypted message to be decoded using the key.
for i in range(len(encrypted)):
unshift = self.__unshift(encrypted[i], int(key[i]))
decrypted = decrypted + unshift
# re-append the padding
decrypted = decrypted + pads
# decode the message and return the decrypted result.
decoded = self.__decode(decrypted)
return decoded
# Resize the length of a string to match the amount.
def __resize (self, string, amount):
if len(string) < amount:
index = 0
for i in range(len(string), amount):
string = string + string[index]
index = index + 1
elif len(string) > amount: # if it's larger cut it
cutAmount = amount - len(string) # negative value
string = string[:cutAmount]
return string
def __encode (self, message):
encoded = ''
longBinary = ''
# Loop through characters in message to convert it to binary
for i in range(len(message)):
# Convert to hexadecimal
hexChar = format(ord(message[i]), "x")
# Convert hexadecimal to decimal
decimal = int(hexChar, 16)
# Convert decimal to binary
binary = '{0:08b}'.format(decimal)
longBinary += binary
# Encoding requires 24 bit blocks. So the long binary has to be split
# into bits of 24. If a block doesn't complete 24 bits, pad it!
# so that it does. e.g. '100110110101' => '100110110101000000000000'
blockSize = 24
blocks = []
counter = 0
block = ''
# build the blocks
for i in range(len(longBinary)):
if longBinary[i]:
if counter < blockSize:
block += longBinary[i]
counter = counter + 1
else:
blocks.append(block)
block = longBinary[i]
counter = 1  # the new block already holds this bit
# append last remaining block if it has values
if len(block) > 0:
blocks.append(block)
# pad the last block
for i in range(len(blocks)):
if len(blocks[i]) < blockSize:
# append padded 0
size = blockSize - len(blocks[i])
for b in range(size):
blocks[i] = blocks[i] + '0'
# convert back to long binary
longBinary = ''.join(blocks)
# group binary values by bit size
grouped = self.__groupBinary(longBinary, self.__bitSize)
# Get the encoded character for the binary group. But it will
# require the binary to be converted to decimal to find the index
# position.
# Find the number of groups that is required to make a block
numOfGroups = blockSize // self.__bitSize
# Encode every group except the trailing (numOfGroups - 1) groups, which
# may be padding and are handled separately below.
for gi in range(len(grouped) - (numOfGroups - 1)):
eDecimal = int(grouped[gi], 2)
encoded += self.__encodingValues[eDecimal]
# Size of padding
padding = ''
for n in range(self.__bitSize):
padding += '0'
# Check to see the last remaining groups are padded, and if they are,
# assign them a padded value.
for lgi in range(numOfGroups - 1):
target = len(grouped) - ((numOfGroups - 1) - lgi)
if grouped[target] == padding:
encoded += self.__pad
else:
eDecimal = int(grouped[target], 2)
encoded += self.__encodingValues[eDecimal]
return encoded
def __decode (self, message):
decoded = ''
longBinary = ''
pads = ''
# Size of padding
padding = ''
for n in range(self.__bitSize):
padding += '0'
# Loop through encoded message and return values as binary
for i in range(len(message)):
# Padding characters contribute zero bits; accumulate them and skip
if message[i] == self.__pad:
pads += padding
continue
# Find the index value of the char in the encoding key
index = 0
for mi in range(len(self.__encodingValues)):
if message[i] == self.__encodingValues[mi]:
index = mi
break
# Convert index to binary following bit amount
binaryFormat = '{0:0' + str(self.__bitSize) + 'b}'
binary = binaryFormat.format(index)
longBinary += binary
# Append padding to converted indexes
longBinary = longBinary + pads
# group binary values to divisable of 8
grouped = self.__groupBinary(longBinary, 8)
# Decode
for i in range(len(grouped)):
# Get decimal from binary
decimal = int(grouped[i], 2)
# Skip padding (zero) bytes
if decimal != 0:
# Zero-pad to two hex digits so bytes.fromhex() accepts values below 0x10
hexadecimal = format(decimal, '02x')
char = bytes.fromhex(hexadecimal).decode('utf-8')
decoded += char
return decoded
# Write contents to a file
def __write (self, file, contents):
with open(file, 'w') as f:
f.write(contents)
# Read a file line by line and return it as a list
def __readByLine (self, file):
contents = []
with open(file) as line:
contents = line.read().splitlines()
return contents
# Return a list of binary values grouped by bit size
def | (self, binary, bitSize):
# group binary values by base number
grouped = re.findall('.{1,' + str(bitSize) + '}', binary)
# Fill the last value with any missing 0 - so groups are whole
# e.g. '01' will be changed to '000001' if bit size is 6
lastGroupSize = len(grouped[len(grouped) - 1])
if lastGroupSize < bitSize:
count = 0
amount = bitSize - lastGroupSize
while count < amount:
grouped[len(grouped) - 1] = '0' + grouped[len(grouped) - 1]
count = count + 1
return grouped
# Find the character in the list and find its shifted value
def __shift (self, char, amount):
values = self.__encodingValues
index = self.__charPosition(char, values)
shift = index + amount
if shift < len(values):
return values[shift]
else:
remainder = len(self.__encodingValues) - (index + 1)
shift = (amount - remainder) - 1
return values[shift]
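# Worked example with 64 encoding values: shifting index 62 by 5 wraps past
# the end (remainder = 1), so the result is values[(5 - 1) - 1] = values[3].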
# Get the original, unshifted value.
def __unshift (self, char, amount):
values = self.__encodingValues
index = self.__charPosition(char, values)
return values[index - amount]
# Return the index value of a matching character in a long string.
def __charPosition (self, char, string):
index = 0
# Get the index value of the character in the list
for i in range(len(string)):
if string[i] == char:
index = i
break
return index
| __groupBinary |
Batch_Service_operations_should_get_a_job_reference_successfully.nock.js | // This file has been autogenerated.
exports.setEnvironment = function() {
process.env['AZURE_BATCH_ACCOUNT'] = 'lchency4';
process.env['AZURE_BATCH_ENDPOINT'] = 'https://lchency4.westcentralus.batch.azure.com';
process.env['AZURE_SUBSCRIPTION_ID'] = '3ee7eaf5-6a2f-49fd-953f-d760b5ac2e05';
};
exports.scopes = [[function (nock) {
var result =
nock('http://lchency4.westcentralus.batch.azure.com:443')
.get('/jobs/HelloWorldJobNodeSDKTest?api-version=2018-08-01.7.0')
.reply(200, "{\r\n \"odata.metadata\":\"https://lchency4.westcentralus.batch.azure.com/$metadata#jobs/@Element\",\"id\":\"HelloWorldJobNodeSDKTest\",\"url\":\"https://lchency4.westcentralus.batch.azure.com/jobs/HelloWorldJobNodeSDKTest\",\"eTag\":\"0x8D607A4396C0D8C\",\"lastModified\":\"2018-08-21T20:25:23.8917516Z\",\"creationTime\":\"2018-08-21T20:25:23.5846713Z\",\"state\":\"active\",\"stateTransitionTime\":\"2018-08-21T20:25:23.6076746Z\",\"priority\":500,\"usesTaskDependencies\":false,\"constraints\":{\r\n \"maxWallClockTime\":\"P10675199DT2H48M5.4775807S\",\"maxTaskRetryCount\":3\r\n },\"poolInfo\":{\r\n \"poolId\":\"nodesdktestpool1\"\r\n },\"executionInfo\":{\r\n \"startTime\":\"2018-08-21T20:25:23.6076746Z\",\"poolId\":\"nodesdktestpool1\"\r\n },\"onAllTasksComplete\":\"noaction\",\"onTaskFailure\":\"noaction\"\r\n}", { 'transfer-encoding': 'chunked',
'content-type': 'application/json;odata=minimalmetadata',
'last-modified': 'Tue, 21 Aug 2018 20:25:23 GMT',
etag: '0x8D607A4396C0D8C',
server: 'Microsoft-HTTPAPI/2.0',
'request-id': '48237bb7-af49-462c-8793-11c154c658b9',
'strict-transport-security': 'max-age=31536000; includeSubDomains',
'x-content-type-options': 'nosniff',
dataserviceversion: '3.0',
date: 'Tue, 21 Aug 2018 20:31:01 GMT',
connection: 'close' });
return result; },
function (nock) {
var result =
nock('https://lchency4.westcentralus.batch.azure.com:443') | 'content-type': 'application/json;odata=minimalmetadata',
'last-modified': 'Tue, 21 Aug 2018 20:25:23 GMT',
etag: '0x8D607A4396C0D8C',
server: 'Microsoft-HTTPAPI/2.0',
'request-id': '48237bb7-af49-462c-8793-11c154c658b9',
'strict-transport-security': 'max-age=31536000; includeSubDomains',
'x-content-type-options': 'nosniff',
dataserviceversion: '3.0',
date: 'Tue, 21 Aug 2018 20:31:01 GMT',
connection: 'close' });
return result; }]]; | .get('/jobs/HelloWorldJobNodeSDKTest?api-version=2018-08-01.7.0')
.reply(200, "{\r\n \"odata.metadata\":\"https://lchency4.westcentralus.batch.azure.com/$metadata#jobs/@Element\",\"id\":\"HelloWorldJobNodeSDKTest\",\"url\":\"https://lchency4.westcentralus.batch.azure.com/jobs/HelloWorldJobNodeSDKTest\",\"eTag\":\"0x8D607A4396C0D8C\",\"lastModified\":\"2018-08-21T20:25:23.8917516Z\",\"creationTime\":\"2018-08-21T20:25:23.5846713Z\",\"state\":\"active\",\"stateTransitionTime\":\"2018-08-21T20:25:23.6076746Z\",\"priority\":500,\"usesTaskDependencies\":false,\"constraints\":{\r\n \"maxWallClockTime\":\"P10675199DT2H48M5.4775807S\",\"maxTaskRetryCount\":3\r\n },\"poolInfo\":{\r\n \"poolId\":\"nodesdktestpool1\"\r\n },\"executionInfo\":{\r\n \"startTime\":\"2018-08-21T20:25:23.6076746Z\",\"poolId\":\"nodesdktestpool1\"\r\n },\"onAllTasksComplete\":\"noaction\",\"onTaskFailure\":\"noaction\"\r\n}", { 'transfer-encoding': 'chunked', |
ajax.js | $( document ).ready(function() {
$( document ).ajaxError(function( event, jqXHR ) {
if ( 403 === jqXHR.status ) {
window.location.reload();
} | });
}); | |
product_group_id_test.go | package validate
// NOTE: this file is generated via 'go:generate' - manual changes will be overwritten
import "testing"
func | (t *testing.T) {
cases := []struct {
Input string
Valid bool
}{
{
// empty
Input: "",
Valid: false,
},
{
// missing SubscriptionId
Input: "/",
Valid: false,
},
{
// missing value for SubscriptionId
Input: "/subscriptions/",
Valid: false,
},
{
// missing ResourceGroup
Input: "/subscriptions/12345678-1234-9876-4563-123456789012/",
Valid: false,
},
{
// missing value for ResourceGroup
Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/",
Valid: false,
},
{
// missing ServiceName
Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.ApiManagement/",
Valid: false,
},
{
// missing value for ServiceName
Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.ApiManagement/service/",
Valid: false,
},
{
// missing ProductName
Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.ApiManagement/service/service1/",
Valid: false,
},
{
// missing value for ProductName
Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.ApiManagement/service/service1/products/",
Valid: false,
},
{
// missing GroupName
Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.ApiManagement/service/service1/products/product1/",
Valid: false,
},
{
// missing value for GroupName
Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.ApiManagement/service/service1/products/product1/groups/",
Valid: false,
},
{
// valid
Input: "/subscriptions/12345678-1234-9876-4563-123456789012/resourceGroups/resGroup1/providers/Microsoft.ApiManagement/service/service1/products/product1/groups/group1",
Valid: true,
},
{
// upper-cased
Input: "/SUBSCRIPTIONS/12345678-1234-9876-4563-123456789012/RESOURCEGROUPS/RESGROUP1/PROVIDERS/MICROSOFT.APIMANAGEMENT/SERVICE/SERVICE1/PRODUCTS/PRODUCT1/GROUPS/GROUP1",
Valid: false,
},
}
for _, tc := range cases {
t.Logf("[DEBUG] Testing Value %s", tc.Input)
_, errors := ProductGroupID(tc.Input, "test")
valid := len(errors) == 0
if tc.Valid != valid {
t.Fatalf("Expected %t but got %t", tc.Valid, valid)
}
}
}
| TestProductGroupID |
logging.go | package visibility
import (
"strings"
log "github.com/sirupsen/logrus"
graylog "gopkg.in/gemnasium/logrus-graylog-hook.v2"
)
// ShipLogging configures hooks used to ship logs to ELK.
func ShipLogging(mode, address string) {
loggingConfig := make(map[string]interface{})
loggingConfig["application"] = "statusbay"
loggingConfig["mode"] = mode
hook := graylog.NewGraylogHook(address, loggingConfig)
log.SetFormatter(&log.TextFormatter{FullTimestamp: true})
log.AddHook(hook)
}
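// A minimal usage sketch (the Graylog address is an assumption, not part of
// this package):
//
// ShipLogging("production", "graylog.example.com:12201")
// SetLoggingLevel("debug")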
// SetLoggingLevel sets the logging level to the specified string
func SetLoggingLevel(level string) | {
level = strings.ToLower(level)
log.WithFields(log.Fields{"level": level}).Warn("setting logging level")
switch level {
case "debug":
log.SetLevel(log.DebugLevel)
case "info":
log.SetLevel(log.InfoLevel)
case "warn", "warning":
log.SetLevel(log.WarnLevel)
case "error":
log.SetLevel(log.ErrorLevel)
case "fatal":
log.SetLevel(log.FatalLevel)
case "panic":
log.SetLevel(log.PanicLevel)
default:
log.WithFields(log.Fields{"level": level}).Warn("invalid log level, not setting")
}
} |
|
foreign_items.rs | use rustc::ty::layout::{Align, LayoutOf, Size};
use rustc::hir::def_id::DefId;
use rustc::mir;
use syntax::attr;
use syntax::symbol::sym;
use crate::*;
impl<'mir, 'tcx> EvalContextExt<'mir, 'tcx> for crate::MiriEvalContext<'mir, 'tcx> {}
pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx> {
/// Returns the minimum alignment for the target architecture for allocations of the given size.
fn min_align(&self, size: u64, kind: MiriMemoryKind) -> Align {
let this = self.eval_context_ref();
// List taken from `libstd/sys_common/alloc.rs`.
let min_align = match this.tcx.tcx.sess.target.target.arch.as_str() {
"x86" | "arm" | "mips" | "powerpc" | "powerpc64" | "asmjs" | "wasm32" => 8,
"x86_64" | "aarch64" | "mips64" | "s390x" | "sparc64" => 16,
arch => bug!("Unsupported target architecture: {}", arch),
};
// Windows always aligns, even small allocations.
// Source: <https://support.microsoft.com/en-us/help/286470/how-to-use-pageheap-exe-in-windows-xp-windows-2000-and-windows-server>
// But jemalloc does not, so for the C heap we only align if the allocation is sufficiently big.
if kind == MiriMemoryKind::WinHeap || size >= min_align {
return Align::from_bytes(min_align).unwrap();
}
// We have `size < min_align`. Round `size` *down* to the next power of two and use that.
fn prev_power_of_two(x: u64) -> u64 {
let next_pow2 = x.next_power_of_two();
if next_pow2 == x {
// x *is* a power of two, just use that.
x
} else {
// x is between two powers, so next = 2*prev.
next_pow2 / 2
}
}
Align::from_bytes(prev_power_of_two(size)).unwrap()
}
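// Worked example (a sketch): on x86_64 the C heap uses min_align = 16, so a
// 24-byte allocation gets 16-byte alignment, while a 3-byte one rounds down
// to Align::from_bytes(2) and a 5-byte one to Align::from_bytes(4).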
fn malloc(
&mut self,
size: u64,
zero_init: bool,
kind: MiriMemoryKind,
) -> Scalar<Tag> {
let this = self.eval_context_mut();
let tcx = &{this.tcx.tcx};
if size == 0 {
Scalar::from_int(0, this.pointer_size())
} else {
let align = this.min_align(size, kind);
let ptr = this.memory_mut().allocate(Size::from_bytes(size), align, kind.into());
if zero_init {
// We just allocated this, the access cannot fail
this.memory_mut()
.get_mut(ptr.alloc_id).unwrap()
.write_repeat(tcx, ptr, 0, Size::from_bytes(size)).unwrap();
}
Scalar::Ptr(ptr)
}
}
fn free(
&mut self,
ptr: Scalar<Tag>,
kind: MiriMemoryKind,
) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
if !this.is_null(ptr)? {
let ptr = this.force_ptr(ptr)?;
this.memory_mut().deallocate(
ptr,
None,
kind.into(),
)?;
}
Ok(())
}
fn realloc(
&mut self,
old_ptr: Scalar<Tag>,
new_size: u64,
kind: MiriMemoryKind,
) -> InterpResult<'tcx, Scalar<Tag>> {
let this = self.eval_context_mut();
let new_align = this.min_align(new_size, kind);
if this.is_null(old_ptr)? {
if new_size == 0 {
Ok(Scalar::from_int(0, this.pointer_size()))
} else {
let new_ptr = this.memory_mut().allocate(
Size::from_bytes(new_size),
new_align,
kind.into()
);
Ok(Scalar::Ptr(new_ptr))
}
} else {
let old_ptr = this.force_ptr(old_ptr)?;
let memory = this.memory_mut();
if new_size == 0 {
memory.deallocate(
old_ptr,
None,
kind.into(),
)?;
Ok(Scalar::from_int(0, this.pointer_size()))
} else {
let new_ptr = memory.reallocate(
old_ptr,
None,
Size::from_bytes(new_size),
new_align,
kind.into(),
)?;
Ok(Scalar::Ptr(new_ptr))
}
}
}
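// Semantics sketch, mirroring C's realloc: realloc(NULL, n) behaves like
// malloc(n), and realloc(p, 0) frees p and returns a null pointer.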
/// Emulates calling a foreign item, failing if the item is not supported.
/// This function will handle `goto_block` if needed.
fn emulate_foreign_item(
&mut self,
def_id: DefId,
args: &[OpTy<'tcx, Tag>],
dest: Option<PlaceTy<'tcx, Tag>>,
ret: Option<mir::BasicBlock>,
) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
let attrs = this.tcx.get_attrs(def_id);
let link_name = match attr::first_attr_value_str_by_name(&attrs, sym::link_name) {
Some(name) => name.as_str(),
None => this.tcx.item_name(def_id).as_str(),
};
// Strip linker suffixes (seen on 32-bit macOS).
let link_name = link_name.get().trim_end_matches("$UNIX2003");
let tcx = &{this.tcx.tcx};
// First: functions that diverge.
match link_name {
"__rust_start_panic" | "panic_impl" => {
return err!(MachineError("the evaluated program panicked".to_string()));
}
"exit" | "ExitProcess" => {
// it's really u32 for ExitProcess, but we have to put it into the `Exit` error variant anyway
let code = this.read_scalar(args[0])?.to_i32()?;
return err!(Exit(code));
}
_ => if dest.is_none() {
return err!(Unimplemented(
format!("can't call diverging foreign function: {}", link_name),
));
}
}
// Next: functions that assume a ret and dest.
let dest = dest.expect("we already checked for a dest");
let ret = ret.expect("dest is `Some` but ret is `None`");
match link_name {
"malloc" => {
let size = this.read_scalar(args[0])?.to_usize(this)?;
let res = this.malloc(size, /*zero_init:*/ false, MiriMemoryKind::C);
this.write_scalar(res, dest)?;
}
"calloc" => {
let items = this.read_scalar(args[0])?.to_usize(this)?;
let len = this.read_scalar(args[1])?.to_usize(this)?;
let size = items.checked_mul(len).ok_or_else(|| InterpError::Overflow(mir::BinOp::Mul))?;
let res = this.malloc(size, /*zero_init:*/ true, MiriMemoryKind::C);
this.write_scalar(res, dest)?;
}
"posix_memalign" => {
let ret = this.deref_operand(args[0])?;
let align = this.read_scalar(args[1])?.to_usize(this)?;
let size = this.read_scalar(args[2])?.to_usize(this)?;
// Align must be power of 2, and also at least ptr-sized (POSIX rules).
if !align.is_power_of_two() {
return err!(HeapAllocNonPowerOfTwoAlignment(align));
}
/*
FIXME: This check is disabled because rustc violates it.
See <https://github.com/rust-lang/rust/issues/62251>.
if align < this.pointer_size().bytes() {
return err!(MachineError(format!(
"posix_memalign: alignment must be at least the size of a pointer, but is {}",
align,
)));
}
*/
if size == 0 {
this.write_null(ret.into())?;
} else {
let ptr = this.memory_mut().allocate(
Size::from_bytes(size),
Align::from_bytes(align).unwrap(),
MiriMemoryKind::C.into()
);
this.write_scalar(Scalar::Ptr(ptr), ret.into())?;
}
this.write_null(dest)?;
}
"free" => {
let ptr = this.read_scalar(args[0])?.not_undef()?;
this.free(ptr, MiriMemoryKind::C)?;
}
"realloc" => {
let old_ptr = this.read_scalar(args[0])?.not_undef()?;
let new_size = this.read_scalar(args[1])?.to_usize(this)?;
let res = this.realloc(old_ptr, new_size, MiriMemoryKind::C)?;
this.write_scalar(res, dest)?;
}
"__rust_alloc" => {
let size = this.read_scalar(args[0])?.to_usize(this)?;
let align = this.read_scalar(args[1])?.to_usize(this)?;
if size == 0 {
return err!(HeapAllocZeroBytes);
}
if !align.is_power_of_two() {
return err!(HeapAllocNonPowerOfTwoAlignment(align));
}
let ptr = this.memory_mut()
.allocate(
Size::from_bytes(size),
Align::from_bytes(align).unwrap(),
MiriMemoryKind::Rust.into()
);
this.write_scalar(Scalar::Ptr(ptr), dest)?;
}
"__rust_alloc_zeroed" => {
let size = this.read_scalar(args[0])?.to_usize(this)?;
let align = this.read_scalar(args[1])?.to_usize(this)?;
if size == 0 {
return err!(HeapAllocZeroBytes);
}
if !align.is_power_of_two() {
return err!(HeapAllocNonPowerOfTwoAlignment(align));
}
let ptr = this.memory_mut()
.allocate(
Size::from_bytes(size),
Align::from_bytes(align).unwrap(),
MiriMemoryKind::Rust.into()
);
// We just allocated this, the access cannot fail
this.memory_mut()
.get_mut(ptr.alloc_id).unwrap()
.write_repeat(tcx, ptr, 0, Size::from_bytes(size)).unwrap();
this.write_scalar(Scalar::Ptr(ptr), dest)?;
}
"__rust_dealloc" => {
let ptr = this.read_scalar(args[0])?.not_undef()?;
let old_size = this.read_scalar(args[1])?.to_usize(this)?;
let align = this.read_scalar(args[2])?.to_usize(this)?;
if old_size == 0 {
return err!(HeapAllocZeroBytes);
}
if !align.is_power_of_two() {
return err!(HeapAllocNonPowerOfTwoAlignment(align));
}
let ptr = this.force_ptr(ptr)?;
this.memory_mut().deallocate(
ptr,
Some((Size::from_bytes(old_size), Align::from_bytes(align).unwrap())),
MiriMemoryKind::Rust.into(),
)?;
}
"__rust_realloc" => {
let ptr = this.read_scalar(args[0])?.to_ptr()?;
let old_size = this.read_scalar(args[1])?.to_usize(this)?;
let align = this.read_scalar(args[2])?.to_usize(this)?;
let new_size = this.read_scalar(args[3])?.to_usize(this)?;
if old_size == 0 || new_size == 0 {
return err!(HeapAllocZeroBytes);
}
if !align.is_power_of_two() {
return err!(HeapAllocNonPowerOfTwoAlignment(align));
}
let align = Align::from_bytes(align).unwrap();
let new_ptr = this.memory_mut().reallocate(
ptr,
Some((Size::from_bytes(old_size), align)),
Size::from_bytes(new_size),
align,
MiriMemoryKind::Rust.into(),
)?;
this.write_scalar(Scalar::Ptr(new_ptr), dest)?;
}
"syscall" => {
let sys_getrandom = this.eval_path_scalar(&["libc", "SYS_getrandom"])?
.expect("Failed to get libc::SYS_getrandom")
.to_usize(this)?;
// `libc::syscall(NR_GETRANDOM, buf.as_mut_ptr(), buf.len(), GRND_NONBLOCK)`
// is called if a `HashMap` is created the regular way (e.g. HashMap<K, V>).
match this.read_scalar(args[0])?.to_usize(this)? {
id if id == sys_getrandom => {
let ptr = this.read_scalar(args[1])?.not_undef()?;
let len = this.read_scalar(args[2])?.to_usize(this)?;
// The only supported flags are GRND_RANDOM and GRND_NONBLOCK,
// neither of which have any effect on our current PRNG
let _flags = this.read_scalar(args[3])?.to_i32()?;
this.gen_random(len as usize, ptr)?;
this.write_scalar(Scalar::from_uint(len, dest.layout.size), dest)?;
}
id => {
return err!(Unimplemented(
format!("miri does not support syscall ID {}", id),
))
}
}
}
"dlsym" => {
let _handle = this.read_scalar(args[0])?;
let symbol = this.read_scalar(args[1])?.not_undef()?;
let symbol_name = this.memory().read_c_str(symbol)?;
let err = format!("bad c unicode symbol: {:?}", symbol_name);
let symbol_name = ::std::str::from_utf8(symbol_name).unwrap_or(&err);
if let Some(dlsym) = Dlsym::from_str(symbol_name)? {
let ptr = this.memory_mut().create_fn_alloc(FnVal::Other(dlsym));
this.write_scalar(Scalar::from(ptr), dest)?;
} else {
this.write_null(dest)?;
}
}
"__rust_maybe_catch_panic" => {
// fn __rust_maybe_catch_panic(
// f: fn(*mut u8),
// data: *mut u8,
// data_ptr: *mut usize,
// vtable_ptr: *mut usize,
// ) -> u32
// We abort on panic, so not much is going on here, but we still have to call the closure.
let f = this.read_scalar(args[0])?.not_undef()?;
let data = this.read_scalar(args[1])?.not_undef()?;
let f_instance = this.memory().get_fn(f)?.as_instance()?;
this.write_null(dest)?;
trace!("__rust_maybe_catch_panic: {:?}", f_instance);
// Now we make a function call.
// TODO: consider making this reusable? `InterpCx::step` does something similar
// for the TLS destructors, and of course `eval_main`.
let mir = this.load_mir(f_instance.def)?;
let ret_place = MPlaceTy::dangling(this.layout_of(this.tcx.mk_unit())?, this).into();
this.push_stack_frame(
f_instance,
mir.span,
mir,
Some(ret_place),
// Directly return to caller.
StackPopCleanup::Goto(Some(ret)),
)?;
let mut args = this.frame().body.args_iter();
let arg_local = args.next().ok_or_else(||
InterpError::AbiViolation(
"Argument to __rust_maybe_catch_panic does not take enough arguments."
.to_owned(),
),
)?;
let arg_dest = this.eval_place(&mir::Place::Base(mir::PlaceBase::Local(arg_local)))?;
this.write_scalar(data, arg_dest)?;
assert!(args.next().is_none(), "__rust_maybe_catch_panic argument has more arguments than expected");
// We ourselves will return `0`, eventually (because we will not return if we panicked).
this.write_null(dest)?;
// Don't fall through, we do *not* want to `goto_block`!
return Ok(());
}
"memcmp" => {
let left = this.read_scalar(args[0])?.not_undef()?;
let right = this.read_scalar(args[1])?.not_undef()?;
let n = Size::from_bytes(this.read_scalar(args[2])?.to_usize(this)?);
let result = {
let left_bytes = this.memory().read_bytes(left, n)?;
let right_bytes = this.memory().read_bytes(right, n)?;
use std::cmp::Ordering::*;
match left_bytes.cmp(right_bytes) {
Less => -1i32,
Equal => 0,
Greater => 1,
}
};
this.write_scalar(
Scalar::from_int(result, Size::from_bits(32)),
dest,
)?;
}
"memrchr" => {
let ptr = this.read_scalar(args[0])?.not_undef()?;
let val = this.read_scalar(args[1])?.to_i32()? as u8;
let num = this.read_scalar(args[2])?.to_usize(this)?;
if let Some(idx) = this.memory().read_bytes(ptr, Size::from_bytes(num))?
.iter().rev().position(|&c| c == val)
{
let new_ptr = ptr.ptr_offset(Size::from_bytes(num - idx as u64 - 1), this)?;
this.write_scalar(new_ptr, dest)?;
} else {
this.write_null(dest)?;
}
}
"memchr" => {
let ptr = this.read_scalar(args[0])?.not_undef()?;
let val = this.read_scalar(args[1])?.to_i32()? as u8;
let num = this.read_scalar(args[2])?.to_usize(this)?;
let idx = this
.memory()
.read_bytes(ptr, Size::from_bytes(num))?
.iter()
.position(|&c| c == val);
if let Some(idx) = idx {
let new_ptr = ptr.ptr_offset(Size::from_bytes(idx as u64), this)?;
this.write_scalar(new_ptr, dest)?;
} else {
this.write_null(dest)?;
}
}
"getenv" => {
let result = {
let name_ptr = this.read_scalar(args[0])?.not_undef()?;
let name = this.memory().read_c_str(name_ptr)?;
match this.machine.env_vars.get(name) {
Some(&var) => Scalar::Ptr(var),
None => Scalar::ptr_null(&*this.tcx),
}
};
this.write_scalar(result, dest)?;
}
"unsetenv" => {
let mut success = None;
{
let name_ptr = this.read_scalar(args[0])?.not_undef()?;
if !this.is_null(name_ptr)? {
let name = this.memory().read_c_str(name_ptr)?.to_owned();
if !name.is_empty() && !name.contains(&b'=') {
success = Some(this.machine.env_vars.remove(&name));
}
}
}
if let Some(old) = success {
if let Some(var) = old {
this.memory_mut().deallocate(var, None, MiriMemoryKind::Env.into())?;
}
this.write_null(dest)?;
} else {
this.write_scalar(Scalar::from_int(-1, dest.layout.size), dest)?;
}
}
"setenv" => {
let mut new = None;
{
let name_ptr = this.read_scalar(args[0])?.not_undef()?;
let value_ptr = this.read_scalar(args[1])?.not_undef()?;
let value = this.memory().read_c_str(value_ptr)?;
if !this.is_null(name_ptr)? {
let name = this.memory().read_c_str(name_ptr)?;
if !name.is_empty() && !name.contains(&b'=') {
new = Some((name.to_owned(), value.to_owned()));
}
}
}
if let Some((name, value)) = new {
// `+1` for the null terminator.
let value_copy = this.memory_mut().allocate(
Size::from_bytes((value.len() + 1) as u64),
Align::from_bytes(1).unwrap(),
MiriMemoryKind::Env.into(),
);
// We just allocated these, so the write cannot fail.
let alloc = this.memory_mut().get_mut(value_copy.alloc_id).unwrap();
alloc.write_bytes(tcx, value_copy, &value).unwrap();
let trailing_zero_ptr = value_copy.offset(
Size::from_bytes(value.len() as u64),
tcx,
).unwrap();
alloc.write_bytes(tcx, trailing_zero_ptr, &[0]).unwrap();
if let Some(var) = this.machine.env_vars.insert(
name.to_owned(),
value_copy,
)
{
this.memory_mut().deallocate(var, None, MiriMemoryKind::Env.into())?;
}
this.write_null(dest)?;
} else {
this.write_scalar(Scalar::from_int(-1, dest.layout.size), dest)?;
}
}
"write" => {
let fd = this.read_scalar(args[0])?.to_i32()?;
let buf = this.read_scalar(args[1])?.not_undef()?;
let n = this.read_scalar(args[2])?.to_usize(&*this.tcx)?;
trace!("Called write({:?}, {:?}, {:?})", fd, buf, n);
let result = if fd == 1 || fd == 2 {
// stdout/stderr
use std::io::{self, Write};
let buf_cont = this.memory().read_bytes(buf, Size::from_bytes(n))?;
// We need to flush to make sure this actually appears on the screen
let res = if fd == 1 {
// Stdout is buffered, flush to make sure it appears on the screen.
// This is the write() syscall of the interpreted program, we want it
// to correspond to a write() syscall on the host -- there is no point
// in adding extra buffering here.
let res = io::stdout().write(buf_cont);
io::stdout().flush().unwrap();
res
} else {
// No need to flush, stderr is not buffered.
io::stderr().write(buf_cont)
};
match res {
Ok(n) => n as i64,
Err(_) => -1,
}
} else {
eprintln!("Miri: Ignored output to FD {}", fd);
// Pretend it all went well.
n as i64
};
// Now, `result` is the value we return back to the program.
this.write_scalar(
Scalar::from_int(result, dest.layout.size),
dest,
)?;
}
"strlen" => {
let ptr = this.read_scalar(args[0])?.not_undef()?;
let n = this.memory().read_c_str(ptr)?.len();
this.write_scalar(Scalar::from_uint(n as u64, dest.layout.size), dest)?;
}
// math functions
"cbrtf" | "coshf" | "sinhf" |"tanf" => {
// FIXME: Using host floats.
let f = f32::from_bits(this.read_scalar(args[0])?.to_u32()?);
let f = match link_name {
"cbrtf" => f.cbrt(),
"coshf" => f.cosh(),
"sinhf" => f.sinh(),
"tanf" => f.tan(),
_ => bug!(),
};
this.write_scalar(Scalar::from_u32(f.to_bits()), dest)?;
}
// underscore case for windows
"_hypotf" | "hypotf" | "atan2f" => {
// FIXME: Using host floats.
let f1 = f32::from_bits(this.read_scalar(args[0])?.to_u32()?);
let f2 = f32::from_bits(this.read_scalar(args[1])?.to_u32()?);
let n = match link_name {
"_hypotf" | "hypotf" => f1.hypot(f2),
"atan2f" => f1.atan2(f2),
_ => bug!(),
};
this.write_scalar(Scalar::from_u32(n.to_bits()), dest)?;
}
"cbrt" | "cosh" | "sinh" | "tan" => {
// FIXME: Using host floats.
let f = f64::from_bits(this.read_scalar(args[0])?.to_u64()?);
let f = match link_name {
"cbrt" => f.cbrt(),
"cosh" => f.cosh(),
"sinh" => f.sinh(),
"tan" => f.tan(),
_ => bug!(),
};
this.write_scalar(Scalar::from_u64(f.to_bits()), dest)?;
}
// underscore case for windows
"_hypot" | "hypot" | "atan2" => {
// FIXME: Using host floats.
let f1 = f64::from_bits(this.read_scalar(args[0])?.to_u64()?);
let f2 = f64::from_bits(this.read_scalar(args[1])?.to_u64()?);
let n = match link_name {
"_hypot" | "hypot" => f1.hypot(f2),
"atan2" => f1.atan2(f2),
_ => bug!(),
};
this.write_scalar(Scalar::from_u64(n.to_bits()), dest)?;
}
// Some things needed for `sys::thread` initialization to go through.
"signal" | "sigaction" | "sigaltstack" => {
this.write_scalar(Scalar::from_int(0, dest.layout.size), dest)?;
}
"sysconf" => {
let name = this.read_scalar(args[0])?.to_i32()?;
trace!("sysconf() called with name {}", name);
// TODO: Cache the sysconf integers via Miri's global cache.
let paths = &[
(&["libc", "_SC_PAGESIZE"], Scalar::from_int(PAGE_SIZE, dest.layout.size)),
(&["libc", "_SC_GETPW_R_SIZE_MAX"], Scalar::from_int(-1, dest.layout.size)),
(&["libc", "_SC_NPROCESSORS_ONLN"], Scalar::from_int(NUM_CPUS, dest.layout.size)),
];
let mut result = None;
for &(path, path_value) in paths {
if let Some(val) = this.eval_path_scalar(path)? {
let val = val.to_i32()?;
if val == name {
result = Some(path_value);
break;
}
}
}
if let Some(result) = result {
this.write_scalar(result, dest)?;
} else {
return err!(Unimplemented(
format!("Unimplemented sysconf name: {}", name),
));
}
}
"sched_getaffinity" => {
// Return an error; `num_cpus` then falls back to `sysconf`.
this.write_scalar(Scalar::from_int(-1, dest.layout.size), dest)?;
}
"isatty" => {
this.write_null(dest)?;
}
// Hook pthread calls that go to the thread-local storage memory subsystem.
"pthread_key_create" => {
let key_ptr = this.read_scalar(args[0])?.not_undef()?;
// Extract the function type out of the signature (that seems easier than constructing it ourselves).
let dtor = match this.test_null(this.read_scalar(args[1])?.not_undef()?)? {
Some(dtor_ptr) => Some(this.memory().get_fn(dtor_ptr)?.as_instance()?),
None => None,
};
// Figure out how large a pthread TLS key actually is.
// This is `libc::pthread_key_t`.
let key_type = args[0].layout.ty
.builtin_deref(true)
.ok_or_else(|| InterpError::AbiViolation("wrong signature used for `pthread_key_create`: first argument must be a raw pointer.".to_owned()))?
.ty;
let key_layout = this.layout_of(key_type)?;
// Create key and write it into the memory where `key_ptr` wants it.
let key = this.machine.tls.create_tls_key(dtor) as u128;
if key_layout.size.bits() < 128 && key >= (1u128 << key_layout.size.bits() as u128) {
return err!(OutOfTls);
}
let key_ptr = this.memory().check_ptr_access(key_ptr, key_layout.size, key_layout.align.abi)?
.expect("cannot be a ZST");
this.memory_mut().get_mut(key_ptr.alloc_id)?.write_scalar(
tcx,
key_ptr,
Scalar::from_uint(key, key_layout.size).into(),
key_layout.size,
)?;
// Return success (`0`).
this.write_null(dest)?;
}
"pthread_key_delete" => {
let key = this.read_scalar(args[0])?.to_bits(args[0].layout.size)?;
this.machine.tls.delete_tls_key(key)?;
// Return success (0)
this.write_null(dest)?;
}
"pthread_getspecific" => {
let key = this.read_scalar(args[0])?.to_bits(args[0].layout.size)?;
let ptr = this.machine.tls.load_tls(key, tcx)?;
this.write_scalar(ptr, dest)?;
}
"pthread_setspecific" => {
let key = this.read_scalar(args[0])?.to_bits(args[0].layout.size)?;
let new_ptr = this.read_scalar(args[1])?.not_undef()?;
this.machine.tls.store_tls(key, this.test_null(new_ptr)?)?;
// Return success (`0`).
this.write_null(dest)?;
}
// Stack size/address stuff.
"pthread_attr_init" | "pthread_attr_destroy" | "pthread_self" |
"pthread_attr_setstacksize" => {
this.write_null(dest)?;
}
"pthread_attr_getstack" => {
let addr_place = this.deref_operand(args[1])?;
let size_place = this.deref_operand(args[2])?;
this.write_scalar(
Scalar::from_uint(STACK_ADDR, addr_place.layout.size),
addr_place.into(),
)?;
this.write_scalar(
Scalar::from_uint(STACK_SIZE, size_place.layout.size),
size_place.into(),
)?;
// Return success (`0`).
this.write_null(dest)?;
}
// We don't support threading. (Also for Windows.)
"pthread_create" | "CreateThread" => {
return err!(Unimplemented(format!("Miri does not support threading")));
}
// Stub out calls for condvar, mutex and rwlock, to just return `0`.
"pthread_mutexattr_init" | "pthread_mutexattr_settype" | "pthread_mutex_init" |
"pthread_mutexattr_destroy" | "pthread_mutex_lock" | "pthread_mutex_unlock" |
"pthread_mutex_destroy" | "pthread_rwlock_rdlock" | "pthread_rwlock_unlock" |
"pthread_rwlock_wrlock" | "pthread_rwlock_destroy" | "pthread_condattr_init" |
"pthread_condattr_setclock" | "pthread_cond_init" | "pthread_condattr_destroy" |
"pthread_cond_destroy" => {
this.write_null(dest)?;
}
// We don't support fork so we don't have to do anything for atfork.
"pthread_atfork" => {
this.write_null(dest)?;
}
"mmap" => {
// This is a horrible hack, but since the guard page mechanism calls mmap and expects a particular return value, we just give it that value.
let addr = this.read_scalar(args[0])?.not_undef()?;
this.write_scalar(addr, dest)?;
}
"mprotect" => {
this.write_null(dest)?;
}
// macOS API stubs.
"pthread_attr_get_np" | "pthread_getattr_np" => {
this.write_null(dest)?;
}
"pthread_get_stackaddr_np" => {
let stack_addr = Scalar::from_uint(STACK_ADDR, dest.layout.size);
this.write_scalar(stack_addr, dest)?;
}
"pthread_get_stacksize_np" => {
let stack_size = Scalar::from_uint(STACK_SIZE, dest.layout.size);
this.write_scalar(stack_size, dest)?;
}
"_tlv_atexit" => {
// FIXME: register the destructor.
},
"_NSGetArgc" => {
this.write_scalar(Scalar::Ptr(this.machine.argc.unwrap()), dest)?;
},
"_NSGetArgv" => {
this.write_scalar(Scalar::Ptr(this.machine.argv.unwrap()), dest)?;
},
"SecRandomCopyBytes" => {
let len = this.read_scalar(args[1])?.to_usize(this)?;
let ptr = this.read_scalar(args[2])?.not_undef()?;
this.gen_random(len as usize, ptr)?;
this.write_null(dest)?;
}
// Windows API stubs.
// HANDLE = isize
// DWORD = ULONG = u32
// BOOL = i32
"GetProcessHeap" => |
"HeapAlloc" => {
let _handle = this.read_scalar(args[0])?.to_isize(this)?;
let flags = this.read_scalar(args[1])?.to_u32()?;
let size = this.read_scalar(args[2])?.to_usize(this)?;
let zero_init = (flags & 0x00000008) != 0; // HEAP_ZERO_MEMORY
let res = this.malloc(size, zero_init, MiriMemoryKind::WinHeap);
this.write_scalar(res, dest)?;
}
"HeapFree" => {
let _handle = this.read_scalar(args[0])?.to_isize(this)?;
let _flags = this.read_scalar(args[1])?.to_u32()?;
let ptr = this.read_scalar(args[2])?.not_undef()?;
this.free(ptr, MiriMemoryKind::WinHeap)?;
this.write_scalar(Scalar::from_int(1, Size::from_bytes(4)), dest)?;
}
"HeapReAlloc" => {
let _handle = this.read_scalar(args[0])?.to_isize(this)?;
let _flags = this.read_scalar(args[1])?.to_u32()?;
let ptr = this.read_scalar(args[2])?.not_undef()?;
let size = this.read_scalar(args[3])?.to_usize(this)?;
let res = this.realloc(ptr, size, MiriMemoryKind::WinHeap)?;
this.write_scalar(res, dest)?;
}
"SetLastError" => {
let err = this.read_scalar(args[0])?.to_u32()?;
this.machine.last_error = err;
}
"GetLastError" => {
this.write_scalar(Scalar::from_u32(this.machine.last_error), dest)?;
}
"AddVectoredExceptionHandler" => {
// Any non-zero value works for the stdlib. This is just used for stack overflows anyway.
this.write_scalar(Scalar::from_int(1, dest.layout.size), dest)?;
},
"InitializeCriticalSection" |
"EnterCriticalSection" |
"LeaveCriticalSection" |
"DeleteCriticalSection" => {
// Nothing to do, not even a return value.
},
"GetModuleHandleW" |
"GetProcAddress" |
"TryEnterCriticalSection" |
"GetConsoleScreenBufferInfo" |
"SetConsoleTextAttribute" => {
// Pretend these do not exist / nothing happened, by returning zero.
this.write_null(dest)?;
},
"GetSystemInfo" => {
let system_info = this.deref_operand(args[0])?;
let system_info_ptr = this.check_mplace_access(system_info, None)?
.expect("cannot be a ZST");
// Initialize with `0`.
this.memory_mut().get_mut(system_info_ptr.alloc_id)?
.write_repeat(tcx, system_info_ptr, 0, system_info.layout.size)?;
// Set number of processors.
let dword_size = Size::from_bytes(4);
let offset = 2*dword_size + 3*tcx.pointer_size();
this.memory_mut().get_mut(system_info_ptr.alloc_id)?
.write_scalar(
tcx,
system_info_ptr.offset(offset, tcx)?,
Scalar::from_int(NUM_CPUS, dword_size).into(),
dword_size,
)?;
}
"TlsAlloc" => {
// This just creates a key; Windows does not natively support TLS destructors.
// Create key and return it.
let key = this.machine.tls.create_tls_key(None) as u128;
// Figure out how large a TLS key actually is. This is `c::DWORD`.
if dest.layout.size.bits() < 128
&& key >= (1u128 << dest.layout.size.bits() as u128) {
return err!(OutOfTls);
}
this.write_scalar(Scalar::from_uint(key, dest.layout.size), dest)?;
}
"TlsGetValue" => {
let key = this.read_scalar(args[0])?.to_u32()? as u128;
let ptr = this.machine.tls.load_tls(key, tcx)?;
this.write_scalar(ptr, dest)?;
}
"TlsSetValue" => {
let key = this.read_scalar(args[0])?.to_u32()? as u128;
let new_ptr = this.read_scalar(args[1])?.not_undef()?;
this.machine.tls.store_tls(key, this.test_null(new_ptr)?)?;
// Return success (`1`).
this.write_scalar(Scalar::from_int(1, dest.layout.size), dest)?;
}
"GetStdHandle" => {
let which = this.read_scalar(args[0])?.to_i32()?;
// We just make this the identity function, so we know later in `WriteFile`
// which one it is.
this.write_scalar(Scalar::from_int(which, this.pointer_size()), dest)?;
}
"WriteFile" => {
let handle = this.read_scalar(args[0])?.to_isize(this)?;
let buf = this.read_scalar(args[1])?.not_undef()?;
let n = this.read_scalar(args[2])?.to_u32()?;
let written_place = this.deref_operand(args[3])?;
// Spec says to always write `0` first.
this.write_null(written_place.into())?;
let written = if handle == -11 || handle == -12 {
// stdout/stderr
use std::io::{self, Write};
let buf_cont = this.memory().read_bytes(buf, Size::from_bytes(u64::from(n)))?;
let res = if handle == -11 {
io::stdout().write(buf_cont)
} else {
io::stderr().write(buf_cont)
};
res.ok().map(|n| n as u32)
} else {
eprintln!("Miri: Ignored output to handle {}", handle);
// Pretend it all went well.
Some(n)
};
// If there was no error, write back how much was written.
if let Some(n) = written {
this.write_scalar(Scalar::from_u32(n), written_place.into())?;
}
// Return whether this was a success.
this.write_scalar(
Scalar::from_int(if written.is_some() { 1 } else { 0 }, dest.layout.size),
dest,
)?;
}
"GetConsoleMode" => {
// Everything is a pipe.
this.write_null(dest)?;
}
"GetEnvironmentVariableW" => {
// This is not the env var you are looking for.
this.machine.last_error = 203; // ERROR_ENVVAR_NOT_FOUND
this.write_null(dest)?;
}
"GetCommandLineW" => {
this.write_scalar(Scalar::Ptr(this.machine.cmd_line.unwrap()), dest)?;
}
// The actual name of 'RtlGenRandom'
"SystemFunction036" => {
let ptr = this.read_scalar(args[0])?.not_undef()?;
let len = this.read_scalar(args[1])?.to_u32()?;
this.gen_random(len as usize, ptr)?;
this.write_scalar(Scalar::from_bool(true), dest)?;
}
// We can't execute anything else.
_ => {
return err!(Unimplemented(
format!("can't call foreign function: {}", link_name),
));
}
}
this.goto_block(Some(ret))?;
this.dump_place(*dest);
Ok(())
}
/// Evaluates the scalar at the specified path. Returns Some(val)
/// if the path could be resolved, and None otherwise
fn eval_path_scalar(&mut self, path: &[&str]) -> InterpResult<'tcx, Option<ScalarMaybeUndef<Tag>>> {
let this = self.eval_context_mut();
if let Ok(instance) = this.resolve_path(path) {
let cid = GlobalId {
instance,
promoted: None,
};
let const_val = this.const_eval_raw(cid)?;
let const_val = this.read_scalar(const_val.into())?;
return Ok(Some(const_val));
}
return Ok(None);
}
} | {
// Just fake a HANDLE
this.write_scalar(Scalar::from_int(1, this.pointer_size()), dest)?;
} |
AlipayMarketingCampaignPromotionactivityCustomerReceiveRequest.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.FileItem import FileItem
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.AlipayMarketingCampaignPromotionactivityCustomerReceiveModel import AlipayMarketingCampaignPromotionactivityCustomerReceiveModel
class AlipayMarketingCampaignPromotionactivityCustomerReceiveRequest(object):
def __init__(self, biz_model=None):
self._biz_model = biz_model
self._biz_content = None
self._version = "1.0"
self._terminal_type = None
self._terminal_info = None
self._prod_code = None
self._notify_url = None
self._return_url = None
self._udf_params = None
self._need_encrypt = False
@property
def biz_model(self):
return self._biz_model
@biz_model.setter
def biz_model(self, value):
self._biz_model = value
@property
def biz_content(self):
return self._biz_content
@biz_content.setter
def biz_content(self, value):
if isinstance(value, AlipayMarketingCampaignPromotionactivityCustomerReceiveModel):
self._biz_content = value
else:
self._biz_content = AlipayMarketingCampaignPromotionactivityCustomerReceiveModel.from_alipay_dict(value)
@property
def version(self):
return self._version
@version.setter
def version(self, value):
self._version = value
@property
def terminal_type(self):
return self._terminal_type
@terminal_type.setter
def terminal_type(self, value):
self._terminal_type = value
@property
def terminal_info(self):
return self._terminal_info
@terminal_info.setter
def terminal_info(self, value):
self._terminal_info = value
@property
def prod_code(self):
return self._prod_code
@prod_code.setter
def prod_code(self, value):
self._prod_code = value
@property
def notify_url(self):
return self._notify_url
@notify_url.setter
def notify_url(self, value):
self._notify_url = value
@property
def return_url(self):
return self._return_url
@return_url.setter
def return_url(self, value):
self._return_url = value
@property
def udf_params(self):
return self._udf_params
@udf_params.setter
def udf_params(self, value):
if not isinstance(value, dict):
return
self._udf_params = value
@property
def need_encrypt(self):
return self._need_encrypt
@need_encrypt.setter
def need_encrypt(self, value):
self._need_encrypt = value
def | (self, key, value):
if not self.udf_params:
self.udf_params = dict()
self.udf_params[key] = value
def get_params(self):
params = dict()
params[P_METHOD] = 'alipay.marketing.campaign.promotionactivity.customer.receive'
params[P_VERSION] = self.version
if self.biz_model:
params[P_BIZ_CONTENT] = json.dumps(obj=self.biz_model.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
if self.biz_content:
if hasattr(self.biz_content, 'to_alipay_dict'):
params['biz_content'] = json.dumps(obj=self.biz_content.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
else:
params['biz_content'] = self.biz_content
if self.terminal_type:
params['terminal_type'] = self.terminal_type
if self.terminal_info:
params['terminal_info'] = self.terminal_info
if self.prod_code:
params['prod_code'] = self.prod_code
if self.notify_url:
params['notify_url'] = self.notify_url
if self.return_url:
params['return_url'] = self.return_url
if self.udf_params:
params.update(self.udf_params)
return params
def get_multipart_params(self):
multipart_params = dict()
return multipart_params
| add_other_text_param |
Team_20190829011925.js | import React from 'react'
const Team = () => {
return (
<div>
Team Page
</div>
)
} | export default Team | |
station_type.go | package station_type
import (
// Core packages
"context" // https://golang.org/pkg/context
"database/sql"
"time"
// Third-party packages
"github.com/pkg/errors"
"github.com/google/uuid"
"github.com/jmoiron/sqlx"
"go.opencensus.io/trace"
)
// Predefined errors identify expected failure conditions.
var (
// ErrNotFound is used when a specific StationType is requested but does not exist.
ErrNotFound = errors.New("station type not found")
// ErrInvalidID is used when an invalid UUID is provided.
ErrInvalidID = errors.New("ID is not in its proper UUID format")
)
// Create adds a StationType to the database. It returns the created StationType with
// fields like ID and DateCreated populated.
func Create(ctx context.Context, db *sqlx.DB, nst NewStationType, now time.Time) (*StationType, error) {
ctx, span := trace.StartSpan(ctx, "station_type.Create")
defer span.End()
st := StationType{
Id: uuid.New().String(),
Name: nst.Name,
Description: nst.Description,
DateCreated: now.UTC(),
DateUpdated: now.UTC(),
}
const q = `
INSERT INTO station_type
(id, name, description, date_created, date_updated)
VALUES ($1, $2, $3, $4, $5)`
_, err := db.ExecContext(ctx, q,
st.Id,
st.Name,
st.Description,
st.DateCreated,
st.DateUpdated,
)
if err != nil {
return nil, errors.Wrap(err, "inserting station type")
}
return &st, nil
}
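// A minimal usage sketch (db wiring is assumed; field names follow the
// NewStationType usage above):
//
// nst := NewStationType{Name: "weather", Description: "outdoor sensors"}
// st, err := Create(ctx, db, nst, time.Now())
// // st.Id and st.DateCreated are populated on success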
// Delete removes the station type identified by a given ID.
func Delete(ctx context.Context, db *sqlx.DB, id string) error {
ctx, span := trace.StartSpan(ctx, "station_type.Delete")
defer span.End()
// Validate id is a valid uuid
if _, err := uuid.Parse(id); err != nil {
return ErrInvalidID
}
const q = `DELETE FROM station_type WHERE id = $1`
if _, err := db.ExecContext(ctx, q, id); err != nil {
return errors.Wrapf(err, "deleting station type %s", id)
}
return nil
}
// List gets all StationTypes from the database.
func List(ctx context.Context, db *sqlx.DB) ([]StationType, error) {
ctx, span := trace.StartSpan(ctx, "station_type.List")
defer span.End()
stationTypes := []StationType{}
const q = `
SELECT
station_type.id,
station_type.name,
station_type.description,
COUNT(station.id) AS stations,
station_type.date_created,
station_type.date_updated
FROM station_type
LEFT JOIN station ON station_type.id = station.station_type_id
GROUP BY station_type.id`
if err := db.SelectContext(ctx, &stationTypes, q); err != nil {
return nil, errors.Wrap(err, "selecting station types")
}
return stationTypes, nil
}
// Get finds the StationType identified by a given ID.
func Get(ctx context.Context, db *sqlx.DB, id string) (*StationType, error) {
ctx, span := trace.StartSpan(ctx, "station_type.Get")
defer span.End()
if _, err := uuid.Parse(id); err != nil {
return nil, ErrInvalidID
}
var st StationType
const q = `
SELECT
station_type.id,
station_type.name,
station_type.description,
COUNT(station.id) AS stations,
station_type.date_created,
station_type.date_updated
FROM station_type
LEFT JOIN station ON station_type.id = station.station_type_id
WHERE station_type.id = $1
GROUP BY station_type.id`
if err := db.GetContext(ctx, &st, q, id); err != nil |
return &st, nil
}
// Update modifies data about a StationType. It will error if the specified ID is
// invalid or does not reference an existing StationType.
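// A minimal usage sketch (illustrative value; only the name is changed):
//
//	name := "renamed"
//	err := Update(ctx, db, id, UpdateStationType{Name: &name}, time.Now())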
func Update(ctx context.Context, db *sqlx.DB, id string, update UpdateStationType, now time.Time) error {
ctx, span := trace.StartSpan(ctx, "station_type.Update")
defer span.End()
st, err := Get(ctx, db, id)
if err != nil {
return err
}
if update.Name != nil {
st.Name = *update.Name
}
if update.Description != nil {
st.Description = *update.Description
}
st.DateUpdated = now
const q = `UPDATE station_type SET
"name" = $2,
"description" = $3,
"date_updated" = $4
WHERE id = $1`
_, err = db.ExecContext(ctx, q, id,
st.Name,
st.Description,
st.DateUpdated,
)
if err != nil {
return errors.Wrap(err, "updating station tyoe")
}
return nil
}
| {
if err == sql.ErrNoRows {
return nil, ErrNotFound
}
return nil, errors.Wrap(err, "selecting single station type")
} |
_filter.py | # ----------------------------------------------------------------------------
# Copyright (c) 2016-2020, QIIME 2 development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
import biom
import qiime2
import numpy as np
import pandas as pd
def _get_biom_filter_function(ids_to_keep, min_frequency, max_frequency,
min_nonzero, max_nonzero):
|
_other_axis_map = {'sample': 'observation', 'observation': 'sample'}
def _filter_table(table, min_frequency, max_frequency, min_nonzero,
max_nonzero, metadata, where, axis, exclude_ids=False):
if min_frequency == 0 and max_frequency is None and min_nonzero == 0 and\
max_nonzero is None and metadata is None and where is None and\
exclude_ids is False:
raise ValueError("No filtering was requested.")
if metadata is None and where is not None:
raise ValueError("Metadata must be provided if 'where' is "
"specified.")
if metadata is None and exclude_ids is True:
raise ValueError("Metadata must be provided if 'exclude_ids' "
"is True.")
if metadata is not None:
ids_to_keep = metadata.get_ids(where=where)
else:
ids_to_keep = table.ids(axis=axis)
if exclude_ids is True:
ids_to_keep = set(table.ids(axis=axis)) - set(ids_to_keep)
filter_fn1 = _get_biom_filter_function(
ids_to_keep, min_frequency, max_frequency, min_nonzero, max_nonzero)
table.filter(filter_fn1, axis=axis, inplace=True)
# filter on the opposite axis to remove any entities that now have a
# frequency of zero
filter_fn2 = _get_biom_filter_function(
ids_to_keep=table.ids(axis=_other_axis_map[axis]), min_frequency=0,
max_frequency=None, min_nonzero=1, max_nonzero=None)
table.filter(filter_fn2, axis=_other_axis_map[axis], inplace=True)
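# Usage sketch (hypothetical threshold): keeping only samples with at least
# 1000 total observations also drops any features left with zero counts:
#   filtered_table = filter_samples(table, min_frequency=1000)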
def filter_samples(table: biom.Table, min_frequency: int = 0,
max_frequency: int = None, min_features: int = 0,
max_features: int = None,
metadata: qiime2.Metadata = None, where: str = None,
exclude_ids: bool = False)\
-> biom.Table:
_filter_table(table=table, min_frequency=min_frequency,
max_frequency=max_frequency, min_nonzero=min_features,
max_nonzero=max_features, metadata=metadata,
where=where, axis='sample', exclude_ids=exclude_ids)
return table
def filter_features(table: biom.Table, min_frequency: int = 0,
max_frequency: int = None, min_samples: int = 0,
max_samples: int = None,
metadata: qiime2.Metadata = None, where: str = None,
exclude_ids: bool = False)\
-> biom.Table:
_filter_table(table=table, min_frequency=min_frequency,
max_frequency=max_frequency, min_nonzero=min_samples,
max_nonzero=max_samples, metadata=metadata,
where=where, axis='observation', exclude_ids=exclude_ids)
return table
def filter_seqs(data: pd.Series, table: biom.Table = None,
metadata: qiime2.Metadata = None, where: str = None,
exclude_ids: bool = False) -> pd.Series:
if table is not None and metadata is not None:
raise ValueError('Filtering with metadata and filtering with a table '
'are mutually exclusive.')
elif table is None and metadata is None:
raise ValueError('No filtering requested. Must provide either table '
'or metadata.')
elif table is not None:
ids_to_keep = table.ids(axis='observation')
else:
# Note, no need to check for missing feature IDs in the metadata,
# because that is basically the point of this method.
ids_to_keep = metadata.get_ids(where=where)
if exclude_ids is True:
ids_to_keep = set(data.index) - set(ids_to_keep)
filtered = data[data.index.isin(ids_to_keep)]
if filtered.empty is True:
raise ValueError('All features were filtered out of the data.')
return filtered
| ids_to_keep = set(ids_to_keep)
if max_frequency is None:
max_frequency = np.inf
if max_nonzero is None:
max_nonzero = np.inf
def f(data_vector, id_, metadata):
return (id_ in ids_to_keep) and \
(min_frequency <= data_vector.sum() <= max_frequency) and \
(min_nonzero <= (data_vector > 0).sum() <= max_nonzero)
return f |
urls.py | """practice URL Configuration | Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.conf import settings
# from django.contrib import admin
from django.urls import include, path
urlpatterns = [
# path("admin/", admin.site.urls),
path("", include("dictionary.urls")),
]
if "debug_toolbar" in settings.INSTALLED_APPS:
urlpatterns.insert(0, path("__debug__/", include("debug_toolbar.urls"))) |
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/4.0/topics/http/urls/
Examples: |
LimitedReorderDecoder.py | from operator import itemgetter
from itertools import groupby
from Decoder import Decoder
from collections import namedtuple
class LimitedReorderDecoder(Decoder):
def __init__(self, tm, lm, maxsize, reorderlimit, threshold):
# model and option inputs
self.tm = tm
self.lm = lm
self.ms = maxsize
self.rl = reorderlimit
self.th = threshold
# initialize necessary data structures
self.stacks = {}
self.cost = {}
self.best = []
self.hypothesis = namedtuple("hypothesis", "logprob, cost, lm_state, predecessor, phrase, covered, num")
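# hypothesis fields: logprob = accumulated model score, cost = future cost
# estimate, covered = 0/1 flags over the source words, num = number of
# source words covered so far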
def initialize(self, sentence):
# calculate future cost estimates
self.cost = {}
for length in xrange(1, len(sentence)):
for start in range(len(sentence) + 1 - length):
end = start + length
self.cost[(start, end)] = float('-inf')
if sentence[start:end] in self.tm:
for phrase in self.tm[sentence[start:end]]:
option_cost = phrase.logprob
lm_state = ()
for word in phrase.english.split():
(lm_state, word_logprob) = self.lm.score(lm_state, word)
option_cost += word_logprob
if option_cost > self.cost[(start, end)]:
self.cost[(start, end)] = option_cost
for i in xrange(start + 1, end):
if self.cost[(start, i)] + self.cost[(i,end)] > self.cost[(start, end)]:
self.cost[(start,end)] = self.cost[(start, i)] + self.cost[(i,end)]
initial_hypothesis = self.hypothesis(0.0, self.cost[(0, len(sentence) - 1)], self.lm.begin(), None, None, [0] * len(sentence), 0)
self.stacks = [{} for _ in sentence] + [{}]
self.stacks[0][(self.lm.begin(), tuple([0] * len(sentence)))] = initial_hypothesis
self.best = [float('-inf') for i in range(len(sentence) + 1)]
def | (self, j, k, h, phrase, ls):
# get new language model state and logprob
logprob = h.logprob + phrase.logprob
lm_state = h.lm_state
for word in phrase.english.split():
(lm_state, word_logprob) = self.lm.score(lm_state, word)
logprob += word_logprob
# create new covered list
covered = list(h.covered)
for i in xrange(j, k):
covered[i] = 1
# if we have a full translation hypothesis, add in end state to logprob.
if len([i for i in covered if i == 1]) == ls:
logprob += self.lm.end(lm_state)
# estimate future cost
uncovered = [index for index, value in enumerate(covered) if value == 0]
spans = [map(itemgetter(1), g) for f, g in groupby(enumerate(uncovered), lambda (i, x):i-x)]
future_cost = 0.0
for span in spans:
future_cost += self.cost[(span[0], span[-1] + 1)]
# create new hypothesis with backpointer
nh = self.hypothesis(logprob, future_cost, lm_state, h, phrase, covered, h.num + (k - j))
covered = tuple(covered)
# if hypothesis is below threshold, ignore it
if logprob + future_cost > self.best[nh.num]:
self.best[nh.num] = logprob + future_cost
elif logprob + future_cost < self.best[nh.num] * self.th:
return
# add hypothesis to stack
if (lm_state, covered) not in self.stacks[h.num + k - j]:
self.stacks[nh.num][(lm_state, covered)] = nh
# do recombination if necessary
elif self.stacks[nh.num][(lm_state, covered)].logprob < logprob:
self.stacks[nh.num][(lm_state, covered)] = nh
def decode(self, sentence):
self.initialize(sentence)
for stack in self.stacks[:-1]:
for h in sorted(stack.itervalues(),key=lambda h: -h.logprob - h.cost)[:self.ms]: # prune
# build valid phrase indexes, split into ranges for easy iteration
uncovered = [index for index, value in enumerate(h.covered) if value == 0]
ranges = [map(itemgetter(1), g) for k, g in groupby(enumerate(uncovered), lambda (i, x):i-x)]
# determine maximum starting phrase position given reordering limit
limit = uncovered[min(len(uncovered) - 1, self.rl)]
# for every possible phrase in the valid indexes, build a hypothesis
for r in ranges:
for j in xrange(r[0], min(r[-1], limit)+1):
for k in xrange(j+1,r[-1]+2):
if sentence[j:k] in self.tm:
for phrase in self.tm[sentence[j:k]]:
self.update(j, k, h, phrase, len(sentence))
return max(self.stacks[-1].itervalues(), key=lambda h: h.logprob)
| update |
splunk-local-app.service.ts | import { Injectable } from '@angular/core';
import { HttpClient, HttpHeaders } from '@angular/common/http';
import { environment } from '../../../environments/environment';
@Injectable({
providedIn: 'root'
})
export class | {
constructor(
private http: HttpClient
) { }
getAllApps() {
return this.http.get(`${environment.apps}/list`);
}
deleteApp(appName) {
return this.http.delete(`${environment.apps}/delete/${appName}`);
}
}
| SplunkLocalAppService |
File.ts | class File {
readonly filepath: string
readonly data: string | this.data = data
}
}
export default File |
constructor(filepath: string, data: string) {
this.filepath = filepath |
test_foreignkey.py | #!/usr/bin/python
import re
import unittest
import schemaobject
class TestForeignKeySchema(unittest.TestCase):
def setUp(self):
self.database_url = "mysql://root:root@localhost:3306/"
self.schema = schemaobject.SchemaObject(self.database_url + 'sakila', charset='utf8')
self.fk = self.schema.selected.tables['rental'].foreign_keys
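# These tests assume a local MySQL server with the "sakila" sample database loaded.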
def test_fk_exists(self):
self.assertTrue("fk_rental_customer" in list(self.fk.keys()))
def test_fk_not_exists(self):
self.assertFalse("fk_foobar" in list(self.fk.keys()))
def test_fk_name(self):
self.assertEqual("fk_rental_customer", self.fk['fk_rental_customer'].name)
def test_fk_symbol(self):
self.assertEqual("fk_rental_customer", self.fk['fk_rental_customer'].symbol)
def test_fk_table_name(self):
self.assertEqual("rental", self.fk['fk_rental_customer'].table_name)
def test_fk_table_schema(self):
self.assertEqual("sakila", self.fk['fk_rental_customer'].table_schema)
def test_fk_columns(self):
self.assertEqual(['customer_id'], self.fk['fk_rental_customer'].columns)
def test_fk_referenced_table_name(self):
self.assertEqual("customer", self.fk['fk_rental_customer'].referenced_table_name)
def test_fk_referenced_table_schema(self):
self.assertEqual("sakila", self.fk['fk_rental_customer'].referenced_table_schema)
def test_fk_referenced_columns(self):
self.assertEqual(['customer_id'], self.fk['fk_rental_customer'].referenced_columns)
def test_fk_match_option(self):
self.assertEqual(None, self.fk['fk_rental_customer'].match_option)
def test_fk_update_rule(self):
self.assertEqual("CASCADE", self.fk['fk_rental_customer'].update_rule)
def test_fk_delete_rule(self):
self.assertEqual("RESTRICT", self.fk['fk_rental_customer'].delete_rule)
def test_format_referenced_col_with_length(self):
self.assertEqual('`fk_rental_customer`(11)', schemaobject.foreignkey.ForeignKeySchema._format_referenced_col('fk_rental_customer', 11))
def test_format_referenced_col_without_length(self):
self.assertEqual('`fk_rental_customer`', schemaobject.foreignkey.ForeignKeySchema._format_referenced_col('fk_rental_customer', 0))
self.assertEqual('`fk_rental_customer`', schemaobject.foreignkey.ForeignKeySchema._format_referenced_col('fk_rental_customer', None))
def test_fk_drop(self):
|
def test_fk_create(self):
self.assertEqual(self.fk['fk_rental_customer'].create(),
"ADD CONSTRAINT `fk_rental_customer` FOREIGN KEY `fk_rental_customer` (`customer_id`) REFERENCES `customer` (`customer_id`) ON DELETE RESTRICT ON UPDATE CASCADE")
def test_fk_eq(self):
self.assertEqual(self.fk['fk_rental_customer'], self.fk['fk_rental_customer'])
def test_fk_neq(self):
self.assertNotEqual(self.fk['fk_rental_customer'], self.fk['fk_rental_inventory'])
# def test_fk_reference_opts_update_and_delete(self):
# table_def = """CREATE TABLE `child` (
# `id` int(11) DEFAULT NULL,
# `parent_id` int(11) DEFAULT NULL,
# KEY `par_ind` (`parent_id`),
# CONSTRAINT `child_ibfk_1` FOREIGN KEY (`parent_id`)
# REFERENCES `parent` (`id`) ON DELETE SET NULL ON UPDATE CASCADE,
# CONSTRAINT `child_ibfk_2` FOREIGN KEY (`parent_id`)
# REFERENCES `parent` (`id`) ON DELETE RESTRICT ON UPDATE RESTRICT )
# ENGINE=InnoDB DEFAULT CHARSET=latin1 COLLATE=latin1_danish_ci COMMENT='hello world';"""
#
# matches = re.search(REGEX_FK_REFERENCE_OPTIONS % 'child_ibfk_1', table_def, re.X)
# self.assertTrue(matches)
# self.assertTrue(matches.group('on_delete'))
# self.assertTrue(matches.group('on_update'))
# self.assertEqual(matches.group('on_delete'), 'SET NULL')
# self.assertEqual(matches.group('on_update'), 'CASCADE')
#
#         matches = re.search(REGEX_FK_REFERENCE_OPTIONS % 'child_ibfk_2', table_def, re.X)
# self.assertTrue(matches)
# self.assertTrue(matches.group('on_delete'))
# self.assertTrue(matches.group('on_update'))
# self.assertEqual(matches.group('on_delete'), 'RESTRICT')
# self.assertEqual(matches.group('on_update'), 'RESTRICT')
#
# def test_fk_reference_opts_delete(self):
# table_def = """CREATE TABLE `child` (
# `id` int(11) DEFAULT NULL,
# `parent_id` int(11) DEFAULT NULL,
# KEY `par_ind` (`parent_id`),
# CONSTRAINT `child_ibfk_1` FOREIGN KEY (`parent_id`) REFERENCES `parent` (`id`) ON DELETE SET NULL )
# ENGINE=InnoDB DEFAULT CHARSET=latin1 COLLATE=latin1_danish_ci COMMENT='hello world';"""
#
# matches = re.search(REGEX_FK_REFERENCE_OPTIONS % 'child_ibfk_1', table_def, re.X)
# self.assertTrue(matches)
# self.assertTrue(matches.group('on_delete'))
# self.assertTrue(not matches.group('on_update'))
# self.assertEqual(matches.group('on_delete'), 'SET NULL')
#
# def test_fk_reference_opts_update(self):
# table_def = """CREATE TABLE `child` (
# `id` int(11) DEFAULT NULL,
# `parent_id` int(11) DEFAULT NULL,
# KEY `par_ind` (`parent_id`),
# CONSTRAINT `child_ibfk_1` FOREIGN KEY (`parent_id`) REFERENCES `parent` (`id`) ON UPDATE CASCADE )
# ENGINE=InnoDB DEFAULT CHARSET=latin1 COLLATE=latin1_danish_ci COMMENT='hello world';"""
#
# matches = re.search(REGEX_FK_REFERENCE_OPTIONS % 'child_ibfk_1', table_def, re.X)
# self.assertTrue(matches)
# self.assertTrue(not matches.group('on_delete'))
# self.assertTrue(matches.group('on_update'))
# self.assertEqual(matches.group('on_update'), 'CASCADE') | self.assertEqual(self.fk['fk_rental_customer'].drop(), "DROP FOREIGN KEY `fk_rental_customer`") |
dip4-coinbasemerkleroots.py | #!/usr/bin/env python3
# Copyright (c) 2015-2018 The Dash Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from collections import namedtuple
from test_framework.mininode import *
from test_framework.test_framework import WildfireTestFramework
from test_framework.util import *
from time import *
'''
dip4-coinbasemerkleroots.py
Checks DIP4 merkle roots in coinbases
'''
class TestNode(SingleNodeConnCB):
def __init__(self):
SingleNodeConnCB.__init__(self)
self.last_mnlistdiff = None
def on_mnlistdiff(self, conn, message):
self.last_mnlistdiff = message
def wait_for_mnlistdiff(self, timeout=30):
self.last_mnlistdiff = None
def received_mnlistdiff():
return self.last_mnlistdiff is not None
return wait_until(received_mnlistdiff, timeout=timeout)
def getmnlistdiff(self, baseBlockHash, blockHash):
msg = msg_getmnlistd(baseBlockHash, blockHash)
self.send_message(msg)
self.wait_for_mnlistdiff()
return self.last_mnlistdiff
class LLMQCoinbaseCommitmentsTest(WildfireTestFramework):
def __init__(self):
super().__init__(6, 5, [], fast_dip3_enforcement=True)
def run_test(self):
self.test_node = TestNode()
self.test_node.add_connection(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], self.test_node))
NetworkThread().start() # Start up network handling in another thread
self.test_node.wait_for_verack()
self.confirm_mns()
null_hash = format(0, "064x")
# Check if a diff with the genesis block as base returns all MNs
expectedUpdated = [mn.proTxHash for mn in self.mninfo]
mnList = self.test_getmnlistdiff(null_hash, self.nodes[0].getbestblockhash(), {}, [], expectedUpdated)
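# Keep a copy of the full expected MN list for the genesis-based diff check at the end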
expectedUpdated2 = expectedUpdated + []
# Register one more MN, but don't start it (that would fail as WildfireTestFramework doesn't support this atm)
baseBlockHash = self.nodes[0].getbestblockhash()
self.prepare_masternode(self.mn_count)
new_mn = self.mninfo[self.mn_count]
# Now test if that MN appears in a diff when the base block is the one just before MN registration
expectedDeleted = []
expectedUpdated = [new_mn.proTxHash]
mnList = self.test_getmnlistdiff(baseBlockHash, self.nodes[0].getbestblockhash(), mnList, expectedDeleted, expectedUpdated)
assert(mnList[new_mn.proTxHash].confirmedHash == 0)
# Now let the MN get enough confirmations and verify that the MNLISTDIFF now has confirmedHash != 0
self.confirm_mns()
mnList = self.test_getmnlistdiff(baseBlockHash, self.nodes[0].getbestblockhash(), mnList, expectedDeleted, expectedUpdated)
assert(mnList[new_mn.proTxHash].confirmedHash != 0)
# Spend the collateral of the previously added MN and test if it appears in "deletedMNs"
expectedDeleted = [new_mn.proTxHash]
expectedUpdated = []
baseBlockHash2 = self.nodes[0].getbestblockhash()
self.remove_mastermode(self.mn_count)
mnList = self.test_getmnlistdiff(baseBlockHash2, self.nodes[0].getbestblockhash(), mnList, expectedDeleted, expectedUpdated)
# When comparing genesis and best block, we shouldn't see the previously added and then deleted MN
mnList = self.test_getmnlistdiff(null_hash, self.nodes[0].getbestblockhash(), {}, [], expectedUpdated2)
#############################
# Now start testing quorum commitment merkle roots
self.nodes[0].generate(1)
oldhash = self.nodes[0].getbestblockhash()
# Test DIP8 activation once with a pre-existing quorum and once without (we don't know in which order it will activate on mainnet)
self.test_dip8_quorum_merkle_root_activation(True)
for n in self.nodes:
n.invalidateblock(oldhash)
self.sync_all()
first_quorum = self.test_dip8_quorum_merkle_root_activation(False)
self.nodes[0].spork("SPORK_17_QUORUM_DKG_ENABLED", 0)
self.wait_for_sporks_same()
# Verify that the first quorum appears in MNLISTDIFF
expectedDeleted = []
expectedNew = [QuorumId(100, int(first_quorum, 16))]
quorumList = self.test_getmnlistdiff_quorums(null_hash, self.nodes[0].getbestblockhash(), {}, expectedDeleted, expectedNew)
baseBlockHash = self.nodes[0].getbestblockhash()
second_quorum = self.mine_quorum()
# Verify that the second quorum appears in MNLISTDIFF
expectedDeleted = []
expectedNew = [QuorumId(100, int(second_quorum, 16))]
quorums_before_third = self.test_getmnlistdiff_quorums(baseBlockHash, self.nodes[0].getbestblockhash(), quorumList, expectedDeleted, expectedNew)
block_before_third = self.nodes[0].getbestblockhash()
third_quorum = self.mine_quorum()
# Verify that the first quorum is deleted and the third quorum is added in MNLISTDIFF (the first got inactive)
expectedDeleted = [QuorumId(100, int(first_quorum, 16))]
expectedNew = [QuorumId(100, int(third_quorum, 16))]
self.test_getmnlistdiff_quorums(block_before_third, self.nodes[0].getbestblockhash(), quorums_before_third, expectedDeleted, expectedNew)
# Verify that the diff between genesis and best block is the current active set (second and third quorum)
expectedDeleted = []
expectedNew = [QuorumId(100, int(second_quorum, 16)), QuorumId(100, int(third_quorum, 16))]
self.test_getmnlistdiff_quorums(null_hash, self.nodes[0].getbestblockhash(), {}, expectedDeleted, expectedNew)
# Now verify that diffs are correct around the block that mined the third quorum.
# This tests the logic in CalcCbTxMerkleRootQuorums, which has to manually add the commitment from the current
# block
mined_in_block = self.nodes[0].quorum("info", 100, third_quorum)["minedBlock"]
prev_block = self.nodes[0].getblock(mined_in_block)["previousblockhash"]
prev_block2 = self.nodes[0].getblock(prev_block)["previousblockhash"]
next_block = self.nodes[0].getblock(mined_in_block)["nextblockhash"]
next_block2 = self.nodes[0].getblock(next_block)["nextblockhash"]
# The two blocks before the quorum was mined should both give an empty diff
expectedDeleted = []
expectedNew = []
self.test_getmnlistdiff_quorums(block_before_third, prev_block2, quorums_before_third, expectedDeleted, expectedNew)
self.test_getmnlistdiff_quorums(block_before_third, prev_block, quorums_before_third, expectedDeleted, expectedNew)
# The block in which the quorum was mined and the 2 after that should all give the same diff
expectedDeleted = [QuorumId(100, int(first_quorum, 16))]
expectedNew = [QuorumId(100, int(third_quorum, 16))]
quorums_with_third = self.test_getmnlistdiff_quorums(block_before_third, mined_in_block, quorums_before_third, expectedDeleted, expectedNew)
self.test_getmnlistdiff_quorums(block_before_third, next_block, quorums_before_third, expectedDeleted, expectedNew)
self.test_getmnlistdiff_quorums(block_before_third, next_block2, quorums_before_third, expectedDeleted, expectedNew)
# A diff between the two blocks that happened after the quorum was mined should give an empty diff
expectedDeleted = []
expectedNew = []
self.test_getmnlistdiff_quorums(mined_in_block, next_block, quorums_with_third, expectedDeleted, expectedNew)
self.test_getmnlistdiff_quorums(mined_in_block, next_block2, quorums_with_third, expectedDeleted, expectedNew)
self.test_getmnlistdiff_quorums(next_block, next_block2, quorums_with_third, expectedDeleted, expectedNew)
# Using the same block for baseBlockHash and blockHash should give empty diffs
self.test_getmnlistdiff_quorums(prev_block, prev_block, quorums_before_third, expectedDeleted, expectedNew)
self.test_getmnlistdiff_quorums(prev_block2, prev_block2, quorums_before_third, expectedDeleted, expectedNew)
self.test_getmnlistdiff_quorums(mined_in_block, mined_in_block, quorums_with_third, expectedDeleted, expectedNew)
self.test_getmnlistdiff_quorums(next_block, next_block, quorums_with_third, expectedDeleted, expectedNew)
self.test_getmnlistdiff_quorums(next_block2, next_block2, quorums_with_third, expectedDeleted, expectedNew)
def test_getmnlistdiff(self, baseBlockHash, blockHash, baseMNList, expectedDeleted, expectedUpdated):
d = self.test_getmnlistdiff_base(baseBlockHash, blockHash)
# Assert that the deletedMNs and mnList fields are what we expected
assert_equal(set(d.deletedMNs), set([int(e, 16) for e in expectedDeleted]))
assert_equal(set([e.proRegTxHash for e in d.mnList]), set(int(e, 16) for e in expectedUpdated))
# Build a new list based on the old list and the info from the diff
newMNList = baseMNList.copy()
for e in d.deletedMNs:
newMNList.pop(format(e, '064x'))
for e in d.mnList:
newMNList[format(e.proRegTxHash, '064x')] = e
cbtx = CCbTx()
cbtx.deserialize(BytesIO(d.cbTx.vExtraPayload))
# Verify that the merkle root matches what we locally calculate
hashes = []
for mn in sorted(newMNList.values(), key=lambda mn: ser_uint256(mn.proRegTxHash)):
hashes.append(hash256(mn.serialize()))
merkleRoot = CBlock.get_merkle_root(hashes)
assert_equal(merkleRoot, cbtx.merkleRootMNList)
return newMNList
def test_getmnlistdiff_quorums(self, baseBlockHash, blockHash, baseQuorumList, expectedDeleted, expectedNew):
d = self.test_getmnlistdiff_base(baseBlockHash, blockHash)
assert_equal(set(d.deletedQuorums), set(expectedDeleted))
assert_equal(set([QuorumId(e.llmqType, e.quorumHash) for e in d.newQuorums]), set(expectedNew))
newQuorumList = baseQuorumList.copy()
for e in d.deletedQuorums:
newQuorumList.pop(e)
for e in d.newQuorums:
newQuorumList[QuorumId(e.llmqType, e.quorumHash)] = e
cbtx = CCbTx()
cbtx.deserialize(BytesIO(d.cbTx.vExtraPayload))
if cbtx.version >= 2:
hashes = []
for qc in newQuorumList.values():
hashes.append(hash256(qc.serialize()))
hashes.sort()
merkleRoot = CBlock.get_merkle_root(hashes)
assert_equal(merkleRoot, cbtx.merkleRootQuorums)
return newQuorumList
def | (self, baseBlockHash, blockHash):
hexstr = self.nodes[0].getblockheader(blockHash, False)
header = FromHex(CBlockHeader(), hexstr)
d = self.test_node.getmnlistdiff(int(baseBlockHash, 16), int(blockHash, 16))
assert_equal(d.baseBlockHash, int(baseBlockHash, 16))
assert_equal(d.blockHash, int(blockHash, 16))
# Check that the merkle proof is valid
proof = CMerkleBlock(header, d.merkleProof)
proof = proof.serialize().hex()
assert_equal(self.nodes[0].verifytxoutproof(proof), [d.cbTx.hash])
# Check if P2P messages match with RPCs
d2 = self.nodes[0].protx("diff", baseBlockHash, blockHash)
assert_equal(d2["baseBlockHash"], baseBlockHash)
assert_equal(d2["blockHash"], blockHash)
assert_equal(d2["cbTxMerkleTree"], d.merkleProof.serialize().hex())
assert_equal(d2["cbTx"], d.cbTx.serialize().hex())
assert_equal(set([int(e, 16) for e in d2["deletedMNs"]]), set(d.deletedMNs))
assert_equal(set([int(e["proRegTxHash"], 16) for e in d2["mnList"]]), set([e.proRegTxHash for e in d.mnList]))
assert_equal(set([QuorumId(e["llmqType"], int(e["quorumHash"], 16)) for e in d2["deletedQuorums"]]), set(d.deletedQuorums))
assert_equal(set([QuorumId(e["llmqType"], int(e["quorumHash"], 16)) for e in d2["newQuorums"]]), set([QuorumId(e.llmqType, e.quorumHash) for e in d.newQuorums]))
return d
def test_dip8_quorum_merkle_root_activation(self, with_initial_quorum):
if with_initial_quorum:
self.nodes[0].spork("SPORK_17_QUORUM_DKG_ENABLED", 0)
self.wait_for_sporks_same()
# Mine one quorum before dip8 is activated
self.mine_quorum()
self.nodes[0].spork("SPORK_17_QUORUM_DKG_ENABLED", 4070908800)
self.wait_for_sporks_same()
cbtx = self.nodes[0].getblock(self.nodes[0].getbestblockhash(), 2)["tx"][0]
assert(cbtx["cbTx"]["version"] == 1)
assert(self.nodes[0].getblockchaininfo()["bip9_softforks"]["dip0008"]["status"] != "active")
while self.nodes[0].getblockchaininfo()["bip9_softforks"]["dip0008"]["status"] != "active":
self.nodes[0].generate(4)
self.sync_all()
self.nodes[0].generate(1)
sync_blocks(self.nodes)
# Assert that merkleRootQuorums is present and 0 (we have no quorums yet)
cbtx = self.nodes[0].getblock(self.nodes[0].getbestblockhash(), 2)["tx"][0]
assert_equal(cbtx["cbTx"]["version"], 2)
assert("merkleRootQuorums" in cbtx["cbTx"])
merkleRootQuorums = int(cbtx["cbTx"]["merkleRootQuorums"], 16)
if with_initial_quorum:
assert(merkleRootQuorums != 0)
else:
assert_equal(merkleRootQuorums, 0)
set_mocktime(get_mocktime() + 1)
set_node_times(self.nodes, get_mocktime())
self.nodes[0].spork("SPORK_17_QUORUM_DKG_ENABLED", 0)
self.wait_for_sporks_same()
# Mine quorum and verify that merkleRootQuorums has changed
quorum = self.mine_quorum()
cbtx = self.nodes[0].getblock(self.nodes[0].getbestblockhash(), 2)["tx"][0]
assert(int(cbtx["cbTx"]["merkleRootQuorums"], 16) != merkleRootQuorums)
return quorum
def confirm_mns(self):
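# Mine blocks until every masternode in the diff reports a non-zero confirmedHash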
while True:
diff = self.nodes[0].protx("diff", 1, self.nodes[0].getblockcount())
found_unconfirmed = False
for mn in diff["mnList"]:
if int(mn["confirmedHash"], 16) == 0:
found_unconfirmed = True
break
if not found_unconfirmed:
break
self.nodes[0].generate(1)
sync_blocks(self.nodes)
if __name__ == '__main__':
LLMQCoinbaseCommitmentsTest().main()
| test_getmnlistdiff_base |
deployment.go | /*
Copyright 2019 The KubeOne Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package machinecontroller
import (
"context"
"fmt"
"net"
"time"
"github.com/pkg/errors"
kubeoneapi "github.com/kubermatic/kubeone/pkg/apis/kubeone"
"github.com/kubermatic/kubeone/pkg/util"
"github.com/kubermatic/kubeone/pkg/util/credentials"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
apiextensions "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/pkg/registry/core/service/ipallocator"
dynclient "sigs.k8s.io/controller-runtime/pkg/client"
)
// MachineController related constants
const (
MachineControllerNamespace = metav1.NamespaceSystem
MachineControllerAppLabelKey = "app"
MachineControllerAppLabelValue = "machine-controller"
MachineControllerTag = "v1.1.5"
)
// Deploy deploys the machine-controller Deployment together with its RBAC resources on the cluster
func Deploy(ctx *util.Context) error {
if ctx.DynamicClient == nil {
return errors.New("kubernetes client not initialized")
}
bgCtx := context.Background()
// ServiceAccounts
if err := simpleCreateOrUpdate(bgCtx, ctx.DynamicClient, machineControllerServiceAccount()); err != nil {
return errors.Wrap(err, "failed to ensure machine-controller service account")
}
// ClusterRoles
if err := simpleCreateOrUpdate(bgCtx, ctx.DynamicClient, machineControllerClusterRole()); err != nil {
return errors.Wrap(err, "failed to ensure machine-controller cluster role")
}
// ClusterRoleBindings
crbGenerators := []func() *rbacv1.ClusterRoleBinding{
nodeSignerClusterRoleBinding,
machineControllerClusterRoleBinding,
nodeBootstrapperClusterRoleBinding,
}
for _, crbGen := range crbGenerators {
if err := simpleCreateOrUpdate(bgCtx, ctx.DynamicClient, crbGen()); err != nil {
return errors.Wrap(err, "failed to ensure machine-controller cluster-role binding")
}
}
// Roles
roleGenerators := []func() *rbacv1.Role{
machineControllerKubeSystemRole,
machineControllerKubePublicRole,
machineControllerEndpointReaderRole,
machineControllerClusterInfoReaderRole,
}
for _, roleGen := range roleGenerators {
if err := simpleCreateOrUpdate(bgCtx, ctx.DynamicClient, roleGen()); err != nil {
return errors.Wrap(err, "failed to ensure machine-controller role")
}
}
// RoleBindings
roleBindingsGenerators := []func() *rbacv1.RoleBinding{
machineControllerKubeSystemRoleBinding,
machineControllerKubePublicRoleBinding,
machineControllerDefaultRoleBinding,
machineControllerClusterInfoRoleBinding,
}
for _, roleBindingGen := range roleBindingsGenerators {
if err := simpleCreateOrUpdate(bgCtx, ctx.DynamicClient, roleBindingGen()); err != nil {
return errors.Wrap(err, "failed to ensure machine-controller role binding")
}
}
// Deployments
deployment, err := machineControllerDeployment(ctx.Cluster)
if err != nil {
return errors.Wrap(err, "failed to generate machine-controller deployment")
}
if err = simpleCreateOrUpdate(bgCtx, ctx.DynamicClient, deployment); err != nil {
return errors.Wrap(err, "failed to ensure machine-controller deployment")
}
// CRDs
crdGenerators := []func() *apiextensions.CustomResourceDefinition{
machineControllerMachineCRD,
machineControllerClusterCRD,
machineControllerMachineSetCRD,
machineControllerMachineDeploymentCRD,
}
for _, crdGen := range crdGenerators {
if err = simpleCreateOrUpdate(bgCtx, ctx.DynamicClient, crdGen()); err != nil {
return errors.Wrap(err, "failed to ensure machine-controller CRDs")
}
}
// HACK: re-init dynamic client in order to re-init RestMapper, to drop caches
err = util.HackIssue321InitDynamicClient(ctx)
return errors.Wrap(err, "failed to re-init dynamic client")
}
// WaitForMachineController waits for the machine-controller-webhook pod to become running and ready
func WaitForMachineController(client dynclient.Client) error {
listOpts := dynclient.ListOptions{Namespace: WebhookNamespace}
err := listOpts.SetLabelSelector(fmt.Sprintf("%s=%s", MachineControllerAppLabelKey, MachineControllerAppLabelValue))
if err != nil {
return errors.Wrap(err, "failed to parse machine-controller labels")
}
return wait.Poll(5*time.Second, 3*time.Minute, func() (bool, error) {
machineControllerPods := corev1.PodList{}
err = client.List(context.Background(), &listOpts, &machineControllerPods)
if err != nil {
return false, errors.Wrap(err, "failed to list machine-controller pod")
}
if len(machineControllerPods.Items) == 0 {
return false, nil
}
mcpod := machineControllerPods.Items[0]
if mcpod.Status.Phase == corev1.PodRunning {
for _, podcond := range mcpod.Status.Conditions {
if podcond.Type == corev1.PodReady && podcond.Status == corev1.ConditionTrue {
return true, nil
}
}
}
return false, nil
})
}
func machineControllerServiceAccount() *corev1.ServiceAccount {
return &corev1.ServiceAccount{
TypeMeta: metav1.TypeMeta{
APIVersion: "v1",
Kind: "ServiceAccount",
},
ObjectMeta: metav1.ObjectMeta{
Name: "machine-controller",
Namespace: MachineControllerNamespace,
Labels: map[string]string{
MachineControllerAppLabelKey: MachineControllerAppLabelValue,
},
},
}
}
func machineControllerClusterRole() *rbacv1.ClusterRole {
return &rbacv1.ClusterRole{
TypeMeta: metav1.TypeMeta{
APIVersion: "rbac.authorization.k8s.io/v1",
Kind: "ClusterRole",
},
ObjectMeta: metav1.ObjectMeta{
Name: "machine-controller",
Labels: map[string]string{
MachineControllerAppLabelKey: MachineControllerAppLabelValue,
},
},
Rules: []rbacv1.PolicyRule{
{
APIGroups: []string{"apiextensions.k8s.io"},
Resources: []string{"customresourcedefinitions"},
Verbs: []string{"get"},
},
{
APIGroups: []string{"apiextensions.k8s.io"},
Resources: []string{"customresourcedefinitions"},
ResourceNames: []string{"machines.machine.k8s.io"},
Verbs: []string{"*"},
},
{
APIGroups: []string{"machine.k8s.io"},
Resources: []string{"machines"},
Verbs: []string{"*"},
},
{
APIGroups: []string{""},
Resources: []string{"persistentvolumes"},
Verbs: []string{"list", "get", "watch"},
},
{
APIGroups: []string{""},
Resources: []string{"nodes"},
Verbs: []string{"*"},
},
{
APIGroups: []string{""},
Resources: []string{"pods"},
Verbs: []string{"list", "get"},
},
{
APIGroups: []string{""},
Resources: []string{"pods/eviction"},
Verbs: []string{"create"},
},
{
APIGroups: []string{""},
Resources: []string{"events"},
Verbs: []string{"create", "patch"},
},
{
APIGroups: []string{"cluster.k8s.io"},
Resources: []string{
"clusters",
"clusters/status",
"machinedeployments",
"machinedeployments/status",
"machines",
"machinesets",
"machinesets/status",
},
Verbs: []string{"*"},
},
},
}
}
func machineControllerClusterRoleBinding() *rbacv1.ClusterRoleBinding {
return &rbacv1.ClusterRoleBinding{
TypeMeta: metav1.TypeMeta{
APIVersion: "rbac.authorization.k8s.io/v1",
Kind: "ClusterRoleBinding",
},
ObjectMeta: metav1.ObjectMeta{
Name: "machine-controller",
Labels: map[string]string{
MachineControllerAppLabelKey: MachineControllerAppLabelValue,
},
},
RoleRef: rbacv1.RoleRef{
APIGroup: rbacv1.GroupName,
Name: "machine-controller",
Kind: "ClusterRole",
},
Subjects: []rbacv1.Subject{
{
Kind: "ServiceAccount",
Name: "machine-controller",
Namespace: MachineControllerNamespace,
},
},
}
}
func nodeBootstrapperClusterRoleBinding() *rbacv1.ClusterRoleBinding {
return &rbacv1.ClusterRoleBinding{
TypeMeta: metav1.TypeMeta{
APIVersion: "rbac.authorization.k8s.io/v1",
Kind: "ClusterRoleBinding",
},
ObjectMeta: metav1.ObjectMeta{
Name: "machine-controller:kubelet-bootstrap",
Labels: map[string]string{
MachineControllerAppLabelKey: MachineControllerAppLabelValue,
},
},
RoleRef: rbacv1.RoleRef{
APIGroup: rbacv1.GroupName,
Name: "system:node-bootstrapper",
Kind: "ClusterRole",
},
Subjects: []rbacv1.Subject{
{
APIGroup: rbacv1.GroupName,
Kind: "Group",
Name: "system:bootstrappers:machine-controller:default-node-token",
},
},
}
}
func nodeSignerClusterRoleBinding() *rbacv1.ClusterRoleBinding {
return &rbacv1.ClusterRoleBinding{
TypeMeta: metav1.TypeMeta{
APIVersion: "rbac.authorization.k8s.io/v1",
Kind: "ClusterRoleBinding",
},
ObjectMeta: metav1.ObjectMeta{
Name: "machine-controller:node-signer",
Labels: map[string]string{
MachineControllerAppLabelKey: MachineControllerAppLabelValue,
},
},
RoleRef: rbacv1.RoleRef{
Name: "system:certificates.k8s.io:certificatesigningrequests:nodeclient",
Kind: "ClusterRole",
APIGroup: rbacv1.GroupName,
},
Subjects: []rbacv1.Subject{
{
Kind: "Group",
Name: "system:bootstrappers:machine-controller:default-node-token",
APIGroup: rbacv1.GroupName,
},
},
}
}
func machineControllerKubeSystemRole() *rbacv1.Role {
return &rbacv1.Role{
TypeMeta: metav1.TypeMeta{
APIVersion: "rbac.authorization.k8s.io/v1",
Kind: "Role",
},
ObjectMeta: metav1.ObjectMeta{
Name: "machine-controller",
Namespace: MachineControllerNamespace,
Labels: map[string]string{
MachineControllerAppLabelKey: MachineControllerAppLabelValue,
},
},
Rules: []rbacv1.PolicyRule{
{
APIGroups: []string{""},
Resources: []string{"secrets"},
Verbs: []string{
"create",
"list",
"update",
"watch",
},
},
{
APIGroups: []string{""},
Resources: []string{"endpoints"},
ResourceNames: []string{"machine-controller"},
Verbs: []string{"*"},
},
{
APIGroups: []string{""},
Resources: []string{"endpoints"},
Verbs: []string{"create"},
},
},
}
}
func machineControllerKubePublicRole() *rbacv1.Role {
return &rbacv1.Role{
TypeMeta: metav1.TypeMeta{
APIVersion: "rbac.authorization.k8s.io/v1",
Kind: "Role",
},
ObjectMeta: metav1.ObjectMeta{
Name: "machine-controller",
Namespace: metav1.NamespacePublic,
Labels: map[string]string{
MachineControllerAppLabelKey: MachineControllerAppLabelValue,
},
},
Rules: []rbacv1.PolicyRule{
{
APIGroups: []string{""},
Resources: []string{"configmaps"},
Verbs: []string{
"get",
"list",
"watch",
},
},
},
}
}
func machineControllerEndpointReaderRole() *rbacv1.Role |
func machineControllerClusterInfoReaderRole() *rbacv1.Role {
return &rbacv1.Role{
TypeMeta: metav1.TypeMeta{
APIVersion: "rbac.authorization.k8s.io/v1",
Kind: "Role",
},
ObjectMeta: metav1.ObjectMeta{
Name: "cluster-info",
Namespace: metav1.NamespacePublic,
Labels: map[string]string{
MachineControllerAppLabelKey: MachineControllerAppLabelValue,
},
},
Rules: []rbacv1.PolicyRule{
{
APIGroups: []string{""},
ResourceNames: []string{"cluster-info"},
Resources: []string{"configmaps"},
Verbs: []string{"get"},
},
},
}
}
func machineControllerKubeSystemRoleBinding() *rbacv1.RoleBinding {
return &rbacv1.RoleBinding{
TypeMeta: metav1.TypeMeta{
APIVersion: "rbac.authorization.k8s.io/v1",
Kind: "RoleBinding",
},
ObjectMeta: metav1.ObjectMeta{
Name: "machine-controller",
Namespace: MachineControllerNamespace,
Labels: map[string]string{
MachineControllerAppLabelKey: MachineControllerAppLabelValue,
},
},
RoleRef: rbacv1.RoleRef{
Name: "machine-controller",
Kind: "Role",
APIGroup: rbacv1.GroupName,
},
Subjects: []rbacv1.Subject{
{
Kind: "ServiceAccount",
Name: "machine-controller",
Namespace: MachineControllerNamespace,
},
},
}
}
func machineControllerKubePublicRoleBinding() *rbacv1.RoleBinding {
return &rbacv1.RoleBinding{
TypeMeta: metav1.TypeMeta{
APIVersion: "rbac.authorization.k8s.io/v1",
Kind: "RoleBinding",
},
ObjectMeta: metav1.ObjectMeta{
Name: "machine-controller",
Namespace: metav1.NamespacePublic,
Labels: map[string]string{
MachineControllerAppLabelKey: MachineControllerAppLabelValue,
},
},
RoleRef: rbacv1.RoleRef{
Name: "machine-controller",
Kind: "Role",
APIGroup: rbacv1.GroupName,
},
Subjects: []rbacv1.Subject{
{
Kind: "ServiceAccount",
Name: "machine-controller",
Namespace: MachineControllerNamespace,
},
},
}
}
func machineControllerDefaultRoleBinding() *rbacv1.RoleBinding {
return &rbacv1.RoleBinding{
TypeMeta: metav1.TypeMeta{
APIVersion: "rbac.authorization.k8s.io/v1",
Kind: "RoleBinding",
},
ObjectMeta: metav1.ObjectMeta{
Name: "machine-controller",
Namespace: metav1.NamespaceDefault,
Labels: map[string]string{
MachineControllerAppLabelKey: MachineControllerAppLabelValue,
},
},
RoleRef: rbacv1.RoleRef{
Name: "machine-controller",
Kind: "Role",
APIGroup: rbacv1.GroupName,
},
Subjects: []rbacv1.Subject{
{
Kind: "ServiceAccount",
Name: "machine-controller",
Namespace: MachineControllerNamespace,
},
},
}
}
func machineControllerClusterInfoRoleBinding() *rbacv1.RoleBinding {
return &rbacv1.RoleBinding{
TypeMeta: metav1.TypeMeta{
APIVersion: "rbac.authorization.k8s.io/v1",
Kind: "RoleBinding",
},
ObjectMeta: metav1.ObjectMeta{
Name: "cluster-info",
Namespace: metav1.NamespacePublic,
Labels: map[string]string{
MachineControllerAppLabelKey: MachineControllerAppLabelValue,
},
},
RoleRef: rbacv1.RoleRef{
Name: "cluster-info",
Kind: "Role",
APIGroup: rbacv1.GroupName,
},
Subjects: []rbacv1.Subject{
{
Kind: "ServiceAccount",
Name: "machine-controller",
Namespace: MachineControllerNamespace,
},
},
}
}
// NB: when these CRD objects are serialized, the Go structures from
// k8s.io always emit a "status" field; a stale status can break the
// validation and prevent the CRDs from being applied to the cluster.
func machineControllerMachineCRD() *apiextensions.CustomResourceDefinition {
return &apiextensions.CustomResourceDefinition{
TypeMeta: metav1.TypeMeta{
APIVersion: "apiextensions.k8s.io/v1beta1",
Kind: "CustomResourceDefinition",
},
ObjectMeta: metav1.ObjectMeta{
Name: "machines.cluster.k8s.io",
},
Spec: apiextensions.CustomResourceDefinitionSpec{
Group: "cluster.k8s.io",
Scope: apiextensions.NamespaceScoped,
Versions: []apiextensions.CustomResourceDefinitionVersion{
{
Name: "v1alpha1",
Served: true,
Storage: true,
},
},
Names: apiextensions.CustomResourceDefinitionNames{
Plural: "machines",
Singular: "machine",
Kind: "Machine",
ListKind: "MachineList",
},
AdditionalPrinterColumns: []apiextensions.CustomResourceColumnDefinition{
{
Name: "Provider",
Type: "string",
JSONPath: ".spec.providerSpec.value.cloudProvider",
},
{
Name: "OS",
Type: "string",
JSONPath: ".spec.providerSpec.value.operatingSystem",
},
{
Name: "Address",
Type: "string",
JSONPath: ".status.addresses[0].address",
},
{
Name: "Age",
Type: "date",
JSONPath: ".metadata.creationTimestamp",
},
},
},
}
}
func machineControllerClusterCRD() *apiextensions.CustomResourceDefinition {
return &apiextensions.CustomResourceDefinition{
TypeMeta: metav1.TypeMeta{
APIVersion: "apiextensions.k8s.io/v1beta1",
Kind: "CustomResourceDefinition",
},
ObjectMeta: metav1.ObjectMeta{
Name: "clusters.cluster.k8s.io",
},
Spec: apiextensions.CustomResourceDefinitionSpec{
Group: "cluster.k8s.io",
Scope: apiextensions.NamespaceScoped,
Versions: []apiextensions.CustomResourceDefinitionVersion{
{
Name: "v1alpha1",
Served: true,
Storage: true,
},
},
Names: apiextensions.CustomResourceDefinitionNames{
Plural: "clusters",
Singular: "cluster",
Kind: "Cluster",
ListKind: "ClusterList",
},
Subresources: &apiextensions.CustomResourceSubresources{
Status: &apiextensions.CustomResourceSubresourceStatus{},
},
},
}
}
func machineControllerMachineSetCRD() *apiextensions.CustomResourceDefinition {
return &apiextensions.CustomResourceDefinition{
TypeMeta: metav1.TypeMeta{
APIVersion: "apiextensions.k8s.io/v1beta1",
Kind: "CustomResourceDefinition",
},
ObjectMeta: metav1.ObjectMeta{
Name: "machinesets.cluster.k8s.io",
},
Spec: apiextensions.CustomResourceDefinitionSpec{
Group: "cluster.k8s.io",
Scope: apiextensions.NamespaceScoped,
Versions: []apiextensions.CustomResourceDefinitionVersion{
{
Name: "v1alpha1",
Served: true,
Storage: true,
},
},
Names: apiextensions.CustomResourceDefinitionNames{
Plural: "machinesets",
Singular: "machineset",
Kind: "MachineSet",
ListKind: "MachineSetList",
},
Subresources: &apiextensions.CustomResourceSubresources{
Status: &apiextensions.CustomResourceSubresourceStatus{},
},
AdditionalPrinterColumns: []apiextensions.CustomResourceColumnDefinition{
{
Name: "Replicas",
Type: "integer",
JSONPath: ".spec.replicas",
},
{
Name: "Provider",
Type: "string",
JSONPath: ".spec.template.spec.providerSpec.value.cloudProvider",
},
{
Name: "OS",
Type: "string",
JSONPath: ".spec.template.spec.providerSpec.value.operatingSystem",
},
{
Name: "Age",
Type: "date",
JSONPath: ".metadata.creationTimestamp",
},
},
},
}
}
func machineControllerMachineDeploymentCRD() *apiextensions.CustomResourceDefinition {
return &apiextensions.CustomResourceDefinition{
TypeMeta: metav1.TypeMeta{
APIVersion: "apiextensions.k8s.io/v1beta1",
Kind: "CustomResourceDefinition",
},
ObjectMeta: metav1.ObjectMeta{
Name: "machinedeployments.cluster.k8s.io",
},
Spec: apiextensions.CustomResourceDefinitionSpec{
Group: "cluster.k8s.io",
Scope: apiextensions.NamespaceScoped,
Versions: []apiextensions.CustomResourceDefinitionVersion{
{
Name: "v1alpha1",
Served: true,
Storage: true,
},
},
Names: apiextensions.CustomResourceDefinitionNames{
Plural: "machinedeployments",
Singular: "machinedeployment",
Kind: "MachineDeployment",
ListKind: "MachineDeploymentList",
},
Subresources: &apiextensions.CustomResourceSubresources{
Status: &apiextensions.CustomResourceSubresourceStatus{},
Scale: &apiextensions.CustomResourceSubresourceScale{
SpecReplicasPath: ".spec.replicas",
StatusReplicasPath: ".status.replicas",
},
},
AdditionalPrinterColumns: []apiextensions.CustomResourceColumnDefinition{
{
Name: "Replicas",
Type: "integer",
JSONPath: ".spec.replicas",
},
{
Name: "Provider",
Type: "string",
JSONPath: ".spec.template.spec.providerSpec.value.cloudProvider",
},
{
Name: "OS",
Type: "string",
JSONPath: ".spec.template.spec.providerSpec.value.operatingSystem",
},
{
Name: "Age",
Type: "date",
JSONPath: ".metadata.creationTimestamp",
},
},
},
}
}
func machineControllerDeployment(cluster *kubeoneapi.KubeOneCluster) (*appsv1.Deployment, error) {
var replicas int32 = 1
clusterDNS, err := clusterDNSIP(cluster)
if err != nil {
return nil, errors.Wrap(err, "failed to get clusterDNS IP")
}
args := []string{
"-logtostderr",
"-v", "4",
"-internal-listen-address", "0.0.0.0:8085",
"-cluster-dns", clusterDNS.String(),
}
if cluster.CloudProvider.External {
args = append(args, "-external-cloud-provider")
}
return &appsv1.Deployment{
TypeMeta: metav1.TypeMeta{
APIVersion: "apps/v1",
Kind: "Deployment",
},
ObjectMeta: metav1.ObjectMeta{
Name: "machine-controller",
Namespace: MachineControllerNamespace,
Labels: map[string]string{
MachineControllerAppLabelKey: MachineControllerAppLabelValue,
},
},
Spec: appsv1.DeploymentSpec{
Replicas: &replicas,
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{
MachineControllerAppLabelKey: MachineControllerAppLabelValue,
},
},
Strategy: appsv1.DeploymentStrategy{
Type: appsv1.RollingUpdateDeploymentStrategyType,
RollingUpdate: &appsv1.RollingUpdateDeployment{
MaxSurge: &intstr.IntOrString{
Type: intstr.Int,
IntVal: 1,
},
MaxUnavailable: &intstr.IntOrString{
Type: intstr.Int,
IntVal: 0,
},
},
},
Template: corev1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Annotations: map[string]string{
"prometheus.io/scrape": "true",
"prometheus.io/path": "/metrics",
"prometheus.io/port": "8085",
},
Labels: map[string]string{
MachineControllerAppLabelKey: MachineControllerAppLabelValue,
},
},
Spec: corev1.PodSpec{
ServiceAccountName: "machine-controller",
Tolerations: []corev1.Toleration{
{
Key: "node-role.kubernetes.io/master",
Operator: corev1.TolerationOpExists,
Effect: corev1.TaintEffectNoSchedule,
},
{
Key: "node.cloudprovider.kubernetes.io/uninitialized",
Value: "true",
Effect: corev1.TaintEffectNoSchedule,
},
{
Key: "CriticalAddonsOnly",
Operator: corev1.TolerationOpExists,
},
},
Containers: []corev1.Container{
{
Name: "machine-controller",
Image: "docker.io/kubermatic/machine-controller:" + MachineControllerTag,
ImagePullPolicy: corev1.PullIfNotPresent,
Command: []string{"/usr/local/bin/machine-controller"},
Args: args,
Env: getEnvVarCredentials(cluster),
TerminationMessagePath: corev1.TerminationMessagePathDefault,
TerminationMessagePolicy: corev1.TerminationMessageReadFile,
ReadinessProbe: &corev1.Probe{
Handler: corev1.Handler{
HTTPGet: &corev1.HTTPGetAction{
Path: "/ready",
Port: intstr.FromInt(8085),
},
},
FailureThreshold: 3,
PeriodSeconds: 10,
SuccessThreshold: 1,
TimeoutSeconds: 15,
},
LivenessProbe: &corev1.Probe{
FailureThreshold: 8,
Handler: corev1.Handler{
HTTPGet: &corev1.HTTPGetAction{
Path: "/live",
Port: intstr.FromInt(8085),
},
},
InitialDelaySeconds: 15,
PeriodSeconds: 10,
SuccessThreshold: 1,
TimeoutSeconds: 15,
},
},
},
},
},
},
}, nil
}
func getEnvVarCredentials(cluster *kubeoneapi.KubeOneCluster) []corev1.EnvVar {
env := make([]corev1.EnvVar, 0)
for k := range cluster.Credentials {
env = append(env, corev1.EnvVar{
Name: k,
ValueFrom: &corev1.EnvVarSource{
SecretKeyRef: &corev1.SecretKeySelector{
LocalObjectReference: corev1.LocalObjectReference{
Name: credentials.SecretName,
},
Key: k,
},
},
})
}
return env
}
// clusterDNSIP returns the IP address of ClusterDNS Service,
// which is the 10th IP of the Services CIDR.
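// For example, with the common default service subnet 10.96.0.0/12,
// the resulting ClusterDNS address is 10.96.0.10.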
func clusterDNSIP(cluster *kubeoneapi.KubeOneCluster) (*net.IP, error) {
// Get the Services CIDR
_, svcSubnetCIDR, err := net.ParseCIDR(cluster.ClusterNetwork.ServiceSubnet)
if err != nil {
return nil, errors.Wrap(err, "failed to parse network.service_subnet")
}
// Select the 10th IP in Services CIDR range as ClusterDNSIP
clusterDNS, err := ipallocator.GetIndexedIP(svcSubnetCIDR, 10)
if err != nil {
return nil, errors.Wrap(err, "failed to get IP from service subnet")
}
return &clusterDNS, nil
}
| {
return &rbacv1.Role{
TypeMeta: metav1.TypeMeta{
APIVersion: "rbac.authorization.k8s.io/v1",
Kind: "Role",
},
ObjectMeta: metav1.ObjectMeta{
Name: "machine-controller",
Namespace: metav1.NamespaceDefault,
Labels: map[string]string{
MachineControllerAppLabelKey: MachineControllerAppLabelValue,
},
},
Rules: []rbacv1.PolicyRule{
{
APIGroups: []string{""},
Resources: []string{"endpoints"},
Verbs: []string{
"get",
"list",
"watch",
},
},
},
}
} |
codepipeline_job.rs | use custom_serde::*;
/// `CodePipelineEvent` contains data from an event sent from AWS CodePipeline
#[derive(Debug, Clone, PartialEq, Deserialize, Serialize)]
pub struct CodePipelineEvent {
#[serde(rename = "CodePipeline.job")]
pub code_pipeline_job: CodePipelineJob,
}
/// `CodePipelineJob` represents a job from an AWS CodePipeline event
#[derive(Debug, Clone, PartialEq, Deserialize, Serialize)]
pub struct CodePipelineJob {
#[cfg(feature = "string-null-none")]
#[serde(deserialize_with = "deserialize_lambda_string")]
#[serde(default)]
pub id: Option<String>,
#[cfg(feature = "string-null-empty")]
#[serde(deserialize_with = "deserialize_lambda_string")]
#[serde(default)]
pub id: String,
#[cfg(feature = "string-null-none")]
#[serde(deserialize_with = "deserialize_lambda_string")]
#[serde(default)]
#[serde(rename = "accountId")]
pub account_id: Option<String>,
#[cfg(feature = "string-null-empty")]
#[serde(deserialize_with = "deserialize_lambda_string")]
#[serde(default)]
#[serde(rename = "accountId")]
pub account_id: String,
pub data: CodePipelineData,
}
/// `CodePipelineData` represents a job from an AWS CodePipeline event
#[derive(Debug, Clone, PartialEq, Deserialize, Serialize)]
pub struct CodePipelineData {
#[serde(rename = "actionConfiguration")]
pub action_configuration: CodePipelineActionConfiguration,
#[serde(rename = "inputArtifacts")]
pub input_artifacts: Vec<CodePipelineInputArtifact>,
#[serde(rename = "outputArtifacts")]
pub out_put_artifacts: Vec<CodePipelineOutputArtifact>,
#[serde(rename = "artifactCredentials")]
pub artifact_credentials: CodePipelineArtifactCredentials,
#[cfg(feature = "string-null-none")]
#[serde(deserialize_with = "deserialize_lambda_string")]
#[serde(default)]
#[serde(rename = "continuationToken")]
pub continuation_token: Option<String>,
#[cfg(feature = "string-null-empty")]
#[serde(deserialize_with = "deserialize_lambda_string")]
#[serde(default)]
#[serde(rename = "continuationToken")]
pub continuation_token: String,
}
/// `CodePipelineActionConfiguration` represents an Action Configuration
#[derive(Debug, Clone, PartialEq, Deserialize, Serialize)]
pub struct CodePipelineActionConfiguration {
pub configuration: CodePipelineConfiguration,
}
/// `CodePipelineConfiguration` represents a configuration for an Action Configuration
#[derive(Debug, Clone, PartialEq, Deserialize, Serialize)]
pub struct CodePipelineConfiguration {
#[cfg(feature = "string-null-none")]
#[serde(deserialize_with = "deserialize_lambda_string")]
#[serde(default)]
#[serde(rename = "FunctionName")]
pub function_name: Option<String>,
#[cfg(feature = "string-null-empty")]
#[serde(deserialize_with = "deserialize_lambda_string")]
#[serde(default)]
#[serde(rename = "FunctionName")]
pub function_name: String,
#[cfg(feature = "string-null-none")]
#[serde(deserialize_with = "deserialize_lambda_string")]
#[serde(default)]
#[serde(rename = "UserParameters")]
pub user_parameters: Option<String>,
#[cfg(feature = "string-null-empty")]
#[serde(deserialize_with = "deserialize_lambda_string")]
#[serde(default)]
#[serde(rename = "UserParameters")]
pub user_parameters: String,
}
/// `CodePipelineInputArtifact` represents an input artifact
#[derive(Debug, Clone, PartialEq, Deserialize, Serialize)]
pub struct CodePipelineInputArtifact {
pub location: CodePipelineInputLocation,
pub revision: Option<String>,
#[cfg(feature = "string-null-none")]
#[serde(deserialize_with = "deserialize_lambda_string")]
#[serde(default)]
pub name: Option<String>,
#[cfg(feature = "string-null-empty")]
#[serde(deserialize_with = "deserialize_lambda_string")]
#[serde(default)]
pub name: String,
}
/// `CodePipelineInputLocation` represents a input location
#[derive(Debug, Clone, PartialEq, Deserialize, Serialize)]
pub struct CodePipelineInputLocation {
#[serde(rename = "s3Location")]
pub s3_location: CodePipelineS3Location,
#[cfg(feature = "string-null-none")]
#[serde(deserialize_with = "deserialize_lambda_string")]
#[serde(default)]
#[serde(rename = "type")]
pub location_type: Option<String>,
#[cfg(feature = "string-null-empty")]
#[serde(deserialize_with = "deserialize_lambda_string")]
#[serde(default)]
#[serde(rename = "type")]
pub location_type: String,
}
/// `CodePipelineS3Location` represents an s3 input location
#[derive(Debug, Clone, PartialEq, Deserialize, Serialize)]
pub struct | {
#[cfg(feature = "string-null-none")]
#[serde(deserialize_with = "deserialize_lambda_string")]
#[serde(default)]
#[serde(rename = "bucketName")]
pub bucket_name: Option<String>,
#[cfg(feature = "string-null-empty")]
#[serde(deserialize_with = "deserialize_lambda_string")]
#[serde(default)]
#[serde(rename = "bucketName")]
pub bucket_name: String,
#[cfg(feature = "string-null-none")]
#[serde(deserialize_with = "deserialize_lambda_string")]
#[serde(default)]
#[serde(rename = "objectKey")]
pub object_key: Option<String>,
#[cfg(feature = "string-null-empty")]
#[serde(deserialize_with = "deserialize_lambda_string")]
#[serde(default)]
#[serde(rename = "objectKey")]
pub object_key: String,
}
/// `CodePipelineOutputArtifact` represents an output artifact
#[derive(Debug, Clone, PartialEq, Deserialize, Serialize)]
pub struct CodePipelineOutputArtifact {
pub location: CodePipelineInputLocation,
pub revision: Option<String>,
#[cfg(feature = "string-null-none")]
#[serde(deserialize_with = "deserialize_lambda_string")]
#[serde(default)]
pub name: Option<String>,
#[cfg(feature = "string-null-empty")]
#[serde(deserialize_with = "deserialize_lambda_string")]
#[serde(default)]
pub name: String,
}
/// `CodePipelineOutputLocation` represents an output location
#[derive(Debug, Clone, PartialEq, Deserialize, Serialize)]
pub struct CodePipelineOutputLocation {
#[serde(rename = "s3Location")]
pub s3_location: CodePipelineS3Location,
#[cfg(feature = "string-null-none")]
#[serde(deserialize_with = "deserialize_lambda_string")]
#[serde(default)]
#[serde(rename = "type")]
pub location_type: Option<String>,
#[cfg(feature = "string-null-empty")]
#[serde(deserialize_with = "deserialize_lambda_string")]
#[serde(default)]
#[serde(rename = "type")]
pub location_type: String,
}
/// `CodePipelineArtifactCredentials` represents CodePipeline artifact credentials
#[derive(Debug, Clone, PartialEq, Deserialize, Serialize)]
pub struct CodePipelineArtifactCredentials {
#[cfg(feature = "string-null-none")]
#[serde(deserialize_with = "deserialize_lambda_string")]
#[serde(default)]
#[serde(rename = "secretAccessKey")]
pub secret_access_key: Option<String>,
#[cfg(feature = "string-null-empty")]
#[serde(deserialize_with = "deserialize_lambda_string")]
#[serde(default)]
#[serde(rename = "secretAccessKey")]
pub secret_access_key: String,
#[cfg(feature = "string-null-none")]
#[serde(deserialize_with = "deserialize_lambda_string")]
#[serde(default)]
#[serde(rename = "sessionToken")]
pub session_token: Option<String>,
#[cfg(feature = "string-null-empty")]
#[serde(deserialize_with = "deserialize_lambda_string")]
#[serde(default)]
#[serde(rename = "sessionToken")]
pub session_token: String,
#[cfg(feature = "string-null-none")]
#[serde(deserialize_with = "deserialize_lambda_string")]
#[serde(default)]
#[serde(rename = "accessKeyId")]
pub access_key_id: Option<String>,
#[cfg(feature = "string-null-empty")]
#[serde(deserialize_with = "deserialize_lambda_string")]
#[serde(default)]
#[serde(rename = "accessKeyId")]
pub access_key_id: String,
}
#[cfg(test)]
mod test {
use super::*;
extern crate serde_json;
#[test]
fn example_event() {
let data = include_bytes!("fixtures/example-codepipeline_job-event.json");
let parsed: CodePipelineEvent = serde_json::from_slice(data).unwrap();
let output: String = serde_json::to_string(&parsed).unwrap();
let reparsed: CodePipelineEvent = serde_json::from_slice(output.as_bytes()).unwrap();
assert_eq!(parsed, reparsed);
}
}
| CodePipelineS3Location |
call_site_value.rs | use either::Either;
use llvm_sys::LLVMTypeKind;
use llvm_sys::core::{LLVMIsTailCall, LLVMSetTailCall, LLVMGetTypeKind, LLVMTypeOf, LLVMSetInstructionCallConv, LLVMGetInstructionCallConv, LLVMSetInstrParamAlignment};
use llvm_sys::prelude::LLVMValueRef;
#[llvm_versions(3.9..=latest)]
use crate::attributes::{Attribute};
use crate::attributes::AttributeLoc;
use crate::support::LLVMString;
use crate::values::{AsValueRef, BasicValueEnum, InstructionValue, Value};
#[llvm_versions(3.9..=latest)]
use crate::values::FunctionValue;
/// A value resulting from a function call. It may have function attributes applied to it.
///
/// This struct may be removed in the future in favor of an `InstructionValue<CallSite>` type.
#[derive(Debug, PartialEq, Clone, Copy, Hash)]
pub struct CallSiteValue<'ctx>(Value<'ctx>);
impl<'ctx> CallSiteValue<'ctx> {
pub(crate) fn new(value: LLVMValueRef) -> Self {
CallSiteValue(Value::new(value))
}
/// Sets whether or not this call is a tail call.
///
/// # Example
///
/// ```no_run
/// use inkwell::context::Context;
///
/// let context = Context::create();
/// let builder = context.create_builder();
/// let module = context.create_module("my_mod");
/// let void_type = context.void_type();
/// let fn_type = void_type.fn_type(&[], false);
/// let fn_value = module.add_function("my_fn", fn_type, None);
/// let entry_bb = context.append_basic_block(fn_value, "entry");
///
/// builder.position_at_end(entry_bb);
///
/// let call_site_value = builder.build_call(fn_value, &[], "my_fn");
///
/// call_site_value.set_tail_call(true);
/// ```
pub fn set_tail_call(self, tail_call: bool) {
unsafe {
LLVMSetTailCall(self.as_value_ref(), tail_call as i32)
}
}
/// Determines whether or not this call is a tail call.
///
/// # Example
///
/// ```no_run
/// use inkwell::context::Context;
///
/// let context = Context::create();
/// let builder = context.create_builder();
/// let module = context.create_module("my_mod");
/// let void_type = context.void_type();
/// let fn_type = void_type.fn_type(&[], false);
/// let fn_value = module.add_function("my_fn", fn_type, None);
/// let entry_bb = context.append_basic_block(fn_value, "entry");
///
/// builder.position_at_end(entry_bb);
///
/// let call_site_value = builder.build_call(fn_value, &[], "my_fn");
///
/// call_site_value.set_tail_call(true);
///
/// assert!(call_site_value.is_tail_call());
/// ```
pub fn is_tail_call(self) -> bool {
unsafe {
LLVMIsTailCall(self.as_value_ref()) == 1
}
}
/// Try to convert this `CallSiteValue` to a `BasicValueEnum` if not a void return type.
///
/// # Example
///
/// ```no_run
/// use inkwell::context::Context;
///
/// let context = Context::create();
/// let builder = context.create_builder();
/// let module = context.create_module("my_mod");
/// let void_type = context.void_type();
/// let fn_type = void_type.fn_type(&[], false);
/// let fn_value = module.add_function("my_fn", fn_type, None);
/// let entry_bb = context.append_basic_block(fn_value, "entry");
///
/// builder.position_at_end(entry_bb);
///
/// let call_site_value = builder.build_call(fn_value, &[], "my_fn");
///
/// assert!(call_site_value.try_as_basic_value().is_right());
/// ```
pub fn try_as_basic_value(self) -> Either<BasicValueEnum<'ctx>, InstructionValue<'ctx>> {
unsafe {
match LLVMGetTypeKind(LLVMTypeOf(self.as_value_ref())) {
LLVMTypeKind::LLVMVoidTypeKind => Either::Right(InstructionValue::new(self.as_value_ref())),
_ => Either::Left(BasicValueEnum::new(self.as_value_ref())),
}
}
}
/// Adds an `Attribute` to this `CallSiteValue`.
///
/// # Example
///
/// ```no_run
/// use inkwell::attributes::AttributeLoc;
/// use inkwell::context::Context;
///
/// let context = Context::create();
/// let builder = context.create_builder();
/// let module = context.create_module("my_mod");
/// let void_type = context.void_type();
/// let fn_type = void_type.fn_type(&[], false);
/// let fn_value = module.add_function("my_fn", fn_type, None);
/// let string_attribute = context.create_string_attribute("my_key", "my_val");
/// let enum_attribute = context.create_enum_attribute(1, 1);
/// let entry_bb = context.append_basic_block(fn_value, "entry");
///
/// builder.position_at_end(entry_bb);
///
/// let call_site_value = builder.build_call(fn_value, &[], "my_fn");
///
/// call_site_value.add_attribute(AttributeLoc::Return, string_attribute);
/// call_site_value.add_attribute(AttributeLoc::Return, enum_attribute);
/// ```
#[llvm_versions(3.9..=latest)]
pub fn add_attribute(self, loc: AttributeLoc, attribute: Attribute) {
use llvm_sys::core::LLVMAddCallSiteAttribute;
unsafe {
LLVMAddCallSiteAttribute(self.as_value_ref(), loc.get_index(), attribute.attribute)
}
}
/// Gets the `FunctionValue` this `CallSiteValue` is based on.
///
/// # Example
///
/// ```no_run
/// use inkwell::context::Context;
///
/// let context = Context::create();
/// let builder = context.create_builder();
/// let module = context.create_module("my_mod");
/// let void_type = context.void_type();
/// let fn_type = void_type.fn_type(&[], false);
/// let fn_value = module.add_function("my_fn", fn_type, None);
/// let string_attribute = context.create_string_attribute("my_key", "my_val");
/// let enum_attribute = context.create_enum_attribute(1, 1);
/// let entry_bb = context.append_basic_block(fn_value, "entry");
///
/// builder.position_at_end(entry_bb);
///
/// let call_site_value = builder.build_call(fn_value, &[], "my_fn");
///
/// assert_eq!(call_site_value.get_called_fn_value(), fn_value);
/// ```
#[llvm_versions(3.9..=latest)]
pub fn get_called_fn_value(self) -> FunctionValue<'ctx> {
use llvm_sys::core::LLVMGetCalledValue;
let ptr = unsafe {
LLVMGetCalledValue(self.as_value_ref())
};
FunctionValue::new(ptr).expect("This should never be null?")
}
/// Counts the number of `Attribute`s on this `CallSiteValue` at an index.
///
/// # Example
///
/// ```no_run
/// use inkwell::attributes::AttributeLoc;
/// use inkwell::context::Context;
///
/// let context = Context::create();
/// let builder = context.create_builder();
/// let module = context.create_module("my_mod");
/// let void_type = context.void_type();
/// let fn_type = void_type.fn_type(&[], false);
/// let fn_value = module.add_function("my_fn", fn_type, None);
/// let string_attribute = context.create_string_attribute("my_key", "my_val");
/// let enum_attribute = context.create_enum_attribute(1, 1);
/// let entry_bb = context.append_basic_block(fn_value, "entry");
///
/// builder.position_at_end(entry_bb);
///
/// let call_site_value = builder.build_call(fn_value, &[], "my_fn");
///
/// call_site_value.add_attribute(AttributeLoc::Return, string_attribute);
/// call_site_value.add_attribute(AttributeLoc::Return, enum_attribute);
///
/// assert_eq!(call_site_value.count_attributes(AttributeLoc::Return), 2);
/// ```
#[llvm_versions(3.9..=latest)]
pub fn count_attributes(self, loc: AttributeLoc) -> u32 |
/// Gets an enum `Attribute` on this `CallSiteValue` at an index and kind id.
///
/// # Example
///
/// ```no_run
/// use inkwell::attributes::AttributeLoc;
/// use inkwell::context::Context;
///
/// let context = Context::create();
/// let builder = context.create_builder();
/// let module = context.create_module("my_mod");
/// let void_type = context.void_type();
/// let fn_type = void_type.fn_type(&[], false);
/// let fn_value = module.add_function("my_fn", fn_type, None);
/// let string_attribute = context.create_string_attribute("my_key", "my_val");
/// let enum_attribute = context.create_enum_attribute(1, 1);
/// let entry_bb = context.append_basic_block(fn_value, "entry");
///
/// builder.position_at_end(entry_bb);
///
/// let call_site_value = builder.build_call(fn_value, &[], "my_fn");
///
/// call_site_value.add_attribute(AttributeLoc::Return, string_attribute);
/// call_site_value.add_attribute(AttributeLoc::Return, enum_attribute);
///
/// assert_eq!(call_site_value.get_enum_attribute(AttributeLoc::Return, 1).unwrap(), enum_attribute);
/// ```
// SubTypes: -> Attribute<Enum>
#[llvm_versions(3.9..=latest)]
pub fn get_enum_attribute(self, loc: AttributeLoc, kind_id: u32) -> Option<Attribute> {
use llvm_sys::core::LLVMGetCallSiteEnumAttribute;
let ptr = unsafe {
LLVMGetCallSiteEnumAttribute(self.as_value_ref(), loc.get_index(), kind_id)
};
if ptr.is_null() {
return None;
}
Some(Attribute::new(ptr))
}
/// Gets a string `Attribute` on this `CallSiteValue` at an index and key.
///
/// # Example
///
/// ```no_run
/// use inkwell::attributes::AttributeLoc;
/// use inkwell::context::Context;
///
/// let context = Context::create();
/// let builder = context.create_builder();
/// let module = context.create_module("my_mod");
/// let void_type = context.void_type();
/// let fn_type = void_type.fn_type(&[], false);
/// let fn_value = module.add_function("my_fn", fn_type, None);
/// let string_attribute = context.create_string_attribute("my_key", "my_val");
/// let enum_attribute = context.create_enum_attribute(1, 1);
/// let entry_bb = context.append_basic_block(fn_value, "entry");
///
/// builder.position_at_end(entry_bb);
///
/// let call_site_value = builder.build_call(fn_value, &[], "my_fn");
///
/// call_site_value.add_attribute(AttributeLoc::Return, string_attribute);
/// call_site_value.add_attribute(AttributeLoc::Return, enum_attribute);
///
/// assert_eq!(call_site_value.get_string_attribute(AttributeLoc::Return, "my_key").unwrap(), string_attribute);
/// ```
// SubTypes: -> Attribute<String>
#[llvm_versions(3.9..=latest)]
pub fn get_string_attribute(self, loc: AttributeLoc, key: &str) -> Option<Attribute> {
use llvm_sys::core::LLVMGetCallSiteStringAttribute;
let ptr = unsafe {
LLVMGetCallSiteStringAttribute(self.as_value_ref(), loc.get_index(), key.as_ptr() as *const ::libc::c_char, key.len() as u32)
};
if ptr.is_null() {
return None;
}
Some(Attribute::new(ptr))
}
/// Removes an enum `Attribute` on this `CallSiteValue` at an index and kind id.
///
/// # Example
///
/// ```no_run
/// use inkwell::attributes::AttributeLoc;
/// use inkwell::context::Context;
///
/// let context = Context::create();
/// let builder = context.create_builder();
/// let module = context.create_module("my_mod");
/// let void_type = context.void_type();
/// let fn_type = void_type.fn_type(&[], false);
/// let fn_value = module.add_function("my_fn", fn_type, None);
/// let string_attribute = context.create_string_attribute("my_key", "my_val");
/// let enum_attribute = context.create_enum_attribute(1, 1);
/// let entry_bb = context.append_basic_block(fn_value, "entry");
///
/// builder.position_at_end(entry_bb);
///
/// let call_site_value = builder.build_call(fn_value, &[], "my_fn");
///
/// call_site_value.add_attribute(AttributeLoc::Return, string_attribute);
/// call_site_value.add_attribute(AttributeLoc::Return, enum_attribute);
/// call_site_value.remove_enum_attribute(AttributeLoc::Return, 1);
///
/// assert_eq!(call_site_value.get_enum_attribute(AttributeLoc::Return, 1), None);
/// ```
#[llvm_versions(3.9..=latest)]
pub fn remove_enum_attribute(self, loc: AttributeLoc, kind_id: u32) {
use llvm_sys::core::LLVMRemoveCallSiteEnumAttribute;
unsafe {
LLVMRemoveCallSiteEnumAttribute(self.as_value_ref(), loc.get_index(), kind_id)
}
}
/// Removes a string `Attribute` on this `CallSiteValue` at an index and key.
///
/// # Example
///
/// ```no_run
/// use inkwell::attributes::AttributeLoc;
/// use inkwell::context::Context;
///
/// let context = Context::create();
/// let builder = context.create_builder();
/// let module = context.create_module("my_mod");
/// let void_type = context.void_type();
/// let fn_type = void_type.fn_type(&[], false);
/// let fn_value = module.add_function("my_fn", fn_type, None);
/// let string_attribute = context.create_string_attribute("my_key", "my_val");
/// let enum_attribute = context.create_enum_attribute(1, 1);
/// let entry_bb = context.append_basic_block(fn_value, "entry");
///
/// builder.position_at_end(entry_bb);
///
/// let call_site_value = builder.build_call(fn_value, &[], "my_fn");
///
/// call_site_value.add_attribute(AttributeLoc::Return, string_attribute);
/// call_site_value.add_attribute(AttributeLoc::Return, enum_attribute);
/// call_site_value.remove_string_attribute(AttributeLoc::Return, "my_key");
///
/// assert_eq!(call_site_value.get_string_attribute(AttributeLoc::Return, "my_key"), None);
/// ```
#[llvm_versions(3.9..=latest)]
pub fn remove_string_attribute(self, loc: AttributeLoc, key: &str) {
use llvm_sys::core::LLVMRemoveCallSiteStringAttribute;
unsafe {
LLVMRemoveCallSiteStringAttribute(self.as_value_ref(), loc.get_index(), key.as_ptr() as *const ::libc::c_char, key.len() as u32)
}
}
/// Counts the number of arguments this `CallSiteValue` was called with.
///
/// # Example
///
/// ```no_run
/// use inkwell::attributes::AttributeLoc;
/// use inkwell::context::Context;
///
/// let context = Context::create();
/// let builder = context.create_builder();
/// let module = context.create_module("my_mod");
/// let void_type = context.void_type();
/// let fn_type = void_type.fn_type(&[], false);
/// let fn_value = module.add_function("my_fn", fn_type, None);
/// let string_attribute = context.create_string_attribute("my_key", "my_val");
/// let enum_attribute = context.create_enum_attribute(1, 1);
/// let entry_bb = context.append_basic_block(fn_value, "entry");
///
/// builder.position_at_end(entry_bb);
///
/// let call_site_value = builder.build_call(fn_value, &[], "my_fn");
///
/// assert_eq!(call_site_value.count_arguments(), 0);
/// ```
#[llvm_versions(3.9..=latest)]
pub fn count_arguments(self) -> u32 {
use llvm_sys::core::LLVMGetNumArgOperands;
unsafe {
LLVMGetNumArgOperands(self.as_value_ref())
}
}
/// Gets the calling convention for this `CallSiteValue`.
///
/// # Example
///
/// ```no_run
/// use inkwell::context::Context;
///
/// let context = Context::create();
/// let builder = context.create_builder();
/// let module = context.create_module("my_mod");
/// let void_type = context.void_type();
/// let fn_type = void_type.fn_type(&[], false);
/// let fn_value = module.add_function("my_fn", fn_type, None);
/// let entry_bb = context.append_basic_block(fn_value, "entry");
///
/// builder.position_at_end(entry_bb);
///
/// let call_site_value = builder.build_call(fn_value, &[], "my_fn");
///
/// assert_eq!(call_site_value.get_call_convention(), 0);
/// ```
pub fn get_call_convention(self) -> u32 {
unsafe {
LLVMGetInstructionCallConv(self.as_value_ref())
}
}
/// Sets the calling convention for this `CallSiteValue`.
///
/// # Example
///
/// ```no_run
/// use inkwell::context::Context;
///
/// let context = Context::create();
/// let builder = context.create_builder();
/// let module = context.create_module("my_mod");
/// let void_type = context.void_type();
/// let fn_type = void_type.fn_type(&[], false);
/// let fn_value = module.add_function("my_fn", fn_type, None);
/// let entry_bb = context.append_basic_block(fn_value, "entry");
///
/// builder.position_at_end(entry_bb);
///
/// let call_site_value = builder.build_call(fn_value, &[], "my_fn");
///
/// call_site_value.set_call_convention(2);
///
/// assert_eq!(call_site_value.get_call_convention(), 2);
/// ```
pub fn set_call_convention(self, conv: u32) {
unsafe {
LLVMSetInstructionCallConv(self.as_value_ref(), conv)
}
}
/// Shortcut for setting the alignment `Attribute` for this `CallSiteValue`.
///
/// # Panics
///
/// When the alignment is not a power of 2.
///
/// # Example
///
/// ```no_run
/// use inkwell::attributes::AttributeLoc;
/// use inkwell::context::Context;
///
/// let context = Context::create();
/// let builder = context.create_builder();
/// let module = context.create_module("my_mod");
/// let void_type = context.void_type();
/// let fn_type = void_type.fn_type(&[], false);
/// let fn_value = module.add_function("my_fn", fn_type, None);
/// let entry_bb = context.append_basic_block(fn_value, "entry");
///
/// builder.position_at_end(entry_bb);
///
/// let call_site_value = builder.build_call(fn_value, &[], "my_fn");
///
/// call_site_value.set_alignment_attribute(AttributeLoc::Param(0), 2);
/// ```
pub fn set_alignment_attribute(self, loc: AttributeLoc, alignment: u32) {
assert_eq!(alignment.count_ones(), 1, "Alignment must be a power of two.");
unsafe {
LLVMSetInstrParamAlignment(self.as_value_ref(), loc.get_index(), alignment)
}
}
/// Prints the definition of a `CallSiteValue` to a `LLVMString`.
pub fn print_to_string(self) -> LLVMString {
self.0.print_to_string()
}
}
impl AsValueRef for CallSiteValue<'_> {
fn as_value_ref(&self) -> LLVMValueRef {
self.0.value
}
}
| {
use llvm_sys::core::LLVMGetCallSiteAttributeCount;
unsafe {
LLVMGetCallSiteAttributeCount(self.as_value_ref(), loc.get_index())
}
} |
print.go | // Copyright 2017-2021 Jeff Foley. All rights reserved.
// Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.
package format
import (
"fmt"
"io"
"net"
"strconv"
"strings"
amassnet "github.com/OWASP/Amass/v3/net"
"github.com/OWASP/Amass/v3/requests"
"github.com/fatih/color"
)
// Banner is the ASCII art logo used within help output.
const Banner = `
.+++:. : .+++.
+W@@@@@@8 &+W@# o8W8: +W@@@@@@#. oW@@@W#+
&@#+ .o@##. .@@@[email protected]@@o :@@#&W8o .@#: .:oW+ .@#+++&#&
+@& &@& #@8 +@W@&8@+ :@W. +@8 +@: .@8
8@ @@ 8@o 8@8 WW .@W W@+ .@W. o@#:
WW &@o &@: o@+ o@+ #@. 8@o +W@#+. +W@8:
#@ :@W &@+ &@+ @8 :@o o@o oW@@W+ oW@8
o@+ @@& &@+ &@+ #@ &@. .W@W .+#@& o@W.
WW +@W@8. &@+ :& o@+ #@ :@W&@& &@: .. :@o
:@W: o@# +Wo &@+ :W: +@W&o++o@W. &@& 8@#o+&@W. #@: o@+
:W@@WWWW@@8 + :&W@@@@& &W .o#@@W&. :W@WWW@@&
+o&&&&+. +oooo.
`
const (
// Version is used to display the current version of Amass.
Version = "v3.12.3"
// Author is used to display the Amass Project Team.
Author = "OWASP Amass Project - @owaspamass"
// Description is the slogan for the Amass Project.
Description = "In-depth Attack Surface Mapping and Asset Discovery"
)
var (
// Colors used to ease the reading of program output
g = color.New(color.FgHiGreen)
b = color.New(color.FgHiBlue)
yellow = color.New(color.FgHiYellow).SprintFunc()
green = color.New(color.FgHiGreen).SprintFunc()
blue = color.New(color.FgHiBlue).SprintFunc()
)
// ASNSummaryData stores information related to discovered ASs and netblocks.
type ASNSummaryData struct {
Name string
Netblocks map[string]int
}
// UpdateSummaryData updates the summary maps using the provided requests.Output data.
func UpdateSummaryData(output *requests.Output, tags map[string]int, asns map[int]*ASNSummaryData) {
tags[output.Tag]++
for _, addr := range output.Addresses {
if addr.CIDRStr == "" {
continue
}
data, found := asns[addr.ASN]
if !found {
asns[addr.ASN] = &ASNSummaryData{
Name: addr.Description,
Netblocks: make(map[string]int),
}
data = asns[addr.ASN]
}
// Increment how many IPs were in this netblock
data.Netblocks[addr.CIDRStr]++
}
}
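// A minimal usage sketch for UpdateSummaryData (the Output values below are
// illustrative, not taken from a real enumeration):
//
//	tags := make(map[string]int)
//	asns := make(map[int]*ASNSummaryData)
//	out := &requests.Output{
//		Tag: "dns",
//		Addresses: []requests.AddressInfo{
//			{ASN: 64512, Description: "EXAMPLE-AS", CIDRStr: "192.0.2.0/24"},
//		},
//	}
//	UpdateSummaryData(out, tags, asns)
//	// tags["dns"] == 1 and asns[64512].Netblocks["192.0.2.0/24"] == 1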
// PrintEnumerationSummary outputs the summary information utilized by the command-line tools.
func PrintEnumerationSummary(total int, tags map[string]int, asns map[int]*ASNSummaryData, demo bool) {
FprintEnumerationSummary(color.Error, total, tags, asns, demo)
}
// FprintEnumerationSummary outputs the summary information utilized by the command-line tools.
func FprintEnumerationSummary(out io.Writer, total int, tags map[string]int, asns map[int]*ASNSummaryData, demo bool) {
pad := func(num int, chr string) {
for i := 0; i < num; i++ {
b.Fprint(out, chr)
}
}
fmt.Fprintln(out)
// Print the header information
title := "OWASP Amass "
site := "https://github.com/OWASP/Amass"
b.Fprint(out, title+Version)
num := 80 - (len(title) + len(Version) + len(site))
pad(num, " ")
b.Fprintf(out, "%s\n", site)
pad(8, "----------")
fmt.Fprintf(out, "\n%s%s", yellow(strconv.Itoa(total)), green(" names discovered - "))
// Print the stats using tag information
num, length := 1, len(tags)
for k, v := range tags {
fmt.Fprintf(out, "%s: %s", green(k), yellow(strconv.Itoa(v)))
if num < length {
g.Fprint(out, ", ")
}
num++
}
fmt.Fprintln(out)
if len(asns) == 0 {
return
}
	// Print a separator line before the ASN breakdown
pad(8, "----------")
fmt.Fprintln(out)
// Print the ASN and netblock information
for asn, data := range asns {
asnstr := strconv.Itoa(asn)
datastr := data.Name
if demo && asn > 0 {
asnstr = censorString(asnstr, 0, len(asnstr))
datastr = censorString(datastr, 0, len(datastr))
}
fmt.Fprintf(out, "%s%s %s %s\n", blue("ASN: "), yellow(asnstr), green("-"), green(datastr))
for cidr, ips := range data.Netblocks {
countstr := strconv.Itoa(ips)
cidrstr := cidr
if demo {
cidrstr = censorNetBlock(cidrstr)
}
countstr = fmt.Sprintf("\t%-4s", countstr)
cidrstr = fmt.Sprintf("\t%-18s", cidrstr)
fmt.Fprintf(out, "%s%s %s\n", yellow(cidrstr), yellow(countstr), blue("Subdomain Name(s)"))
}
}
}
// PrintBanner outputs the Amass banner the same for all tools.
func PrintBanner() {
FprintBanner(color.Error)
}
// FprintBanner outputs the Amass banner the same for all tools.
func FprintBanner(out io.Writer) {
y := color.New(color.FgHiYellow)
r := color.New(color.FgHiRed)
rightmost := 76
pad := func(num int) {
for i := 0; i < num; i++ {
fmt.Fprint(out, " ")
}
}
r.Fprintln(out, Banner)
pad(rightmost - len(Version))
y.Fprintln(out, Version)
pad(rightmost - len(Author))
y.Fprintln(out, Author)
pad(rightmost - len(Description))
y.Fprintf(out, "%s\n\n\n", Description)
}
func censorDomain(input string) string {
return censorString(input, strings.Index(input, "."), len(input))
}
func censorIP(input string) string {
return censorString(input, 0, strings.LastIndex(input, "."))
}
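// For example, censorIP("203.0.113.42") yields "xxx.x.xxx.42": every
// character before the last dot is replaced while the separators are kept.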
func censorNetBlock(input string) string {
return censorString(input, 0, strings.Index(input, "/"))
}
func censorString(input string, start, end int) string {
runes := []rune(input)
for i := start; i < end; i++ {
if runes[i] == '.' ||
runes[i] == '/' ||
runes[i] == '-' ||
runes[i] == ' ' {
continue
}
runes[i] = 'x'
}
return string(runes)
}
// OutputLineParts returns the parts of a line to be printed for a requests.Output.
func OutputLineParts(out *requests.Output, src, addrs, demo bool) (source, name, ips string) {
if src {
source = fmt.Sprintf("%-18s", "["+out.Sources[0]+"] ")
}
if addrs {
for i, a := range out.Addresses {
if i != 0 {
ips += ","
}
if demo {
ips += censorIP(a.Address.String())
} else {
ips += a.Address.String()
}
}
if ips == "" {
ips = "N/A"
}
}
name = out.Name
if demo {
name = censorDomain(name)
}
return
}
// DesiredAddrTypes removes undesired address types from the AddressInfo slice.
func DesiredAddrTypes(addrs []requests.AddressInfo, ipv4, ipv6 bool) []requests.AddressInfo {
if !ipv4 && !ipv6 {
return addrs
}
var keep []requests.AddressInfo
for _, addr := range addrs {
if amassnet.IsIPv4(addr.Address) && !ipv4 {
continue
} else if amassnet.IsIPv6(addr.Address) && !ipv6 {
continue
}
keep = append(keep, addr)
}
return keep
}
// InterfaceInfo returns network interface information specific to the current host.
func | () string {
var output string
if ifaces, err := net.Interfaces(); err == nil {
for _, i := range ifaces {
addrs, err := i.Addrs()
if err != nil {
continue
}
output += fmt.Sprintf("%s%s%s\n", blue(i.Name+": "), green("flags="), yellow("<"+strings.ToUpper(i.Flags.String()+">")))
if i.HardwareAddr.String() != "" {
output += fmt.Sprintf("\t%s%s\n", green("ether: "), yellow(i.HardwareAddr.String()))
}
for _, addr := range addrs {
inet := "inet"
if a, ok := addr.(*net.IPNet); ok && amassnet.IsIPv6(a.IP) {
inet += "6"
}
inet += ": "
output += fmt.Sprintf("\t%s%s\n", green(inet), yellow(addr.String()))
}
}
}
return output
}
| InterfaceInfo |
test_auditregistration_api.py | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.13.4
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.apis.auditregistration_api import AuditregistrationApi
class TestAuditregistrationApi(unittest.TestCase):
""" AuditregistrationApi unit test stubs """
def | (self):
self.api = kubernetes.client.apis.auditregistration_api.AuditregistrationApi()
def tearDown(self):
pass
def test_get_api_group(self):
"""
Test case for get_api_group
"""
pass
if __name__ == '__main__':
unittest.main()
| setUp |
mod.rs | //! Sorted String Table, which is stored in disk.
//!
//! # SSTable
//!
//!
//! ```text
//! +-------------------------+ (offset 0)
//! | Data Block 1 |<-+
//! +-------------------------+ |
//! | Data Block 2 |<-+-+
//! +-------------------------+ | |
//! | ... | | |
//! +-------------------------+ | |
//! | Data Block n |<-+ |
//! +-------------------------+ |
//! | Index Block |----+<-+
//! +-------------------------+ |
//! | Filter Block |<-+ |
//! +-------------------------+ | |
//! | Footer |--+----+
//! +-------------------------+
//! ```
//!
//! ## Data Block
//!
//! ```text
//! +-----------------------------------------------------------------+
//! | Key/Value Entry 1 | Key/Value Entry 2 | ... | Key/Value Entry n |
//! +-----------------------------------------------------------------+
//! ```
//!
//! ### Key/Value Entry
//!
//! ```text
//! +-----------------------------------------+
//! | key length | value length | key | value |
//! +-----------------------------------------+
//! \-----------/\-------------/\-----/\------/
//! u32 u32 var-len var-len
//! ```
//!
//! ## Index Block
//!
//! ```text
//! +-------------------------------+
//! | min_key length(u32) | min_key |
//! +---------------------------------------------------------+
//! | offset | length | index_offset | key1 length | max key1 | -> Data Block1
//! +---------------------------------------------------------+
//! | offset | length | index_offset | key2 length | max key2 | -> Data BLock2
//! +---------------------------------------------------------+
//! | ... |
//! +---------------------------------------------------------+
//! \-------/\-------/\------------/\-------------/\----------/
//! u32 u32 u32 u32 var-len
//! ```
//!
//! ## Filter Block
//!
//! ```text
//! +---------------------------------+
//! | FilterBlock length | bit vector |
//! +---------------------------------+
//! ```
//!
//! ## Footer
//!
//! The Footer has a fixed length of 20 bytes (five u32 fields).
//!
//! ```text
//! +--------------------------------------------------------------------------------------------+
//! | IndexBlock offset | IndexBlock length | filter length | kv_total | Magic Number 0xdb991122 |
//! +--------------------------------------------------------------------------------------------+
//! \------------------/\-------------------/\-------------/\----------/\------------------------/
//! u32 u32 u32 u32 u32
//! ```
//!
//! NOTE: All fixed-length integers are little-endian.
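//!
//! As a rough illustration only (the struct and helper below are not part of
//! this module's API; they are a sketch derived from the layout above):
//!
//! ```ignore
//! use std::convert::TryInto;
//!
//! const MAGIC_NUMBER: u32 = 0xdb991122;
//!
//! struct Footer {
//!     index_offset: u32,
//!     index_length: u32,
//!     filter_length: u32,
//!     kv_total: u32,
//! }
//!
//! fn decode_footer(buf: &[u8; 20]) -> Option<Footer> {
//!     // Read the i-th little-endian u32 field.
//!     let u32_at = |i: usize| u32::from_le_bytes(buf[i * 4..i * 4 + 4].try_into().unwrap());
//!     if u32_at(4) != MAGIC_NUMBER {
//!         return None; // magic number mismatch: not an SSTable footer
//!     }
//!     Some(Footer {
//!         index_offset: u32_at(0),
//!         index_length: u32_at(1),
//!         filter_length: u32_at(2),
//!         kv_total: u32_at(3),
//!     })
//! }
//! ```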
pub(super) mod data_block;
pub(super) mod filter_block;
pub(crate) mod footer;
pub(crate) mod index_block;
pub mod manager;
mod table_cache;
pub mod table_handle;
pub type TableID = u64;
pub const DATA_BLOCK_SIZE: usize = 4096;
pub const NUM_LEVEL0_TABLE_TO_COMPACT: usize = 4;
pub fn | (db_path: &str, level: u32, table_id: u128) -> String {
format!("{}/{}/{}", db_path, level, table_id)
}
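// For example, sstable_file("/tmp/db", 1, 42) returns "/tmp/db/1/42".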
| sstable_file |
UserService.go | package service
import (
"net/http"
"strconv"
"time"
"github.com/gin-gonic/gin"
"kuukaa.fun/danmu-v4/common"
"kuukaa.fun/danmu-v4/dto"
"kuukaa.fun/danmu-v4/model"
"kuukaa.fun/danmu-v4/response"
"kuukaa.fun/danmu-v4/util"
"kuukaa.fun/danmu-v4/vo"
"github.com/jinzhu/gorm"
"golang.org/x/crypto/bcrypt"
)
/*********************************************************
** Function: Register
** Date:     2021/11/8
**********************************************************/
func RegisterService(user dto.RegisterDto) response.ResponseStruct {
res := response.ResponseStruct{
HttpStatus: http.StatusOK,
Code: response.SuccessCode,
Data: nil,
Msg: response.OK,
}
	// Check whether the email is already registered
DB := common.GetDB()
if isEmailExist(DB, user.Email) {
res.HttpStatus = http.StatusUnprocessableEntity
res.Code = response.CheckFailCode
res.Msg = response.EmailRegistered
return res
}
	// Create the user
hashedPassword, err := bcrypt.GenerateFromPassword([]byte(user.Password), bcrypt.DefaultCost)
if err != nil {
res.HttpStatus = http.StatusInternalServerError
res.Code = response.ServerErrorCode
res.Msg = response.SystemError
		// Log the error
util.Logfile(util.ErrorLog, " hashed password "+err.Error())
return res
}
newUser := model.User{
Name: user.Name,
Email: user.Email,
Password: string(hashedPassword),
}
DB.Create(&newUser)
return res
}
/*********************************************************
** Function: Login
** Date:     2021/11/8
**********************************************************/
func LoginService(login dto.LoginDto, userIP string) response.ResponseStruct {
res := response.ResponseStruct{
HttpStatus: http.StatusOK,
Code: response.SuccessCode,
Data: nil,
Msg: response.OK,
}
	// Check whether the email exists
var user model.User
DB := common.GetDB()
DB.Where("email = ?", login.Email).First(&user)
if user.ID == 0 {
res.HttpStatus = http.StatusBadRequest
res.Code = response.FailCode
res.Msg = response.UserNotExist
return res
}
	// Verify the password
if err := bcrypt.CompareHashAndPassword([]byte(user.Password), []byte(login.Password)); err != nil {
res.HttpStatus = http.StatusUnprocessableEntity
res.Code = response.CheckFailCode
res.Msg = response.NameOrPasswordError
return res
}
	// Issue a token
token, err := common.ReleaseToken(user)
if err != nil {
res.HttpStatus = http.StatusInternalServerError
res.Code = response.ServerErrorCode
res.Msg = response.SystemError
util.Logfile(util.ErrorLog, " token generate error "+err.Error())
return res
}
util.Logfile(util.InfoLog, " Token issued successfully uid "+strconv.Itoa(int(user.ID))+" | "+userIP)
	// Return the data
res.Data = gin.H{"token": token, "user": vo.ToUserVo(user)}
return res
}
/*********************************************************
** Function: Login via email
** Date:     2021/11/8
**********************************************************/
func EmailLoginService(login dto.EmailLoginDto, userIP string) response.ResponseStruct {
res := response.ResponseStruct{
HttpStatus: http.StatusOK,
Code: response.SuccessCode,
Data: nil,
Msg: response.OK,
}
	// Check whether the email exists
var user model.User
DB := common.GetDB()
DB.Where("email = ?", login.Email).First(&user)
if user.ID == 0 {
res.HttpStatus = http.StatusBadRequest
res.Code = response.FailCode
res.Msg = response.UserNotExist
return res
}
	// Issue a token
token, err := common.ReleaseToken(user)
if err != nil {
res.HttpStatus = http.StatusInternalServerError
res.Code = response.ServerErrorCode
res.Msg = response.SystemError
util.Logfile(util.ErrorLog, " token generate error "+err.Error())
return res
}
util.Logfile(util.InfoLog, " Token issued successfully uid "+strconv.Itoa(int(user.ID))+" | "+userIP)
	// Return the data
res.Data = gin.H{"token": token, "user": vo.ToUserVo(user)}
return res
}
/*********************************************************
** Function: Modify user info
** Date:     2021/11/8
**********************************************************/
func UserModifyService(modify dto.ModifyUserDto, uid interface{}, tBirthday time.Time) response.ResponseStruct {
res := response.ResponseStruct{
HttpStatus: http.StatusOK,
Code: response.SuccessCode,
Data: nil,
Msg: response.OK,
}
DB := common.GetDB()
err := DB.Model(model.User{}).Where("id = ?", uid).Updates(
map[string]interface{}{"name": modify.Name, "gender": modify.Gender, "birthday": tBirthday, "sign": modify.Sign},
).Error
if err != nil {
res.HttpStatus = http.StatusBadRequest
res.Code = response.FailCode
res.Msg = response.ModifyFail
return res
}
return res
}
/*********************************************************
** Function: Change password
** Date:     2021/11/10
**********************************************************/
func ModifyPasswordService(password string, user model.User) response.ResponseStruct {
res := response.ResponseStruct{
HttpStatus: http.StatusOK,
Code: response.SuccessCode,
Data: nil,
Msg: response.OK,
}
DB := common.GetDB()
	// Update the password
err := DB.Model(&user).Update("password", password).Error
if err != nil {
res.HttpStatus = http.StatusBadRequest
res.Code = response.FailCode
res.Msg = response.ModifyFail
return res
}
return res
}
/*********************************************************
** Function: Get user info by user ID
** Date:     2021/11/10
**********************************************************/
func GetUserInfoByIDService(uid interface{}) response.ResponseStruct {
var user model.User
DB := common.GetDB()
DB.Select("id,name,sign,avatar,gender").Where("id = ?", uid).First(&user)
return response.ResponseStruct{
HttpStatus: http.StatusOK,
Code: response.SuccessCode,
Data: gin.H{"user": vo.ToUserVo(user)},
Msg: response.OK,
}
}
/*********************************************************
** Function: Admin: get user list
** Date:     2021-11-12 15:13:53
**********************************************************/
func GetUserListService(page int, pageSize int) response.ResponseStruct {
var users []vo.AdminUserVo
DB := common.GetDB()
	// Total record count
var total int
DB.Model(&model.User{}).Count(&total)
DB = DB.Limit(pageSize).Offset((page - 1) * pageSize)
DB.Model(&model.User{}).Select("id,name,created_at,email,avatar,sign,gender").Scan(&users)
return response.ResponseStruct{
HttpStatus: http.StatusOK,
Code: response.SuccessCode,
Data: gin.H{"count": total, "users": users},
Msg: response.OK,
}
}
/*********************************************************
** Function: Admin: modify user info
** Date:     2021-11-12 15:19:08
**********************************************************/
func AdminModifyUserService(newInfo dto.AdminModifyUserDto) response.ResponseStruct {
res := response.ResponseStruct{
HttpStatus: http.StatusOK,
Code: response.SuccessCode,
Data: nil,
Msg: response.OK,
}
var user model.User
DB := common.GetDB()
	// Make sure the new email does not already belong to another user
DB.Where("email = ?", newInfo.Email).First(&user)
if user.ID != 0 && user.ID != newInfo.ID {
res.HttpStatus = http.StatusUnprocessableEntity
res.Code = response.CheckFailCode
res.Msg = response.EmailRegistered
return res
}
DB.Model(&model.User{}).Where("id = ?", newInfo.ID).Updates(
map[string]interface{}{
"email": newInfo.Email,
"name": newInfo.Name,
"sign": newInfo.Sign,
},
)
return res
}
/*********************************************************
** Function: Admin: search users
** Date:     2022-03-24 19:35:28
**********************************************************/
func AdminSearchUserService(page int, pageSize int, keyword string) response.ResponseStruct {
	var total int // total record count
var users []vo.AdminUserVo
DB := common.GetDB()
DB = DB.Limit(pageSize).Offset((page - 1) * pageSize)
keyword = "%" + keyword + "%"
DB.Model(model.User{}).Where("name like ? or id like ? or email like ?", keyword, keyword, keyword).Scan(&users).Count(&total)
return response.ResponseStruct{
HttpStatus: http.StatusOK,
Code: response.SuccessCode,
Data: gin.H{"count": total, "users": users},
Msg: response.OK,
}
}
/*********************************************************
** Function: Admin: delete user
** Date:     2021-11-12 15:26:42
**********************************************************/
func AdminDeleteUserService(id uint) response.ResponseStruct {
DB := common.GetDB()
DB.Where("id = ?", id).Delete(model.User{})
return response.ResponseStruct{
HttpStatus: http.StatusOK,
Code: response.SuccessCode,
Data: nil,
Msg: response.OK,
}
}
/*********************************************************
** Function: Whether the email is already registered
** Date:     2021-11-12 11:03:55
**********************************************************/
func isEm | ******************************************/
func isEmailBelongsToCurrentUser(email string, uid interface{}) bool {
var user model.User
DB := common.GetDB()
DB.First(&user, uid)
if user.Email == email {
return true
}
return false
}
/*********************************************************
** Function: Whether the email exists
** Date:     2021/7/10
**********************************************************/
func isEmailExist(db *gorm.DB, email string) bool {
var user model.User
db.Where("email = ?", email).First(&user)
if user.ID != 0 {
return true
}
return false
}
/*********************************************************
** Function: Whether the user exists
** Date:     2021/7/10
**********************************************************/
func isUserExist(db *gorm.DB, id uint) bool {
var user model.User
db.First(&user, id)
if user.ID != 0 {
return true
}
return false
}
| ailRegistered(email string) bool {
DB := common.GetDB()
return isEmailExist(DB, email)
}
/*********************************************************
** Function: Whether the email belongs to the current user
** Date:     2021-11-12 11:10:23
**************** |
server.go | package server
import (
"context"
"net/http"
"sync"
"time"
"github.com/qdm12/ddns-updater/internal/data"
"github.com/qdm12/golibs/logging"
)
type Server interface {
Run(ctx context.Context, wg *sync.WaitGroup)
}
type server struct {
address string
logger logging.Logger
handler http.Handler
}
func | (address, rootURL, uiDir string, db data.Database, logger logging.Logger,
forceUpdate chan<- struct{}) Server {
handler := newHandler(rootURL, uiDir, db, logger, forceUpdate)
return &server{
address: address,
logger: logger,
handler: handler,
}
}
func (s *server) Run(ctx context.Context, wg *sync.WaitGroup) {
defer wg.Done()
server := http.Server{Addr: s.address, Handler: s.handler}
go func() {
<-ctx.Done()
s.logger.Warn("shutting down (context canceled)")
defer s.logger.Warn("shut down")
const shutdownGraceDuration = 2 * time.Second
shutdownCtx, cancel := context.WithTimeout(context.Background(), shutdownGraceDuration)
defer cancel()
if err := server.Shutdown(shutdownCtx); err != nil {
s.logger.Error("failed shutting down: %s", err)
}
}()
for ctx.Err() == nil {
s.logger.Info("listening on %s", s.address)
err := server.ListenAndServe()
if err != nil && ctx.Err() == nil { // server crashed
s.logger.Error(err)
s.logger.Info("restarting")
}
}
}
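// A minimal usage sketch (db and logger are assumed to be constructed
// elsewhere; the address and paths are illustrative):
//
//	ctx, cancel := context.WithCancel(context.Background())
//	wg := &sync.WaitGroup{}
//	forceUpdate := make(chan struct{})
//	srv := New("0.0.0.0:8000", "/", "./ui", db, logger, forceUpdate)
//	wg.Add(1)
//	go srv.Run(ctx, wg)
//	// ... on shutdown:
//	cancel() // triggers the graceful shutdown above
//	wg.Wait()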
| New |
remote_log.go | package logging
import (
"compress/gzip"
"io"
"os/exec"
"time"
)
type RemoteLog struct {
Host string
User string
Pattern string
Tail bool
Time time.Time
Compress bool
CustomLogRoot string
FromBegin bool // to be used with tail
}
const (
DEFAULT_LOG_ROOT = "/var/log/hourly"
HOURLY_PATTERN = "2006/01/02/2006-01-02T15.log"
)
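// HOURLY_PATTERN is a Go reference-time layout (Mon Jan 2 15:04:05 MST 2006),
// so a log written at 2021-03-05 14:00 UTC resolves to
// 2021/03/05/2021-03-05T14.log under the log root.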
func NewRemoteLogFromTime(host string, t time.Time, pattern string) *RemoteLog {
return &RemoteLog{
Time: t,
Host: host,
Pattern: pattern,
}
}
func (rl *RemoteLog) LogRoot() string {
if rl.CustomLogRoot != "" {
return rl.CustomLogRoot
}
return DEFAULT_LOG_ROOT
}
func (rl *RemoteLog) Current() string {
return rl.LogRoot() + "/current"
}
func (rl *RemoteLog) Path() string {
if !rl.Time.IsZero() {
return rl.Time.UTC().Format(rl.LogRoot() + "/" + HOURLY_PATTERN)
}
return rl.Current()
}
func (rl *RemoteLog) GzipPath() string {
return rl.Path() + ".gz"
}
func (rl *RemoteLog) Command() string {
cmd := rl.CatCmd()
if rl.Pattern != "" {
cmd += " | " + rl.GrepCmd()
}
if rl.Compress {
cmd += " | gzip"
}
return cmd
}
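// For example, a tailing RemoteLog with Pattern "ERROR" and Compress set
// builds: tail -n 0 -F /var/log/hourly/current | grep ERROR | gzip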
func (rl *RemoteLog) GrepCmd() string {
return "grep " + rl.Pattern
}
func (rl *RemoteLog) CatCmd() string {
if rl.Tail {
n := "0"
if rl.FromBegin {
n = "+0"
}
return "tail -n " + n + " -F " + rl.Current()
} | }
func (rl *RemoteLog) Open() (reader io.ReadCloser, e error) {
c := rl.Command()
var cmd *exec.Cmd
user := rl.User
if user == "" {
user = "root"
}
if rl.Host != "" {
cmd = exec.Command("ssh", "-t", "-l", user, rl.Host, c)
} else {
cmd = exec.Command("bash", "-c", c)
}
dbg.Printf("using cmd %q", cmd.Path)
reader, e = cmd.StdoutPipe()
if e != nil {
return nil, e
}
dbg.Printf("starting command %q", c)
e = cmd.Start()
if e != nil {
return nil, e
}
dbg.Print("command started")
if rl.Compress {
reader, e = gzip.NewReader(reader)
if e != nil {
return nil, e
}
}
dbg.Print("returning reader")
return reader, nil
} | return "{ test -e " + rl.Path() + " && cat " + rl.Path() + "; test -e " + rl.GzipPath() + " && cat " + rl.GzipPath() + " | gunzip; }" |
PageImage.js | import React from 'react';
import './PageImage.css';
// Semantic
import { Grid, Header, Icon, Image, Segment } from 'semantic-ui-react'; |
// Animation
import Fade from 'react-reveal/Fade';
import Rotate from 'react-reveal/Rotate';
const square = { width: 450, height: 450 };
class PageImage extends React.Component {
render() {
return(
<Grid columns={2} centered>
<Fade>
<Grid.Column className="image-column">
<Rotate top left>
<div className="image-div">
<div className="front-face">
<Image size="large" circular src="../../Images/ProfileSquare.jpg" alt="That's Me!" className="profile-pic" />
</div>
<div className="back-face">
<Segment circular style={square}>
<Header as='h2' className="rainbow">
A Splash of Color.
</Header>
<Header.Subheader as='h3'>
<br />
That's me, on the other side of this circle, at Yellowstone National Park in 2017.
I take a lot of inspiration from natural spaces and incorporate their shapes, colors and sounds into digital landscapes.
It's my goal to start with this mechanical, aesthetically pleasing design and transform it into a living, breathing space full of life as I update and alter it over time.
<br />
<br />
<Icon name="sync alternate" circular inverted className="flip-icon" />
</Header.Subheader>
</Segment>
</div>
</div>
</Rotate>
</Grid.Column>
<Grid.Column className="text-column">
<Rotate top right>
<Segment circular style={square} className="intro-segment">
<Header as='h2'>
CARSON CURRY <br />
<span className='dev-intro'>
developer
</span>
<span className='write-intro'>
writer
</span>
</Header>
<Header.Subheader as='h3' className='intro-text'>
<span className='dev-intro'>
I am a Kansas City-based full-stack web developer always looking for new projects to test my abilities.
Think of this page as an ever-evolving entity that's updated as I gather new tricks and skills as a developer.
Feel free to take a look at my past projects and send me a message if you think I'm the type of designer best suited to working on your shiny new website.
<br />
<br />
<Icon name="sync alternate" circular inverted className="flip-icon" />
</span>
<span className='write-intro'>
In addition to web development I am a writer who loves spilling digital ink on whatever weird subject pops into my head.
                    I love expressing myself not only through web content, but also through prose, essays and scripts.
Take a look at my blog, <a href="https://thiswebsiteisfine.com" target="_blank" rel="noopener noreferrer" className="web-link">thiswebsiteisfine</a> for the latest articles I've written and share my stuff if you think it's something people would enjoy!
<br />
<br />
<Icon name="sync alternate" circular inverted className="flip-icon" />
</span>
</Header.Subheader>
</Segment>
</Rotate>
</Grid.Column>
</Fade>
</Grid>
)
}
}
export default PageImage; | |
demo.js | b=document.getElementById("testDrive"),c=document.getElementById("testText");e.addEventListener("change",d,!1);c.addEventListener("input",a,!1);c.addEventListener("change",a,!1);d()})()}); | // All material copyright ESRI, All Rights Reserved, unless otherwise specified.
// See http://js.arcgis.com/3.17/esri/copyright.txt for details.
//>>built
define("esri/themes/calcite/icons/demo-files/demo",["dojo","dijit","dojox"],function(f,g,h){"boxShadow"in document.body.style||document.body.setAttribute("class","noBoxShadow");document.body.addEventListener("click",function(a){a=a.target;"INPUT"===a.tagName&&-1===a.getAttribute("class").indexOf("liga")&&a.select()});(function(){function a(){b.innerHTML=c.value||String.fromCharCode(160);window.icomoonLiga&&window.icomoonLiga(b)}function d(){b.style.fontSize=e.value+"px"}var e=document.getElementById("fontSize"), |
|
update.py | # Copyright (C) 2019 The Raphielscape Company LLC.
#
# Licensed under the Raphielscape Public License, Version 1.c (the "License");
# you may not use this file except in compliance with the License.
# Credits go to @AvinashReddy3108 for creating this plugin,
# edited to work on Uniborg by @Mayur_Karaniya
#
"""
This module updates the userbot based on Upstream revision
cmd is .update
Usage: Checks if the main userbot repository has any updates and shows a changelog if so.
.
cmd .update now
Usage: Updates your userbot, if there are any updates in the main userbot repository.
.
Credits go to @AvinashReddy3108 for creating this plugin,
edited to work on Uniborg by @Mayur_Karaniya.
This is a huge fix thanks to @SpEcHiDe and @devpatel_73
"""
from os import environ, execle, path, remove
import asyncio
import sys
from git import Repo
from git.exc import GitCommandError, InvalidGitRepositoryError, NoSuchPathError
from uniborg.util import admin_cmd
from sample_config import Config
#
# ===============================Basic Constants=============================
# UPSTREAM_REPO_URL is the same as below.
# "https://github.com/prono69/PepeBot.git"
UPSTREAM_REPO_URL = Config.UPSTREAM_REPO_URL
# REPO_LINK is the same as below. "https://github.com/prono69/PepeBot.git"
REPO_LINK = Config.REPO_LINK
# provide your HEROKU_API_KEY in place of this value.
HEROKU_API_KEY = Config.HEROKU_API_KEY
# provide your HEROKU_APP_NAME in place of this value.
HEROKU_APP_NAME = Config.HEROKU_APP_NAME
# heroku memes
HEROKU_MEMEZ = Config.HEROKU_MEMEZ
# getting you git repo name is also needed
GIT_REPO_NAME = Config.GIT_REPO_NAME
# ===============================Basic Constants=============================
requirements_path = path.join(
path.dirname(path.dirname(path.dirname(__file__))), 'requirements.txt')
async def gen_chlog(repo, diff):
ch_log = ''
d_form = "%d/%m/%y"
for c in repo.iter_commits(diff):
ch_log += f'•[{c.committed_datetime.strftime(d_form)}]: {c.summary} by <{c.author}>\n'
return ch_log
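# A sample line produced by gen_chlog (the commit data is illustrative):
#   •[05/03/21]: Fix updater crash on detached HEAD by <Some Author>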
async def updateme_requirements():
reqs = str(requirements_path)
try:
process = await asyncio.create_subprocess_shell(
' '.join([sys.executable, "-m", "pip", "install", "-r", reqs]),
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE)
await process.communicate()
return process.returncode
except Exception as e:
return repr(e)
@borg.on(admin_cmd(pattern="update ?(.*)", outgoing=True, allow_sudo=True))
async def up | ps):
"For .update command, check if the bot is up to date, update if specified"
await ups.edit("`Checking for updates, please wait....`")
conf = ups.pattern_match.group(1)
off_repo = UPSTREAM_REPO_URL
force_updateme = False
try:
txt = "`Oops.. Updater cannot continue due to "
txt += "some problems occured`\n\n**LOGTRACE:**\n"
repo = Repo()
except NoSuchPathError as error:
await ups.edit(f'{txt}\n`directory {error} is not found`')
repo.__del__()
return
except GitCommandError as error:
await ups.edit(f'{txt}\n`Early failure! {error}`')
repo.__del__()
return
except InvalidGitRepositoryError as error:
if conf != "now":
await ups.edit(
f"**Unfortunately, the directory {error} does not seem to be a git repository.\
            \nOr maybe it just needs a sync verification with {GIT_REPO_NAME}\
\nBut we can fix that by force updating the userbot using** `.update now.`"
)
return
repo = Repo.init()
origin = repo.create_remote('upstream', off_repo)
origin.fetch()
force_updateme = True
repo.create_head('master', origin.refs.master)
repo.heads.master.set_tracking_branch(origin.refs.master)
repo.heads.master.checkout(True)
ac_br = repo.active_branch.name
if ac_br != 'master':
await ups.edit(
f'**[UPDATER]:**` Looks like you are using your own custom branch ({ac_br}). '
'in that case, Updater is unable to identify '
'which branch is to be merged. '
'please checkout to any official branch`')
repo.__del__()
return
try:
repo.create_remote('upstream', off_repo)
except BaseException:
pass
ups_rem = repo.remote('upstream')
ups_rem.fetch(ac_br)
changelog = await gen_chlog(repo, f'HEAD..upstream/{ac_br}')
if not changelog and not force_updateme:
await ups.edit(
f'\n`Your BOT is` **up-to-date** `with` **{ac_br}**\n')
repo.__del__()
return
if conf != "now" and not force_updateme:
changelog_str = f'**New UPDATE available for [{ac_br}]:\n\nCHANGELOG:**\n`{changelog}`'
if len(changelog_str) > 4096:
await ups.edit("`Changelog is too big, view the file to see it.`")
file = open("output.txt", "w+")
file.write(changelog_str)
file.close()
await ups.client.send_file(
ups.chat_id,
"output.txt",
reply_to=ups.id,
)
remove("output.txt")
else:
await ups.edit(changelog_str)
await ups.respond('do \"`.update now`\" to update')
return
if force_updateme:
await ups.edit(
'`Force-Syncing to latest stable userbot code, please wait...`')
else:
await ups.edit('`Updating userbot, please wait....`')
# We're in a Heroku Dyno, handle it's memez.
if HEROKU_API_KEY is not None:
import heroku3
heroku = heroku3.from_key(HEROKU_API_KEY)
heroku_app = None
heroku_applications = heroku.apps()
if not HEROKU_APP_NAME:
await ups.edit(
'`[HEROKU MEMEZ] Please set up the HEROKU_APP_NAME variable to be able to update userbot.`'
)
repo.__del__()
return
for app in heroku_applications:
if app.name == HEROKU_APP_NAME:
heroku_app = app
break
if heroku_app is None:
await ups.edit(
f'{txt}\n`Invalid Heroku credentials for updating userbot dyno.`'
)
repo.__del__()
return
await ups.edit('`[HEROKU MEMEZ]\
\nUserbot dyno build in progress, please wait for it to complete.`'
)
ups_rem.fetch(ac_br)
repo.git.reset("--hard", "FETCH_HEAD")
heroku_git_url = heroku_app.git_url.replace(
"https://", "https://api:" + HEROKU_API_KEY + "@")
if "heroku" in repo.remotes:
remote = repo.remote("heroku")
remote.set_url(heroku_git_url)
else:
remote = repo.create_remote("heroku", heroku_git_url)
try:
remote.push(refspec="HEAD:refs/heads/master", force=True)
except GitCommandError as error:
await ups.edit(f'{txt}\n`Here is the error log:\n{error}`')
repo.__del__()
return
await ups.edit('`Successfully Updated!\n'
'Restarting, please wait...`')
else:
# Classic Updater, pretty straightforward.
try:
ups_rem.pull(ac_br)
except GitCommandError:
repo.git.reset("--hard", "FETCH_HEAD")
await updateme_requirements()
await ups.edit('`Successfully Updated!\n'
'Bot is restarting... Wait for a second!`')
# Spin a new instance of bot
args = [sys.executable, "-m", "stdborg"]
execle(sys.executable, *args, environ)
return
| stream(u |
profiles.rs | // Copyright 2021 Conveen
//! Network profiles.
/// Wi-Fi profile.
pub mod wifi;
pub use wifi::WiFiProfileDirective; | ||
android_commands.py | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Provides an interface to communicate with the device via the adb command.
Assumes adb binary is currently on system path.
Note that this module is deprecated.
"""
# TODO(jbudorick): Delete this file once no clients use it.
# pylint: skip-file
import collections
import datetime
import inspect
import logging
import os
import random
import re
import shlex
import signal
import subprocess
import sys
import tempfile
import time
import cmd_helper
import constants
import system_properties
from utils import host_utils
try:
from pylib import pexpect
except ImportError:
pexpect = None
sys.path.append(os.path.join(
constants.DIR_SOURCE_ROOT, 'third_party', 'android_testrunner'))
import adb_interface
import am_instrument_parser
import errors
from pylib.device import device_blacklist
from pylib.device import device_errors
# Pattern to search for the next whole line of pexpect output and capture it
# into a match group. We can't use ^ and $ for line start end with pexpect,
# see http://www.noah.org/python/pexpect/#doc for explanation why.
PEXPECT_LINE_RE = re.compile('\n([^\r]*)\r')
# Set the adb shell prompt to be a unique marker that will [hopefully] not
# appear at the start of any line of a command's output.
SHELL_PROMPT = '~+~PQ\x17RS~+~'
# Java properties file
LOCAL_PROPERTIES_PATH = constants.DEVICE_LOCAL_PROPERTIES_PATH
# Property in /data/local.prop that controls Java assertions.
JAVA_ASSERT_PROPERTY = 'dalvik.vm.enableassertions'
# Keycode "enum" suitable for passing to AndroidCommands.SendKey().
KEYCODE_HOME = 3
KEYCODE_BACK = 4
KEYCODE_DPAD_UP = 19
KEYCODE_DPAD_DOWN = 20
KEYCODE_DPAD_RIGHT = 22
KEYCODE_ENTER = 66
KEYCODE_MENU = 82
MD5SUM_DEVICE_FOLDER = constants.TEST_EXECUTABLE_DIR + '/md5sum/'
MD5SUM_DEVICE_PATH = MD5SUM_DEVICE_FOLDER + 'md5sum_bin'
PIE_WRAPPER_PATH = constants.TEST_EXECUTABLE_DIR + '/run_pie'
CONTROL_USB_CHARGING_COMMANDS = [
{
# Nexus 4
'witness_file': '/sys/module/pm8921_charger/parameters/disabled',
'enable_command': 'echo 0 > /sys/module/pm8921_charger/parameters/disabled',
'disable_command':
'echo 1 > /sys/module/pm8921_charger/parameters/disabled',
},
{
# Nexus 5
# Setting the HIZ bit of the bq24192 causes the charger to actually ignore
# energy coming from USB. Setting the power_supply offline just updates the
# Android system to reflect that.
'witness_file': '/sys/kernel/debug/bq24192/INPUT_SRC_CONT',
'enable_command': (
'echo 0x4A > /sys/kernel/debug/bq24192/INPUT_SRC_CONT && '
'echo 1 > /sys/class/power_supply/usb/online'),
'disable_command': (
'echo 0xCA > /sys/kernel/debug/bq24192/INPUT_SRC_CONT && '
'chmod 644 /sys/class/power_supply/usb/online && '
'echo 0 > /sys/class/power_supply/usb/online'),
},
]
class DeviceTempFile(object):
def __init__(self, android_commands, prefix='temp_file', suffix=''):
"""Find an unused temporary file path in the devices external directory.
When this object is closed, the file will be deleted on the device.
"""
self.android_commands = android_commands
while True:
# TODO(cjhopman): This could actually return the same file in multiple
# calls if the caller doesn't write to the files immediately. This is
# expected to never happen.
i = random.randint(0, 1000000)
self.name = '%s/%s-%d-%010d%s' % (
android_commands.GetExternalStorage(),
prefix, int(time.time()), i, suffix)
if not android_commands.FileExistsOnDevice(self.name):
break
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
def close(self):
self.android_commands.RunShellCommand('rm ' + self.name)
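# A minimal usage sketch (`ac` stands in for an AndroidCommands instance
# constructed elsewhere; the shell command is illustrative):
#
#   with DeviceTempFile(ac, prefix='trace', suffix='.txt') as tmp:
#     ac.RunShellCommand('logcat -d > %s' % tmp.name)
#   # the file is removed from the device when the with block exits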
def GetAVDs():
"""Returns a list of AVDs."""
re_avd = re.compile('^[ ]+Name: ([a-zA-Z0-9_:.-]+)', re.MULTILINE)
avds = re_avd.findall(cmd_helper.GetCmdOutput(['android', 'list', 'avd']))
return avds
def | ():
"""Removes the blacklist that keeps track of bad devices for a current
build.
"""
device_blacklist.ResetBlacklist()
def ExtendBadDevices(devices):
"""Adds devices to the blacklist that keeps track of bad devices for a
current build.
The devices listed in the bad devices file will not be returned by
GetAttachedDevices.
Args:
devices: list of bad devices to be added to the bad devices file.
"""
device_blacklist.ExtendBlacklist(devices)
def GetAttachedDevices(hardware=True, emulator=True, offline=False):
"""Returns a list of attached, android devices and emulators.
If a preferred device has been set with ANDROID_SERIAL, it will be first in
the returned list. The arguments specify what devices to include in the list.
Example output:
* daemon not running. starting it now on port 5037 *
* daemon started successfully *
List of devices attached
027c10494100b4d7 device
emulator-5554 offline
Args:
hardware: Include attached actual devices that are online.
emulator: Include emulators (i.e. AVDs) currently on host.
offline: Include devices and emulators that are offline.
Returns: List of devices.
"""
adb_devices_output = cmd_helper.GetCmdOutput([constants.GetAdbPath(),
'devices'])
re_device = re.compile('^([a-zA-Z0-9_:.-]+)\tdevice$', re.MULTILINE)
online_devices = re_device.findall(adb_devices_output)
re_device = re.compile('^(emulator-[0-9]+)\tdevice', re.MULTILINE)
emulator_devices = re_device.findall(adb_devices_output)
re_device = re.compile('^([a-zA-Z0-9_:.-]+)\t(?:offline|unauthorized)$',
re.MULTILINE)
offline_devices = re_device.findall(adb_devices_output)
devices = []
# First determine list of online devices (e.g. hardware and/or emulator).
if hardware and emulator:
devices = online_devices
elif hardware:
devices = [device for device in online_devices
if device not in emulator_devices]
elif emulator:
devices = emulator_devices
# Now add offline devices if offline is true
if offline:
devices = devices + offline_devices
# Remove any devices in the blacklist.
blacklist = device_blacklist.ReadBlacklist()
if len(blacklist):
logging.info('Avoiding bad devices %s', ' '.join(blacklist))
devices = [device for device in devices if device not in blacklist]
preferred_device = os.environ.get('ANDROID_SERIAL')
if preferred_device in devices:
devices.remove(preferred_device)
devices.insert(0, preferred_device)
return devices
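# Example (a sketch, not called anywhere in this module): pick the first
# healthy hardware device, honoring the ANDROID_SERIAL ordering described
# above.
def _FirstAttachedDeviceExample():
  devices = GetAttachedDevices(hardware=True, emulator=False)
  if not devices:
    raise Exception('No devices attached.')
  return devices[0]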
def IsDeviceAttached(device):
"""Return true if the device is attached and online."""
return device in GetAttachedDevices()
def _GetFilesFromRecursiveLsOutput(path, ls_output, re_file, utc_offset=None):
"""Gets a list of files from `ls` command output.
Python's os.walk isn't used because it doesn't work over adb shell.
Args:
path: The path to list.
ls_output: A list of lines returned by an `ls -lR` command.
re_file: A compiled regular expression which parses a line into named groups
consisting of at minimum "filename", "date", "time", "size" and
optionally "timezone".
utc_offset: A 5-character string of the form +HHMM or -HHMM, where HH is a
2-digit string giving the number of UTC offset hours, and MM is a
2-digit string giving the number of UTC offset minutes. If the input
utc_offset is None, will try to look for the value of "timezone" if it
is specified in re_file.
Returns:
A dict of {"name": (size, lastmod), ...} where:
name: The file name relative to |path|'s directory.
size: The file size in bytes (0 for directories).
lastmod: The file last modification date in UTC.
"""
re_directory = re.compile('^%s/(?P<dir>[^:]+):$' % re.escape(path))
path_dir = os.path.dirname(path)
current_dir = ''
files = {}
for line in ls_output:
directory_match = re_directory.match(line)
if directory_match:
current_dir = directory_match.group('dir')
continue
file_match = re_file.match(line)
if file_match:
filename = os.path.join(current_dir, file_match.group('filename'))
if filename.startswith(path_dir):
filename = filename[len(path_dir) + 1:]
lastmod = datetime.datetime.strptime(
file_match.group('date') + ' ' + file_match.group('time')[:5],
'%Y-%m-%d %H:%M')
if not utc_offset and 'timezone' in re_file.groupindex:
utc_offset = file_match.group('timezone')
if isinstance(utc_offset, str) and len(utc_offset) == 5:
utc_delta = datetime.timedelta(hours=int(utc_offset[1:3]),
minutes=int(utc_offset[3:5]))
if utc_offset[0:1] == '-':
utc_delta = -utc_delta
lastmod -= utc_delta
files[filename] = (int(file_match.group('size')), lastmod)
return files
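# Sketch of a caller-supplied |re_file| for _GetFilesFromRecursiveLsOutput;
# it mirrors the pattern used by ListPathContents further below. The path
# and UTC offset are hypothetical.
def _RecursiveLsExample(ls_lines):
  re_file = re.compile('^-(?P<perms>[^\s]+)\s+(?P<user>[^\s]+)\s+'
                       '(?P<group>[^\s]+)\s+(?P<size>[^\s]+)\s+'
                       '(?P<date>[^\s]+)\s+(?P<time>[^\s]+)\s+'
                       '(?P<filename>[^\s]+)$')
  return _GetFilesFromRecursiveLsOutput('/sdcard/foo', ls_lines, re_file,
                                        utc_offset='+0000')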
def _ParseMd5SumOutput(md5sum_output):
"""Returns a list of tuples from the provided md5sum output.
Args:
md5sum_output: output directly from md5sum binary.
Returns:
List of namedtuples with attributes |hash| and |path|, where |path| is the
absolute path to the file with an Md5Sum of |hash|.
"""
HashAndPath = collections.namedtuple('HashAndPath', ['hash', 'path'])
split_lines = [line.split(' ') for line in md5sum_output]
return [HashAndPath._make(s) for s in split_lines if len(s) == 2]
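# Example of the expected input/output shape (the hash and path are made
# up): one well-formed line yields one (hash, path) namedtuple.
def _ParseMd5SumExample():
  line = 'd41d8cd98f00b204e9800998ecf8427e /data/local/tmp/foo'
  (entry,) = _ParseMd5SumOutput([line])
  return entry.hash, entry.path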
def _HasAdbPushSucceeded(command_output):
"""Returns whether adb push has succeeded from the provided output."""
# TODO(frankf): We should look at the return code instead of the command
# output for many of the commands in this file.
if not command_output:
return True
# Success looks like this: "3035 KB/s (12512056 bytes in 4.025s)"
# Errors look like this: "failed to copy ... "
if not re.search('^[0-9]', command_output.splitlines()[-1]):
logging.critical('PUSH FAILED: ' + command_output)
return False
return True
def GetLogTimestamp(log_line, year):
"""Returns the timestamp of the given |log_line| in the given year."""
try:
return datetime.datetime.strptime('%s-%s' % (year, log_line[:18]),
'%Y-%m-%d %H:%M:%S.%f')
except (ValueError, IndexError):
logging.critical('Error reading timestamp from ' + log_line)
return None
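# Example (the logcat line is made up): parsing a threadtime-style timestamp.
def _LogTimestampExample():
  line = '05-12 12:29:54.131 1234 1234 I chromium: hello'
  return GetLogTimestamp(line, '2014')  # -> datetime(2014, 5, 12, 12, 29, ...)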
class AndroidCommands(object):
"""Helper class for communicating with Android device via adb."""
def __init__(self, device=None):
"""Constructor.
Args:
device: If given, adb commands are only send to the device of this ID.
Otherwise commands are sent to all attached devices.
"""
self._adb = adb_interface.AdbInterface(constants.GetAdbPath())
if device:
self._adb.SetTargetSerial(device)
self._device = device
self._logcat = None
self.logcat_process = None
self._logcat_tmpoutfile = None
self._pushed_files = []
self._device_utc_offset = None
self._potential_push_size = 0
self._actual_push_size = 0
self._external_storage = ''
self._util_wrapper = ''
self._system_properties = system_properties.SystemProperties(self.Adb())
self._push_if_needed_cache = {}
self._control_usb_charging_command = {
'command': None,
'cached': False,
}
self._protected_file_access_method_initialized = None
self._privileged_command_runner = None
self._pie_wrapper = None
@property
def system_properties(self):
return self._system_properties
def _LogShell(self, cmd):
"""Logs the adb shell command."""
if self._device:
device_repr = self._device[-4:]
else:
device_repr = '????'
logging.info('[%s]> %s', device_repr, cmd)
def Adb(self):
"""Returns our AdbInterface to avoid us wrapping all its methods."""
# TODO(tonyg): Goal should be to git rid of this method by making this API
# complete and alleviating the need.
return self._adb
def GetDevice(self):
"""Returns the device serial."""
return self._device
def IsOnline(self):
"""Checks whether the device is online.
Returns:
True if device is in 'device' mode, False otherwise.
"""
# TODO(aurimas): revert to using adb get-state when android L adb is fixed.
#out = self._adb.SendCommand('get-state')
#return out.strip() == 'device'
out = self._adb.SendCommand('devices')
for line in out.split('\n'):
if self._device in line and 'device' in line:
return True
return False
def IsRootEnabled(self):
"""Checks if root is enabled on the device."""
root_test_output = self.RunShellCommand('ls /root') or ['']
return not 'Permission denied' in root_test_output[0]
def EnableAdbRoot(self):
"""Enables adb root on the device.
Returns:
True: if output from executing adb root was as expected.
False: otherwise.
"""
if self.GetBuildType() == 'user':
logging.warning("Can't enable root in production builds with type user")
return False
else:
return_value = self._adb.EnableAdbRoot()
# EnableAdbRoot inserts a call for wait-for-device only when adb logcat
# output matches what is expected. Just to be safe add a call to
# wait-for-device.
self._adb.SendCommand('wait-for-device')
return return_value
def GetDeviceYear(self):
"""Returns the year information of the date on device."""
return self.RunShellCommand('date +%Y')[0]
def GetExternalStorage(self):
if not self._external_storage:
self._external_storage = self.RunShellCommand('echo $EXTERNAL_STORAGE')[0]
if not self._external_storage:
raise device_errors.CommandFailedError(
['shell', "'echo $EXTERNAL_STORAGE'"],
'Unable to find $EXTERNAL_STORAGE')
return self._external_storage
def WaitForDevicePm(self, timeout=120):
"""Blocks until the device's package manager is available.
To work around http://b/5201039, we restart the shell and retry if the
package manager isn't back after 120 seconds.
Raises:
errors.WaitForResponseTimedOutError after max retries reached.
"""
last_err = None
retries = 3
while retries:
try:
self._adb.WaitForDevicePm(wait_time=timeout)
return # Success
except errors.WaitForResponseTimedOutError as e:
last_err = e
logging.warning('Restarting and retrying after timeout: %s', e)
retries -= 1
self.RestartShell()
raise last_err # Only reached after max retries, re-raise the last error.
def RestartShell(self):
"""Restarts the shell on the device. Does not block for it to return."""
self.RunShellCommand('stop')
self.RunShellCommand('start')
def Reboot(self, full_reboot=True):
"""Reboots the device and waits for the package manager to return.
Args:
full_reboot: Whether to fully reboot the device or just restart the shell.
"""
# TODO(torne): hive can't reboot the device either way without breaking the
# connection; work out if we can handle this better
if os.environ.get('USING_HIVE'):
logging.warning('Ignoring reboot request as we are on hive')
return
if full_reboot or not self.IsRootEnabled():
self._adb.SendCommand('reboot')
self._system_properties = system_properties.SystemProperties(self.Adb())
timeout = 300
retries = 1
# Wait for the device to disappear.
while retries < 10 and self.IsOnline():
time.sleep(1)
retries += 1
else:
self.RestartShell()
timeout = 120
# To run tests we need at least the package manager and the sd card (or
# other external storage) to be ready.
self.WaitForDevicePm(timeout)
self.WaitForSdCardReady(timeout)
def Shutdown(self):
"""Shuts down the device."""
self._adb.SendCommand('reboot -p')
self._system_properties = system_properties.SystemProperties(self.Adb())
def Uninstall(self, package):
"""Uninstalls the specified package from the device.
Args:
package: Name of the package to remove.
Returns:
A status string returned by adb uninstall
"""
uninstall_command = 'uninstall %s' % package
self._LogShell(uninstall_command)
return self._adb.SendCommand(uninstall_command, timeout_time=60)
def Install(self, package_file_path, reinstall=False):
"""Installs the specified package to the device.
Args:
package_file_path: Path to .apk file to install.
reinstall: Reinstall an existing apk, keeping the data.
Returns:
A status string returned by adb install
"""
assert os.path.isfile(package_file_path), ('<%s> is not file' %
package_file_path)
install_cmd = ['install']
if reinstall:
install_cmd.append('-r')
install_cmd.append(package_file_path)
install_cmd = ' '.join(install_cmd)
self._LogShell(install_cmd)
return self._adb.SendCommand(install_cmd,
timeout_time=2 * 60,
retry_count=0)
def ManagedInstall(self, apk_path, keep_data=False, package_name=None,
reboots_on_timeout=2):
"""Installs specified package and reboots device on timeouts.
If package_name is supplied, checks if the package is already installed and
doesn't reinstall if the apk md5sums match.
Args:
apk_path: Path to .apk file to install.
keep_data: Reinstalls instead of uninstalling first, preserving the
application data.
package_name: Package name (only needed if keep_data=False).
reboots_on_timeout: number of times to reboot if package manager is frozen.
"""
# Check if package is already installed and up to date.
if package_name:
installed_apk_path = self.GetApplicationPath(package_name)
if (installed_apk_path and
not self.GetFilesChanged(apk_path, installed_apk_path,
ignore_filenames=True)):
logging.info('Skipped install: identical %s APK already installed' %
package_name)
return
# Install.
reboots_left = reboots_on_timeout
while True:
try:
if not keep_data:
assert package_name
self.Uninstall(package_name)
install_status = self.Install(apk_path, reinstall=keep_data)
if 'Success' in install_status:
return
else:
raise Exception('Install failure: %s' % install_status)
except errors.WaitForResponseTimedOutError:
print '@@@STEP_WARNINGS@@@'
logging.info('Timeout on installing %s on device %s', apk_path,
self._device)
if reboots_left <= 0:
raise Exception('Install timed out')
# Force a hard reboot on last attempt
self.Reboot(full_reboot=(reboots_left == 1))
reboots_left -= 1
def MakeSystemFolderWritable(self):
"""Remounts the /system folder rw."""
out = self._adb.SendCommand('remount')
if out.strip() != 'remount succeeded':
raise errors.MsgException('Remount failed: %s' % out)
def RestartAdbdOnDevice(self):
logging.info('Restarting adbd on the device...')
with DeviceTempFile(self, suffix=".sh") as temp_script_file:
host_script_path = os.path.join(constants.DIR_SOURCE_ROOT,
'build',
'android',
'pylib',
'restart_adbd.sh')
self._adb.Push(host_script_path, temp_script_file.name)
self.RunShellCommand('. %s' % temp_script_file.name)
self._adb.SendCommand('wait-for-device')
def RestartAdbServer(self):
"""Restart the adb server."""
ret = self.KillAdbServer()
if ret != 0:
raise errors.MsgException('KillAdbServer: %d' % ret)
ret = self.StartAdbServer()
if ret != 0:
raise errors.MsgException('StartAdbServer: %d' % ret)
@staticmethod
def KillAdbServer():
"""Kill adb server."""
adb_cmd = [constants.GetAdbPath(), 'kill-server']
ret = cmd_helper.RunCmd(adb_cmd)
retry = 0
while retry < 3:
ret, _ = cmd_helper.GetCmdStatusAndOutput(['pgrep', 'adb'])
if ret != 0:
# pgrep didn't find adb, kill-server succeeded.
return 0
retry += 1
time.sleep(retry)
return ret
def StartAdbServer(self):
"""Start adb server."""
adb_cmd = ['taskset', '-c', '0', constants.GetAdbPath(), 'start-server']
ret, _ = cmd_helper.GetCmdStatusAndOutput(adb_cmd)
retry = 0
while retry < 3:
ret, _ = cmd_helper.GetCmdStatusAndOutput(['pgrep', 'adb'])
if ret == 0:
# pgrep found adb, start-server succeeded.
# Waiting for device to reconnect before returning success.
self._adb.SendCommand('wait-for-device')
return 0
retry += 1
time.sleep(retry)
return ret
def WaitForSystemBootCompleted(self, wait_time):
"""Waits for targeted system's boot_completed flag to be set.
Args:
wait_time: time in seconds to wait
Raises:
WaitForResponseTimedOutError if wait_time elapses and flag still not
set.
"""
logging.info('Waiting for system boot completed...')
self._adb.SendCommand('wait-for-device')
# Now the device is there, but system not boot completed.
# Query the sys.boot_completed flag with a basic command
boot_completed = False
attempts = 0
wait_period = 5
while not boot_completed and (attempts * wait_period) < wait_time:
output = self.system_properties['sys.boot_completed']
output = output.strip()
if output == '1':
boot_completed = True
else:
# If 'error: xxx' returned when querying the flag, it means
# adb server lost the connection to the emulator, so restart the adb
# server.
if 'error:' in output:
self.RestartAdbServer()
time.sleep(wait_period)
attempts += 1
if not boot_completed:
raise errors.WaitForResponseTimedOutError(
'sys.boot_completed flag was not set after %s seconds' % wait_time)
def WaitForSdCardReady(self, timeout_time):
"""Wait for the SD card ready before pushing data into it."""
logging.info('Waiting for SD card ready...')
sdcard_ready = False
attempts = 0
wait_period = 5
external_storage = self.GetExternalStorage()
while not sdcard_ready and attempts * wait_period < timeout_time:
output = self.RunShellCommand('ls ' + external_storage)
if output:
sdcard_ready = True
else:
time.sleep(wait_period)
attempts += 1
if not sdcard_ready:
raise errors.WaitForResponseTimedOutError(
'SD card not ready after %s seconds' % timeout_time)
def GetAndroidToolStatusAndOutput(self, command, lib_path=None, *args, **kw):
"""Runs a native Android binary, wrapping the command as necessary.
This is a specialization of GetShellCommandStatusAndOutput, which is meant
for running tools/android/ binaries and properly handles: (1) setting the
lib path (for component=shared_library), (2) using the PIE wrapper on ICS.
See crbug.com/373219 for more context.
Args:
command: String containing the command to send.
lib_path: (optional) path to the folder containing the dependent libs.
Other arguments are the same as for GetShellCommandStatusAndOutput.
"""
# The first time this command is run the device is inspected to check
# whether a wrapper for running PIE executables is needed (only Android ICS)
# or not. The result is cached, so the wrapper is pushed only once.
if self._pie_wrapper is None:
# None: did not check; '': did check and not needed; '/path': use /path.
self._pie_wrapper = ''
if self.GetBuildId().startswith('I'): # Ixxxx = Android ICS.
run_pie_dist_path = os.path.join(constants.GetOutDirectory(), 'run_pie')
assert os.path.exists(run_pie_dist_path), 'Please build run_pie'
# The PIE loader must be pushed manually (i.e. no PushIfNeeded) because
# PushIfNeeded requires md5sum and md5sum requires the wrapper as well.
adb_command = 'push %s %s' % (run_pie_dist_path, PIE_WRAPPER_PATH)
assert _HasAdbPushSucceeded(self._adb.SendCommand(adb_command))
self._pie_wrapper = PIE_WRAPPER_PATH
if self._pie_wrapper:
command = '%s %s' % (self._pie_wrapper, command)
if lib_path:
command = 'LD_LIBRARY_PATH=%s %s' % (lib_path, command)
return self.GetShellCommandStatusAndOutput(command, *args, **kw)
# It is tempting to turn this function into a generator; however, this is not
# possible without using a private (local) adb_shell instance (to ensure no
# other command interleaves usage of it), which would defeat the main aim of
# being able to reuse the adb shell instance across commands.
def RunShellCommand(self, command, timeout_time=20, log_result=False):
"""Send a command to the adb shell and return the result.
Args:
command: String containing the shell command to send.
timeout_time: Number of seconds to wait for command to respond before
retrying, used by AdbInterface.SendShellCommand.
log_result: Boolean to indicate whether we should log the result of the
shell command.
Returns:
list containing the lines of output received from running the command
"""
self._LogShell(command)
if "'" in command:
command = command.replace('\'', '\'\\\'\'')
result = self._adb.SendShellCommand(
"'%s'" % command, timeout_time).splitlines()
# TODO(b.kelemen): we should really be able to drop the stderr of the
# command or raise an exception based on what the caller wants.
result = [ l for l in result if not l.startswith('WARNING') ]
if ['error: device not found'] == result:
raise errors.DeviceUnresponsiveError('device not found')
if log_result:
self._LogShell('\n'.join(result))
return result
def GetShellCommandStatusAndOutput(self, command, timeout_time=20,
log_result=False):
"""See RunShellCommand() above.
Returns:
The tuple (exit code, list of output lines).
"""
lines = self.RunShellCommand(
command + '; echo %$?', timeout_time, log_result)
last_line = lines[-1]
status_pos = last_line.rfind('%')
assert status_pos >= 0
status = int(last_line[status_pos + 1:])
if status_pos == 0:
lines = lines[:-1]
else:
lines = lines[:-1] + [last_line[:status_pos]]
return (status, lines)
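# Usage sketch (the serial and command are hypothetical): the '%' marker
# appended above lets the exit status survive the single text stream, e.g.
#   ac = AndroidCommands(device='0123456789ABCDEF')
#   status, lines = ac.GetShellCommandStatusAndOutput('ls /sdcard')
#   if status != 0:
#     logging.error('ls failed:\n%s', '\n'.join(lines))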
def KillAll(self, process, signum=9, with_su=False):
"""Android version of killall, connected via adb.
Args:
process: name of the process to kill off.
signum: signal to use, 9 (SIGKILL) by default.
with_su: whether or not to use su to kill the processes.
Returns:
the number of processes killed
"""
pids = self.ExtractPid(process)
if pids:
cmd = 'kill -%d %s' % (signum, ' '.join(pids))
if with_su:
self.RunShellCommandWithSU(cmd)
else:
self.RunShellCommand(cmd)
return len(pids)
def KillAllBlocking(self, process, timeout_sec, signum=9, with_su=False):
"""Blocking version of killall, connected via adb.
This waits until no process matching the corresponding name appears in ps'
output anymore.
Args:
process: name of the process to kill off
timeout_sec: the timeout in seconds
signum: same as |KillAll|
with_su: same as |KillAll|
Returns:
the number of processes killed
"""
processes_killed = self.KillAll(process, signum=signum, with_su=with_su)
if processes_killed:
elapsed = 0
wait_period = 0.1
# Note that this doesn't take into account the time spent in ExtractPid().
while self.ExtractPid(process) and elapsed < timeout_sec:
time.sleep(wait_period)
elapsed += wait_period
if elapsed >= timeout_sec:
return processes_killed - len(self.ExtractPid(process))
return processes_killed
@staticmethod
def _GetActivityCommand(package, activity, wait_for_completion, action,
category, data, extras, trace_file_name, force_stop,
flags):
"""Creates command to start |package|'s activity on the device.
Args - as for StartActivity
Returns:
the command to run on the target to start the activity
"""
cmd = 'am start -a %s' % action
if force_stop:
cmd += ' -S'
if wait_for_completion:
cmd += ' -W'
if category:
cmd += ' -c %s' % category
if package and activity:
cmd += ' -n %s/%s' % (package, activity)
if data:
cmd += ' -d "%s"' % data
if extras:
for key in extras:
value = extras[key]
if isinstance(value, str):
cmd += ' --es'
elif isinstance(value, bool):
cmd += ' --ez'
elif isinstance(value, int):
cmd += ' --ei'
else:
raise NotImplementedError(
'Need to teach StartActivity how to pass %s extras' % type(value))
cmd += ' %s %s' % (key, value)
if trace_file_name:
cmd += ' --start-profiler ' + trace_file_name
if flags:
cmd += ' -f %s' % flags
return cmd
def StartActivity(self, package, activity, wait_for_completion=False,
action='android.intent.action.VIEW',
category=None, data=None,
extras=None, trace_file_name=None,
force_stop=False, flags=None):
"""Starts |package|'s activity on the device.
Args:
package: Name of package to start (e.g. 'com.google.android.apps.chrome').
activity: Name of activity (e.g. '.Main' or
'com.google.android.apps.chrome.Main').
wait_for_completion: wait for the activity to finish launching (-W flag).
action: string (e.g. "android.intent.action.MAIN"). Default is VIEW.
category: string (e.g. "android.intent.category.HOME")
data: Data string to pass to activity (e.g. 'http://www.example.com/').
extras: Dict of extras to pass to activity. Values are significant.
trace_file_name: If used, turns on and saves the trace to this file name.
force_stop: force stop the target app before starting the activity (-S
flag).
Returns:
The output of the underlying command as a list of lines.
"""
cmd = self._GetActivityCommand(package, activity, wait_for_completion,
action, category, data, extras,
trace_file_name, force_stop, flags)
return self.RunShellCommand(cmd)
def StartActivityTimed(self, package, activity, wait_for_completion=False,
action='android.intent.action.VIEW',
category=None, data=None,
extras=None, trace_file_name=None,
force_stop=False, flags=None):
"""Starts |package|'s activity on the device, returning the start time
Args - as for StartActivity
Returns:
A tuple containing:
- the output of the underlying command as a list of lines, and
- a timestamp string for the time at which the activity started
"""
cmd = self._GetActivityCommand(package, activity, wait_for_completion,
action, category, data, extras,
trace_file_name, force_stop, flags)
self.StartMonitoringLogcat()
out = self.RunShellCommand('log starting activity; ' + cmd)
activity_started_re = re.compile('.*starting activity.*')
m = self.WaitForLogMatch(activity_started_re, None)
assert m
start_line = m.group(0)
return (out, GetLogTimestamp(start_line, self.GetDeviceYear()))
def StartCrashUploadService(self, package):
# TODO(frankf): We really need a python wrapper around Intent
# to be shared with StartActivity/BroadcastIntent.
cmd = (
'am startservice -a %s.crash.ACTION_FIND_ALL -n '
'%s/%s.crash.MinidumpUploadService' %
(constants.PACKAGE_INFO['chrome'].package,
package,
constants.PACKAGE_INFO['chrome'].package))
am_output = self.RunShellCommandWithSU(cmd)
assert am_output and 'Starting' in am_output[-1], (
'Service failed to start: %s' % am_output)
time.sleep(15)
def BroadcastIntent(self, package, intent, *args):
"""Send a broadcast intent.
Args:
package: Name of package containing the intent.
intent: Name of the intent.
args: Optional extra arguments for the intent.
"""
cmd = 'am broadcast -a %s.%s %s' % (package, intent, ' '.join(args))
self.RunShellCommand(cmd)
def GoHome(self):
"""Tell the device to return to the home screen. Blocks until completion."""
self.RunShellCommand('am start -W '
'-a android.intent.action.MAIN -c android.intent.category.HOME')
def CloseApplication(self, package):
"""Attempt to close down the application, using increasing violence.
Args:
package: Name of the process to kill off, e.g.
com.google.android.apps.chrome
"""
self.RunShellCommand('am force-stop ' + package)
def GetApplicationPath(self, package):
"""Get the installed apk path on the device for the given package.
Args:
package: Name of the package.
Returns:
Path to the apk on the device if it exists, None otherwise.
"""
pm_path_output = self.RunShellCommand('pm path ' + package)
# The path output is non-empty if and only if the package exists.
if pm_path_output:
# pm_path_output is of the form: "package:/path/to/foo.apk"
return pm_path_output[0].split(':')[1]
else:
return None
def ClearApplicationState(self, package):
"""Closes and clears all state for the given |package|."""
# Check that the package exists before clearing it. Necessary because
# calling pm clear on a package that doesn't exist may never return.
pm_path_output = self.RunShellCommand('pm path ' + package)
# The path output is non-empty if and only if the package exists.
if pm_path_output:
self.RunShellCommand('pm clear ' + package)
def SendKeyEvent(self, keycode):
"""Sends keycode to the device.
Args:
keycode: Numeric keycode to send (see "enum" at top of file).
"""
self.RunShellCommand('input keyevent %d' % keycode)
def _RunMd5Sum(self, host_path, device_path):
"""Gets the md5sum of a host path and device path.
Args:
host_path: Path (file or directory) on the host.
device_path: Path on the device.
Returns:
A tuple containing lists of the host and device md5sum results as
created by _ParseMd5SumOutput().
"""
md5sum_dist_path = os.path.join(constants.GetOutDirectory(),
'md5sum_dist')
assert os.path.exists(md5sum_dist_path), 'Please build md5sum.'
md5sum_dist_mtime = os.stat(md5sum_dist_path).st_mtime
if (md5sum_dist_path not in self._push_if_needed_cache or
self._push_if_needed_cache[md5sum_dist_path] != md5sum_dist_mtime):
command = 'push %s %s' % (md5sum_dist_path, MD5SUM_DEVICE_FOLDER)
assert _HasAdbPushSucceeded(self._adb.SendCommand(command))
self._push_if_needed_cache[md5sum_dist_path] = md5sum_dist_mtime
(_, md5_device_output) = self.GetAndroidToolStatusAndOutput(
self._util_wrapper + ' ' + MD5SUM_DEVICE_PATH + ' ' + device_path,
lib_path=MD5SUM_DEVICE_FOLDER,
timeout_time=2 * 60)
device_hash_tuples = _ParseMd5SumOutput(md5_device_output)
assert os.path.exists(host_path), 'Local path not found %s' % host_path
md5sum_output = cmd_helper.GetCmdOutput(
[os.path.join(constants.GetOutDirectory(), 'md5sum_bin_host'),
host_path])
host_hash_tuples = _ParseMd5SumOutput(md5sum_output.splitlines())
return (host_hash_tuples, device_hash_tuples)
def GetFilesChanged(self, host_path, device_path, ignore_filenames=False):
"""Compares the md5sum of a host path against a device path.
Note: Ignores extra files on the device.
Args:
host_path: Path (file or directory) on the host.
device_path: Path on the device.
ignore_filenames: If True only the file contents are considered when
checking whether a file has changed, otherwise the relative path
must also match.
Returns:
A list of tuples of the form (host_path, device_path) for files whose
md5sums do not match.
"""
# Md5Sum resolves symbolic links in path names so the calculation of
# relative path names from its output will need the real path names of the
# base directories. Having calculated these they are used throughout the
# function since this makes us less subject to any future changes to Md5Sum.
real_host_path = os.path.realpath(host_path)
real_device_path = self.RunShellCommand('realpath "%s"' % device_path)[0]
host_hash_tuples, device_hash_tuples = self._RunMd5Sum(
real_host_path, real_device_path)
if len(host_hash_tuples) > len(device_hash_tuples):
logging.info('%s files do not exist on the device' %
(len(host_hash_tuples) - len(device_hash_tuples)))
host_rel = [(os.path.relpath(os.path.normpath(t.path), real_host_path),
t.hash)
for t in host_hash_tuples]
if os.path.isdir(real_host_path):
def RelToRealPaths(rel_path):
return (os.path.join(real_host_path, rel_path),
os.path.join(real_device_path, rel_path))
else:
assert len(host_rel) == 1
def RelToRealPaths(_):
return (real_host_path, real_device_path)
if ignore_filenames:
# If we are ignoring file names, then we want to push any file for which
# a file with an equivalent MD5 sum does not exist on the device.
device_hashes = set([h.hash for h in device_hash_tuples])
ShouldPush = lambda p, h: h not in device_hashes
else:
# Otherwise, we want to push any file on the host for which a file with
# an equivalent MD5 sum does not exist at the same relative path on the
# device.
device_rel = dict([(os.path.relpath(os.path.normpath(t.path),
real_device_path),
t.hash)
for t in device_hash_tuples])
ShouldPush = lambda p, h: p not in device_rel or h != device_rel[p]
return [RelToRealPaths(path) for path, host_hash in host_rel
if ShouldPush(path, host_hash)]
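# Sketch of the typical caller pattern (PushIfNeeded below does this for
# real; |ac| is an AndroidCommands instance and the paths are made up):
#   for host_f, device_f in ac.GetFilesChanged('/tmp/data', '/sdcard/data'):
#     ac.Adb().Push(host_f, device_f)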
def PushIfNeeded(self, host_path, device_path):
"""Pushes |host_path| to |device_path|.
Works for files and directories. This method skips copying any files that
already exist on the device with the same hash.
All pushed files can be removed by calling RemovePushedFiles().
"""
MAX_INDIVIDUAL_PUSHES = 50
if not os.path.exists(host_path):
raise device_errors.CommandFailedError(
'Local path not found %s' % host_path, device=str(self))
# See if the file on the host changed since the last push (if any) and
# return early if it didn't. Note that this shortcut assumes that the tests
# on the device don't modify the files.
if not os.path.isdir(host_path):
if host_path in self._push_if_needed_cache:
host_path_mtime = self._push_if_needed_cache[host_path]
if host_path_mtime == os.stat(host_path).st_mtime:
return
size = host_utils.GetRecursiveDiskUsage(host_path)
self._pushed_files.append(device_path)
self._potential_push_size += size
if os.path.isdir(host_path):
self.RunShellCommand('mkdir -p "%s"' % device_path)
changed_files = self.GetFilesChanged(host_path, device_path)
logging.info('Found %d files that need to be pushed to %s',
len(changed_files), device_path)
if not changed_files:
return
def Push(host, device):
# NOTE: We can't use adb_interface.Push() because it hardcodes a timeout
# of 60 seconds which isn't sufficient for a lot of users of this method.
push_command = 'push %s %s' % (host, device)
self._LogShell(push_command)
# Retry push with increasing backoff if the device is busy.
retry = 0
while True:
output = self._adb.SendCommand(push_command, timeout_time=30 * 60)
if _HasAdbPushSucceeded(output):
if not os.path.isdir(host_path):
self._push_if_needed_cache[host] = os.stat(host).st_mtime
return
if retry < 3:
retry += 1
wait_time = 5 * retry
logging.error('Push failed, retrying in %d seconds: %s' %
(wait_time, output))
time.sleep(wait_time)
else:
raise Exception('Push failed: %s' % output)
diff_size = 0
if len(changed_files) <= MAX_INDIVIDUAL_PUSHES:
diff_size = sum(host_utils.GetRecursiveDiskUsage(f[0])
for f in changed_files)
# TODO(craigdh): Replace this educated guess with a heuristic that
# approximates the push time for each method.
if len(changed_files) > MAX_INDIVIDUAL_PUSHES or diff_size > 0.5 * size:
self._actual_push_size += size
Push(host_path, device_path)
else:
for f in changed_files:
Push(f[0], f[1])
self._actual_push_size += diff_size
def GetPushSizeInfo(self):
"""Get total size of pushes to the device done via PushIfNeeded()
Returns:
A tuple:
1. Total size of push requests to PushIfNeeded (MB)
2. Total size that was actually pushed (MB)
"""
return (self._potential_push_size, self._actual_push_size)
def GetFileContents(self, filename, log_result=False):
"""Gets contents from the file specified by |filename|."""
return self.RunShellCommand('cat "%s" 2>/dev/null' % filename,
log_result=log_result)
def SetFileContents(self, filename, contents):
"""Writes |contents| to the file specified by |filename|."""
with tempfile.NamedTemporaryFile() as f:
f.write(contents)
f.flush()
self._adb.Push(f.name, filename)
def RunShellCommandWithSU(self, command, timeout_time=20, log_result=False):
return self.RunShellCommand('su -c %s' % command, timeout_time, log_result)
def CanAccessProtectedFileContents(self):
"""Returns True if Get/SetProtectedFileContents would work via "su" or adb
shell running as root.
Devices running user builds don't have adb root, but may provide "su" which
can be used for accessing protected files.
"""
return (self._GetProtectedFileCommandRunner() != None)
def _GetProtectedFileCommandRunner(self):
"""Finds the best method to access protected files on the device.
Returns:
1. None when privileged files cannot be accessed on the device.
2. Otherwise: A function taking a single parameter: a string with command
line arguments. Running that function executes the command with
the appropriate method.
"""
if self._protected_file_access_method_initialized:
return self._privileged_command_runner
self._privileged_command_runner = None
self._protected_file_access_method_initialized = True
for cmd in [self.RunShellCommand, self.RunShellCommandWithSU]:
# Get contents of the auxv vector for the init(8) process from a small
# binary file that always exists on linux and is always read-protected.
contents = cmd('cat /proc/1/auxv')
# The leading 4 or 8-bytes of auxv vector is a_type. There are not many
# reserved a_type values, hence byte 2 must always be '\0' for a realistic
# auxv. See /usr/include/elf.h.
if len(contents) > 0 and (contents[0][2] == '\0'):
self._privileged_command_runner = cmd
break
return self._privileged_command_runner
def GetProtectedFileContents(self, filename):
"""Gets contents from the protected file specified by |filename|.
This is potentially less efficient than GetFileContents.
"""
command = 'cat "%s" 2> /dev/null' % filename
command_runner = self._GetProtectedFileCommandRunner()
if command_runner:
return command_runner(command)
else:
logging.warning('Could not access protected file: %s' % filename)
return []
def SetProtectedFileContents(self, filename, contents):
"""Writes |contents| to the protected file specified by |filename|.
This is less efficient than SetFileContents.
"""
with DeviceTempFile(self) as temp_file:
with DeviceTempFile(self, suffix=".sh") as temp_script:
# Put the contents in a temporary file
self.SetFileContents(temp_file.name, contents)
# Create a script to copy the file contents to its final destination
self.SetFileContents(temp_script.name,
'cat %s > %s' % (temp_file.name, filename))
command = 'sh %s' % temp_script.name
command_runner = self._GetProtectedFileCommandRunner()
if command_runner:
return command_runner(command)
else:
logging.warning(
'Could not set contents of protected file: %s' % filename)
def RemovePushedFiles(self):
"""Removes all files pushed with PushIfNeeded() from the device."""
for p in self._pushed_files:
self.RunShellCommand('rm -r %s' % p, timeout_time=2 * 60)
def ListPathContents(self, path):
"""Lists files in all subdirectories of |path|.
Args:
path: The path to list.
Returns:
A dict of {"name": (size, lastmod), ...}.
"""
# Example output:
# /foo/bar:
# -rw-r----- user group 102 2011-05-12 12:29:54.131623387 +0100 baz.txt
re_file = re.compile('^-(?P<perms>[^\s]+)\s+'
'(?P<user>[^\s]+)\s+'
'(?P<group>[^\s]+)\s+'
'(?P<size>[^\s]+)\s+'
'(?P<date>[^\s]+)\s+'
'(?P<time>[^\s]+)\s+'
'(?P<filename>[^\s]+)$')
return _GetFilesFromRecursiveLsOutput(
path, self.RunShellCommand('ls -lR %s' % path), re_file,
self.GetUtcOffset())
def GetUtcOffset(self):
if not self._device_utc_offset:
self._device_utc_offset = self.RunShellCommand('date +%z')[0]
return self._device_utc_offset
def SetJavaAssertsEnabled(self, enable):
"""Sets or removes the device java assertions property.
Args:
enable: If True the property will be set.
Returns:
True if the file was modified (reboot is required for it to take effect).
"""
# First ensure the desired property is persisted.
temp_props_file = tempfile.NamedTemporaryFile()
properties = ''
if self._adb.Pull(LOCAL_PROPERTIES_PATH, temp_props_file.name):
with open(temp_props_file.name) as f:
properties = f.read()
re_search = re.compile(r'^\s*' + re.escape(JAVA_ASSERT_PROPERTY) +
r'\s*=\s*all\s*$', re.MULTILINE)
if enable != bool(re.search(re_search, properties)):
re_replace = re.compile(r'^\s*' + re.escape(JAVA_ASSERT_PROPERTY) +
r'\s*=\s*\w+\s*$', re.MULTILINE)
properties = re.sub(re_replace, '', properties)
if enable:
properties += '\n%s=all\n' % JAVA_ASSERT_PROPERTY
with open(temp_props_file.name, 'w') as f:
  f.write(properties)
self._adb.Push(temp_props_file.name, LOCAL_PROPERTIES_PATH)
# Next, check the current runtime value is what we need, and
# if not, set it and report that a reboot is required.
was_set = 'all' in self.system_properties[JAVA_ASSERT_PROPERTY]
if was_set == enable:
return False
self.system_properties[JAVA_ASSERT_PROPERTY] = enable and 'all' or ''
return True
def GetBuildId(self):
"""Returns the build ID of the system (e.g. JRM79C)."""
build_id = self.system_properties['ro.build.id']
assert build_id
return build_id
def GetBuildType(self):
"""Returns the build type of the system (e.g. eng)."""
build_type = self.system_properties['ro.build.type']
assert build_type
return build_type
def GetBuildProduct(self):
"""Returns the build product of the device (e.g. maguro)."""
build_product = self.system_properties['ro.build.product']
assert build_product
return build_product
def GetProductName(self):
"""Returns the product name of the device (e.g. takju)."""
name = self.system_properties['ro.product.name']
assert name
return name
def GetBuildFingerprint(self):
"""Returns the build fingerprint of the device."""
build_fingerprint = self.system_properties['ro.build.fingerprint']
assert build_fingerprint
return build_fingerprint
def GetDescription(self):
"""Returns the description of the system.
For example, "yakju-userdebug 4.1 JRN54F 364167 dev-keys".
"""
description = self.system_properties['ro.build.description']
assert description
return description
def GetProductModel(self):
"""Returns the name of the product model (e.g. "Galaxy Nexus") """
model = self.system_properties['ro.product.model']
assert model
return model
def GetWifiIP(self):
"""Returns the wifi IP on the device."""
wifi_ip = self.system_properties['dhcp.wlan0.ipaddress']
# Do not assert here. Devices (e.g. emulators) may not have a WifiIP.
return wifi_ip
def GetSubscriberInfo(self):
"""Returns the device subscriber info (e.g. GSM and device ID) as string."""
iphone_sub = self.RunShellCommand('dumpsys iphonesubinfo')
# Do not assert here. Devices (e.g. Nakasi on K) may not have iphonesubinfo.
return '\n'.join(iphone_sub)
def GetBatteryInfo(self):
"""Returns a {str: str} dict of battery info (e.g. status, level, etc)."""
battery = self.RunShellCommand('dumpsys battery')
assert battery
battery_info = {}
for line in battery[1:]:
k, _, v = line.partition(': ')
battery_info[k.strip()] = v.strip()
return battery_info
def GetSetupWizardStatus(self):
"""Returns the status of the device setup wizard (e.g. DISABLED)."""
status = self.system_properties['ro.setupwizard.mode']
# On some devices, the status is empty if not otherwise set. In such cases
# the caller should expect an empty string to be returned.
return status
def StartMonitoringLogcat(self, clear=True, logfile=None, filters=None):
"""Starts monitoring the output of logcat, for use with WaitForLogMatch.
Args:
clear: If True the existing logcat output will be cleared, to avoid
matching historical output lurking in the log.
filters: A list of logcat filters to be used.
"""
if clear:
self.RunShellCommand('logcat -c')
args = []
if self._adb._target_arg:
args += shlex.split(self._adb._target_arg)
args += ['logcat', '-v', 'threadtime']
if filters:
args.extend(filters)
else:
args.append('*:v')
if logfile:
logfile = NewLineNormalizer(logfile)
# Spawn logcat and synchronize with it.
for _ in range(4):
self._logcat = pexpect.spawn(constants.GetAdbPath(), args, timeout=10,
logfile=logfile)
if not clear or self.SyncLogCat():
break
self._logcat.close(force=True)
else:
logging.critical('Error reading from logcat: ' + str(self._logcat.match))
sys.exit(1)
def SyncLogCat(self):
"""Synchronize with logcat.
Synchronize with the monitored logcat so that WaitForLogMatch will only
consider new message that are received after this point in time.
Returns:
True if the synchronization succeeded.
"""
assert self._logcat
tag = 'logcat_sync_%s' % time.time()
self.RunShellCommand('log ' + tag)
return self._logcat.expect([tag, pexpect.EOF, pexpect.TIMEOUT]) == 0
def GetMonitoredLogCat(self):
"""Returns an "adb logcat" command as created by pexpected.spawn."""
if not self._logcat:
self.StartMonitoringLogcat(clear=False)
return self._logcat
def WaitForLogMatch(self, success_re, error_re, clear=False, timeout=10):
"""Blocks until a matching line is logged or a timeout occurs.
Args:
success_re: A compiled re to search each line for.
error_re: A compiled re which, if found, terminates the search for
|success_re|. If None is given, no error condition will be detected.
clear: If True the existing logcat output will be cleared, defaults to
false.
timeout: Timeout in seconds to wait for a log match.
Raises:
pexpect.TIMEOUT after |timeout| seconds without a match for |success_re|
or |error_re|.
Returns:
The re match object if |success_re| is matched first or None if |error_re|
is matched first.
"""
logging.info('<<< Waiting for logcat:' + str(success_re.pattern))
t0 = time.time()
while True:
if not self._logcat:
self.StartMonitoringLogcat(clear)
try:
while True:
# Note this will block for up to the timeout _per log line_, so we need
# to calculate the overall timeout remaining since t0.
time_remaining = t0 + timeout - time.time()
if time_remaining < 0:
raise pexpect.TIMEOUT(self._logcat)
self._logcat.expect(PEXPECT_LINE_RE, timeout=time_remaining)
line = self._logcat.match.group(1)
if error_re:
error_match = error_re.search(line)
if error_match:
return None
success_match = success_re.search(line)
if success_match:
return success_match
logging.info('<<< Skipped Logcat Line:' + str(line))
except pexpect.TIMEOUT:
raise pexpect.TIMEOUT(
'Timeout (%ds) exceeded waiting for pattern "%s" (tip: use -vv '
'to debug)' %
(timeout, success_re.pattern))
except pexpect.EOF:
# It seems that sometimes logcat can end unexpectedly. This seems
# to happen during Chrome startup after a reboot followed by a cache
# clean. I don't understand why this happens, but this code deals with
# getting EOF in logcat.
logging.critical('Found EOF in adb logcat. Restarting...')
# Rerun spawn with original arguments. Note that self._logcat.args[0] is
# the path of adb, so we don't want it in the arguments.
self._logcat = pexpect.spawn(constants.GetAdbPath(),
self._logcat.args[1:],
timeout=self._logcat.timeout,
logfile=self._logcat.logfile)
def StartRecordingLogcat(self, clear=True, filters=None):
"""Starts recording logcat output to eventually be saved as a string.
This call should come before some series of tests are run, with either
StopRecordingLogcat or SearchLogcatRecord following the tests.
Args:
clear: True if existing log output should be cleared.
filters: A list of logcat filters to be used.
"""
if not filters:
filters = ['*:v']
if clear:
self._adb.SendCommand('logcat -c')
logcat_command = 'adb %s logcat -v threadtime %s' % (self._adb._target_arg,
' '.join(filters))
self._logcat_tmpoutfile = tempfile.NamedTemporaryFile(bufsize=0)
self.logcat_process = subprocess.Popen(logcat_command, shell=True,
stdout=self._logcat_tmpoutfile)
def GetCurrentRecordedLogcat(self):
"""Return the current content of the logcat being recorded.
Call this after StartRecordingLogcat() and before StopRecordingLogcat().
This can be useful to perform timed polling/parsing.
Returns:
Current logcat output as a single string, or None if
StopRecordingLogcat() was already called.
"""
if not self._logcat_tmpoutfile:
return None
with open(self._logcat_tmpoutfile.name) as f:
return f.read()
def StopRecordingLogcat(self):
"""Stops an existing logcat recording subprocess and returns output.
Returns:
The logcat output as a string or an empty string if logcat was not
being recorded at the time.
"""
if not self.logcat_process:
return ''
# Compare poll() against None explicitly, since 0 is a valid exit status.
# Read the output from the temp file only after the process has been killed
# and waited on, rather than via communicate(), which may return incomplete
# output if the pipe breaks.
if self.logcat_process.poll() is None:
self.logcat_process.kill()
self.logcat_process.wait()
self.logcat_process = None
self._logcat_tmpoutfile.seek(0)
output = self._logcat_tmpoutfile.read()
self._logcat_tmpoutfile.close()
self._logcat_tmpoutfile = None
return output
@staticmethod
def SearchLogcatRecord(record, message, thread_id=None, proc_id=None,
log_level=None, component=None):
"""Searches the specified logcat output and returns results.
This method searches through the logcat output specified by record for a
certain message, narrowing results by matching them against any other
specified criteria. It returns all matching lines as described below.
Args:
record: A string generated by Start/StopRecordingLogcat to search.
message: An output string to search for.
thread_id: The thread id that is the origin of the message.
proc_id: The process that is the origin of the message.
log_level: The log level of the message.
component: The name of the component that would create the message.
Returns:
A list of dictionaries representing matching entries, each containing keys
thread_id, proc_id, log_level, component, and message.
"""
if thread_id:
thread_id = str(thread_id)
if proc_id:
proc_id = str(proc_id)
results = []
reg = re.compile('(\d+)\s+(\d+)\s+([A-Z])\s+([A-Za-z]+)\s*:(.*)$',
re.MULTILINE)
log_list = reg.findall(record)
for (tid, pid, log_lev, comp, msg) in log_list:
if ((not thread_id or thread_id == tid) and
(not proc_id or proc_id == pid) and
(not log_level or log_level == log_lev) and
(not component or component == comp) and msg.find(message) > -1):
match = dict({'thread_id': tid, 'proc_id': pid,
'log_level': log_lev, 'component': comp,
'message': msg})
results.append(match)
return results
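# Example (|ac| is an AndroidCommands instance; the message and component
# are made up): search a recording for error-level Chromium lines.
#   record = ac.StopRecordingLogcat()
#   hits = AndroidCommands.SearchLogcatRecord(
#       record, 'Renderer crashed', log_level='E', component='chromium')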
def ExtractPid(self, process_name):
"""Extracts Process Ids for a given process name from Android Shell.
Args:
process_name: name of the process on the device.
Returns:
List of all the process ids (as strings) that match the given name.
If the name of a process exactly matches the given name, the pid of
that process will be inserted to the front of the pid list.
"""
pids = []
for line in self.RunShellCommand('ps', log_result=False):
data = line.split()
try:
if process_name in data[-1]: # name is in the last column
if process_name == data[-1]:
pids.insert(0, data[1]) # PID is in the second column
else:
pids.append(data[1])
except IndexError:
pass
return pids
def GetIoStats(self):
"""Gets cumulative disk IO stats since boot (for all processes).
Returns:
Dict of {num_reads, num_writes, read_ms, write_ms} or None if there
was an error.
"""
IoStats = collections.namedtuple(
'IoStats',
['device',
'num_reads_issued',
'num_reads_merged',
'num_sectors_read',
'ms_spent_reading',
'num_writes_completed',
'num_writes_merged',
'num_sectors_written',
'ms_spent_writing',
'num_ios_in_progress',
'ms_spent_doing_io',
'ms_spent_doing_io_weighted',
])
for line in self.GetFileContents('/proc/diskstats', log_result=False):
fields = line.split()
stats = IoStats._make([fields[2]] + [int(f) for f in fields[3:]])
if stats.device == 'mmcblk0':
return {
'num_reads': stats.num_reads_issued,
'num_writes': stats.num_writes_completed,
'read_ms': stats.ms_spent_reading,
'write_ms': stats.ms_spent_writing,
}
logging.warning('Could not find disk IO stats.')
return None
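# Sketch (|ac| is an AndroidCommands instance): sample twice and diff to
# attribute disk IO to a test run.
#   before = ac.GetIoStats()
#   ... run the test ...
#   after = ac.GetIoStats()
#   extra_reads = after['num_reads'] - before['num_reads']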
def GetMemoryUsageForPid(self, pid):
"""Returns the memory usage for given pid.
Args:
pid: The pid number of the specific process running on device.
Returns:
Dict of {metric:usage_kb}, for the process which has specified pid.
The metric keys which may be included are: Size, Rss, Pss, Shared_Clean,
Shared_Dirty, Private_Clean, Private_Dirty, VmHWM.
"""
showmap = self.RunShellCommand('showmap %d' % pid)
if not showmap or not showmap[-1].endswith('TOTAL'):
logging.warning('Invalid output for showmap %s', str(showmap))
return {}
items = showmap[-1].split()
if len(items) != 9:
logging.warning('Invalid TOTAL for showmap %s', str(items))
return {}
usage_dict = collections.defaultdict(int)
usage_dict.update({
'Size': int(items[0].strip()),
'Rss': int(items[1].strip()),
'Pss': int(items[2].strip()),
'Shared_Clean': int(items[3].strip()),
'Shared_Dirty': int(items[4].strip()),
'Private_Clean': int(items[5].strip()),
'Private_Dirty': int(items[6].strip()),
})
peak_value_kb = 0
for line in self.GetProtectedFileContents('/proc/%s/status' % pid):
if not line.startswith('VmHWM:'): # Format: 'VmHWM: +[0-9]+ kB'
continue
peak_value_kb = int(line.split(':')[1].strip().split(' ')[0])
break
usage_dict['VmHWM'] = peak_value_kb
if not peak_value_kb:
logging.warning('Could not find memory peak value for pid ' + str(pid))
return usage_dict
def ProcessesUsingDevicePort(self, device_port):
"""Lists processes using the specified device port on loopback interface.
Args:
device_port: Port on device we want to check.
Returns:
A list of (pid, process_name) tuples using the specified port.
"""
tcp_results = self.RunShellCommand('cat /proc/net/tcp', log_result=False)
tcp_address = '0100007F:%04X' % device_port
pids = []
for single_connect in tcp_results:
connect_results = single_connect.split()
# Column 1 is the TCP port, and Column 9 is the inode of the socket
if connect_results[1] == tcp_address:
socket_inode = connect_results[9]
socket_name = 'socket:[%s]' % socket_inode
lsof_results = self.RunShellCommand('lsof', log_result=False)
for single_process in lsof_results:
process_results = single_process.split()
# Ignore the line if it has less than nine columns in it, which may
# be the case when a process stops while lsof is executing.
if len(process_results) <= 8:
continue
# Column 0 is the executable name
# Column 1 is the pid
# Column 8 is the Inode in use
if process_results[8] == socket_name:
pids.append((int(process_results[1]), process_results[0]))
break
logging.info('ProcessesUsingDevicePort: %s', pids)
return pids
def FileExistsOnDevice(self, file_name):
"""Checks whether the given file exists on the device.
Args:
file_name: Full path of file to check.
Returns:
True if the file exists, False otherwise.
"""
assert '"' not in file_name, 'file_name cannot contain double quotes'
try:
status = self._adb.SendShellCommand(
'\'test -e "%s"; echo $?\'' % (file_name))
if 'test: not found' not in status:
return int(status) == 0
status = self._adb.SendShellCommand(
'\'ls "%s" >/dev/null 2>&1; echo $?\'' % (file_name))
return int(status) == 0
except ValueError:
if IsDeviceAttached(self._device):
raise errors.DeviceUnresponsiveError('Device may be offline.')
return False
def IsFileWritableOnDevice(self, file_name):
"""Checks whether the given file (or directory) is writable on the device.
Args:
file_name: Full path of file/directory to check.
Returns:
True if writable, False otherwise.
"""
assert '"' not in file_name, 'file_name cannot contain double quotes'
try:
status = self._adb.SendShellCommand(
'\'test -w "%s"; echo $?\'' % (file_name))
if 'test: not found' not in status:
return int(status) == 0
raise errors.AbortError('"test" binary not found. OS too old.')
except ValueError:
if IsDeviceAttached(self._device):
raise errors.DeviceUnresponsiveError('Device may be offline.')
return False
@staticmethod
def GetTimestamp():
return time.strftime('%Y-%m-%d-%H%M%S', time.localtime())
@staticmethod
def EnsureHostDirectory(host_file):
host_dir = os.path.dirname(os.path.abspath(host_file))
if not os.path.exists(host_dir):
os.makedirs(host_dir)
def TakeScreenshot(self, host_file=None):
"""Saves a screenshot image to |host_file| on the host.
Args:
host_file: Absolute path to the image file to store on the host or None to
use an autogenerated file name.
Returns:
Resulting host file name of the screenshot.
"""
host_file = os.path.abspath(host_file or
'screenshot-%s.png' % self.GetTimestamp())
self.EnsureHostDirectory(host_file)
device_file = '%s/screenshot.png' % self.GetExternalStorage()
self.RunShellCommand(
'/system/bin/screencap -p %s' % device_file)
self.PullFileFromDevice(device_file, host_file)
self.RunShellCommand('rm -f "%s"' % device_file)
return host_file
def PullFileFromDevice(self, device_file, host_file):
"""Download |device_file| on the device from to |host_file| on the host.
Args:
device_file: Absolute path to the file to retrieve from the device.
host_file: Absolute path to the file to store on the host.
"""
if not self._adb.Pull(device_file, host_file):
raise device_errors.AdbCommandFailedError(
['pull', device_file, host_file], 'Failed to pull file from device.')
assert os.path.exists(host_file)
def SetUtilWrapper(self, util_wrapper):
"""Sets a wrapper prefix to be used when running a locally-built
binary on the device (ex.: md5sum_bin).
"""
self._util_wrapper = util_wrapper
def RunUIAutomatorTest(self, test, test_package, timeout):
"""Runs a single uiautomator test.
Args:
test: Test class/method.
test_package: Name of the test jar.
timeout: Timeout time in seconds.
Returns:
An instance of am_instrument_parser.TestResult object.
"""
cmd = 'uiautomator runtest %s -e class %s' % (test_package, test)
self._LogShell(cmd)
output = self._adb.SendShellCommand(cmd, timeout_time=timeout)
# uiautomator doesn't fully conform to the instrumentation test runner
# convention and doesn't terminate with INSTRUMENTATION_CODE.
# Just assume the first result is valid.
(test_results, _) = am_instrument_parser.ParseAmInstrumentOutput(output)
if not test_results:
raise errors.InstrumentationError(
'no test results... device setup correctly?')
return test_results[0]
def DismissCrashDialogIfNeeded(self):
"""Dismiss the error/ANR dialog if present.
Returns: Name of the crashed package if a dialog is focused,
None otherwise.
"""
re_focus = re.compile(
r'\s*mCurrentFocus.*Application (Error|Not Responding): (\S+)}')
def _FindFocusedWindow():
match = None
for line in self.RunShellCommand('dumpsys window windows'):
match = re.match(re_focus, line)
if match:
break
return match
match = _FindFocusedWindow()
if not match:
return
package = match.group(2)
logging.warning('Trying to dismiss %s dialog for %s' % match.groups())
self.SendKeyEvent(KEYCODE_DPAD_RIGHT)
self.SendKeyEvent(KEYCODE_DPAD_RIGHT)
self.SendKeyEvent(KEYCODE_ENTER)
match = _FindFocusedWindow()
if match:
logging.error('Still showing a %s dialog for %s' % match.groups())
return package
def EfficientDeviceDirectoryCopy(self, source, dest):
""" Copy a directory efficiently on the device
Uses a shell script running on the target to copy new and changed files the
source directory to the destination directory and remove added files. This
is in some cases much faster than cp -r.
Args:
source: absolute path of source directory
dest: absolute path of destination directory
"""
logging.info('In EfficientDeviceDirectoryCopy %s %s', source, dest)
with DeviceTempFile(self, suffix=".sh") as temp_script_file:
host_script_path = os.path.join(constants.DIR_SOURCE_ROOT,
'build',
'android',
'pylib',
'efficient_android_directory_copy.sh')
self._adb.Push(host_script_path, temp_script_file.name)
out = self.RunShellCommand(
'sh %s %s %s' % (temp_script_file.name, source, dest),
timeout_time=120)
if self._device:
device_repr = self._device[-4:]
else:
device_repr = '????'
for line in out:
logging.info('[%s]> %s', device_repr, line)
def _GetControlUsbChargingCommand(self):
if self._control_usb_charging_command['cached']:
return self._control_usb_charging_command['command']
self._control_usb_charging_command['cached'] = True
if not self.IsRootEnabled():
return None
for command in CONTROL_USB_CHARGING_COMMANDS:
# Assert command is valid.
assert 'disable_command' in command
assert 'enable_command' in command
assert 'witness_file' in command
witness_file = command['witness_file']
if self.FileExistsOnDevice(witness_file):
self._control_usb_charging_command['command'] = command
return command
return None
def CanControlUsbCharging(self):
return self._GetControlUsbChargingCommand() is not None
def DisableUsbCharging(self, timeout=10):
command = self._GetControlUsbChargingCommand()
if not command:
raise Exception('Unable to act on usb charging.')
disable_command = command['disable_command']
t0 = time.time()
# Do not loop directly on self.IsDeviceCharging to cut the number of calls
# to the device.
while True:
if t0 + timeout - time.time() < 0:
raise pexpect.TIMEOUT('Unable to disable USB charging in time: %s' % (
self.GetBatteryInfo()))
self.RunShellCommand(disable_command)
if not self.IsDeviceCharging():
break
def EnableUsbCharging(self, timeout=10):
command = self._GetControlUsbChargingCommand()
if not command:
raise Exception('Unable to act on usb charging.')
    enable_command = command['enable_command']
t0 = time.time()
# Do not loop directly on self.IsDeviceCharging to cut the number of calls
# to the device.
while True:
if t0 + timeout - time.time() < 0:
raise pexpect.TIMEOUT('Unable to enable USB charging in time.')
      self.RunShellCommand(enable_command)
if self.IsDeviceCharging():
break
def IsDeviceCharging(self):
for line in self.RunShellCommand('dumpsys battery'):
if 'powered: ' in line:
if line.split('powered: ')[1] == 'true':
          return True
    return False
class NewLineNormalizer(object):
"""A file-like object to normalize EOLs to '\n'.
Pexpect runs adb within a pseudo-tty device (see
http://www.noah.org/wiki/pexpect), so any '\n' printed by adb is written
as '\r\n' to the logfile. Since adb already uses '\r\n' to terminate
lines, the log ends up having '\r\r\n' at the end of each line. This
filter replaces the above with a single '\n' in the data stream.
"""
def __init__(self, output):
self._output = output
def write(self, data):
data = data.replace('\r\r\n', '\n')
self._output.write(data)
def flush(self):
self._output.flush()
| ResetBadDevices |
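# Hedged usage sketch (not part of the file above): wiring NewLineNormalizer
# into a pexpect spawn so the logfile does not accumulate '\r\r\n' sequences.
# The 'adb logcat' command and the use of sys.stdout here are illustrative.
import sys
import pexpect

def _SpawnAdbWithNormalizedLog(cmd='adb logcat'):
  # Every '\r\r\n' written through the pseudo-tty collapses to a single '\n'.
  return pexpect.spawn(cmd, logfile=NewLineNormalizer(sys.stdout))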
GroupEditor.tsx | import React from 'react';
import * as yup from "yup";
import {reqString} from "../../data/validations";
import {FormikActions} from "formik";
import Grid from "@material-ui/core/Grid";
import XForm from "../../components/forms/XForm";
import XTextInput from "../../components/inputs/XTextInput";
import {remoteRoutes} from "../../data/constants";
import {post, put} from "../../utils/ajax";
import Toast from "../../utils/Toast";
import {GroupPrivacy, IGroup} from "./types";
import XSelectInput from "../../components/inputs/XSelectInput";
import {toOptions} from "../../components/inputs/inputHelpers"; | isNew: boolean
onGroupAdded?: (g: any) => any
onGroupEdited?: (g: any) => any
}
const schema = yup.object().shape(
{
name: reqString,
privacy: reqString,
description: reqString,
tag: reqString
}
)
const GroupEditor = ({data, isNew, onGroupAdded, onGroupEdited}: IProps) => {
function handleSubmit(values: any, actions: FormikActions<any>) {
const toSave: IGroup = {
...data,
name: values.name,
description: values.description,
privacy: values.privacy,
tag: values.tag,
parent: values.parent
}
if (isNew) {
post(remoteRoutes.groups, toSave,
(data) => {
Toast.info('Group created')
actions.resetForm()
onGroupAdded && onGroupAdded(data)
},
undefined,
() => {
actions.setSubmitting(false);
}
)
} else {
put(remoteRoutes.groups, toSave,
(data) => {
Toast.info('Group updated')
actions.resetForm()
onGroupEdited && onGroupEdited(data)
},
undefined,
() => {
actions.setSubmitting(false);
}
)
}
}
return (
<XForm
onSubmit={handleSubmit}
schema={schema}
initialValues={data}
>
<Grid spacing={1} container>
<Grid item xs={12}>
<XSelectInput
name="privacy"
label="Privacy"
options={toOptions(enumToArray(GroupPrivacy))}
variant='outlined'
/>
</Grid>
<Grid item xs={12}>
<XTextInput
name="parent"
label="Parent Group"
variant='outlined'
/>
</Grid>
<Grid item xs={12}>
<XTextInput
name="name"
label="Name"
type="text"
variant='outlined'
/>
</Grid>
<Grid item xs={12}>
<XTextInput
name="tag"
label="Tag"
variant='outlined'
/>
</Grid>
<Grid item xs={12}>
<XTextInput
name="description"
label="Description"
variant='outlined'
                    multiline
                    rowsMax={4}
                    rows={4}
/>
</Grid>
</Grid>
</XForm>
);
}
export default GroupEditor; | import {enumToArray} from "../../utils/stringHelpers";
interface IProps {
data?: Partial<IGroup> |
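// Hedged usage sketch (not part of GroupEditor.tsx): rendering the editor in
// "create" mode. The initial data and the onGroupAdded handler are illustrative.
import React from 'react';
import GroupEditor from './GroupEditor';

export const NewGroupPage = () => (
    <GroupEditor
        isNew={true}
        data={{name: '', tag: '', description: ''}}
        onGroupAdded={g => console.log('created group', g)}
    />
);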
delete-product.rs | use lambda_http::{
handler,
lambda_runtime::{self, Context},
Request,
};
use products::{entrypoints::lambda::apigateway::delete_product, utils::*};
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error + Send + Sync + 'static>> {
// Initialize logger
setup_tracing();
// Initialize store
let store = get_store().await;
// Run the Lambda function
//
// This is the entry point for the Lambda function. The `lambda_runtime`
// crate will take care of contacting the Lambda runtime API and invoking
// the `delete_product` function.
// See https://docs.aws.amazon.com/lambda/latest/dg/runtimes-api.html
//
// This uses a closure to pass the Service without having to reinstantiate
// it for every call. This is a bit of a hack, but it's the only way to
// pass a store to a lambda function.
//
// Furthermore, we don't await the result of `delete_product` because
// async closures aren't stable yet. This way, the closure returns a Future,
// which matches the signature of the lambda function.
// See https://github.com/rust-lang/rust/issues/62290
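    //
    // As a hedged sketch (the real signature lives in the products crate and
    // may differ), a handler compatible with this closure would look roughly
    // like a plain `async fn` whose returned Future is handed back directly:
    //
    //   pub async fn delete_product(
    //       store: &impl Store,   // assumed store abstraction from get_store()
    //       event: Request,
    //       _ctx: Context,
    //   ) -> Result<impl IntoResponse, Error> { /* ... */ }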
lambda_runtime::run(handler(|event: Request, ctx: Context| { | }))
.await?;
Ok(())
} | delete_product(&store, event, ctx) |
ts-config-paths-plugin.ts | import path from 'path';
import {
logger,
readTsConfig,
isRelativePath,
createDebugger,
} from '@modern-js/utils';
import type { Resolver } from 'webpack';
import { createMatchPath, MatchPath } from '@modern-js/utils/tsconfig-paths';
import { JS_RESOLVE_EXTENSIONS } from '../utils/constants';
const debug = createDebugger('ts-config-paths');
export class | {
source: string;
target: string;
cwd: string;
compilerOptions: any;
absoluteBaseUrl: string;
matchPath: MatchPath;
resolved: Map<string, string | undefined>;
constructor(cwd: string) {
this.cwd = cwd;
this.source = 'described-resolve';
this.target = 'resolve';
this.compilerOptions = readTsConfig(cwd).compilerOptions || {};
this.absoluteBaseUrl = path.resolve(
cwd,
this.compilerOptions.baseUrl || './',
);
this.matchPath = createMatchPath(
this.absoluteBaseUrl,
this.compilerOptions?.paths || {},
['browser', 'module', 'main'],
false,
);
this.resolved = new Map();
}
apply(resolver: Resolver) {
    if (!resolver) {
      logger.warn(
        'ts-config-paths-plugin: No resolver found, skipping ts-config-paths-plugin',
      );
      return;
    }
const target = resolver.ensureHook(this.target);
resolver
.getHook('described-resolve')
.tapAsync('TsConfigPathsPlugin', (request, resolveContext, callback) => {
const requestName = request.request;
if (!requestName) {
return callback();
}
if (isRelativePath(requestName)) {
return callback();
}
if (path.isAbsolute(requestName)) {
return callback();
}
if (!this.resolved.has(requestName)) {
const matched = this.matchPath(
requestName,
undefined,
undefined,
JS_RESOLVE_EXTENSIONS,
);
this.resolved.set(requestName, matched);
}
if (this.resolved.get(requestName) === undefined) {
return callback();
}
debug(`resolved ${requestName} to ${this.resolved.get(requestName)!}`);
return resolver.doResolve(
target,
{
...request,
request: this.resolved.get(requestName),
},
`Aliased with tsconfig.json ${requestName} to ${this.resolved.get(
requestName,
)!}`,
resolveContext,
(resolverErr: any, resolvedResult?: any) => {
if (resolverErr) {
return callback(resolverErr);
}
if (!resolvedResult) {
return callback(undefined, undefined);
}
return callback(undefined, resolvedResult);
},
);
});
}
}
| TsConfigPathsPlugin |
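// Hedged usage sketch (not part of this file): registering the plugin on
// webpack's resolver so tsconfig "paths" aliases resolve before the normal
// module lookup. The surrounding config shape is illustrative.
import { TsConfigPathsPlugin } from './ts-config-paths-plugin';

export const webpackConfig = {
  resolve: {
    plugins: [new TsConfigPathsPlugin(process.cwd())],
  },
};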
html.rs | use nu_test_support::{nu, pipeline};
#[test]
fn out_html_simple() {
let actual = nu!(
cwd: ".", pipeline(
r#"
echo 3 | to html
"#
));
assert_eq!(
actual.out,
r"<html><style>body { background-color:white;color:black; }</style><body>3</body></html>"
);
}
#[test]
fn out_html_partial() |
#[test]
fn out_html_table() {
let actual = nu!(
cwd: ".", pipeline(
r#"
echo '{"name": "darren"}' | from json | to html
"#
));
assert_eq!(
actual.out,
r"<html><style>body { background-color:white;color:black; }</style><body><table><tr><th>name</th></tr><tr><td>darren</td></tr></table></body></html>"
);
}
#[test]
fn test_cd_html_color_flag_dark_false() {
let actual = nu!(
cwd: ".", pipeline(
r#"
cd --help | to html --html-color
"#
)
);
assert_eq!(
actual.out,
r"<html><style>body { background-color:white;color:black; }</style><body>Change directory.<br><br>Usage:<br> > cd (path) <br><br>Flags:<br> -h, --help<br> Display this help message<br><br>Parameters:<br> (optional) path: the path to change to<br><br>Examples:<br> Change to your home directory<br> > <span style='color:#037979;font-weight:bold;'>cd<span style='color:black;font-weight:normal;'> </span></span><span style='color:#037979;'>~<span style='color:black;font-weight:normal;'><br><br> Change to a directory via abbreviations<br> > </span><span style='color:#037979;font-weight:bold;'>cd<span style='color:black;font-weight:normal;'> </span></span></span><span style='color:#037979;'>d/s/9<span style='color:black;font-weight:normal;'><br><br></body></html></span></span>"
);
}
#[test]
fn test_no_color_flag() {
let actual = nu!(
cwd: ".", pipeline(
r#"
cd --help | to html --no-color
"#
)
);
assert_eq!(
actual.out,
r"<html><style>body { background-color:white;color:black; }</style><body>Change directory.<br><br>Usage:<br> > cd (path) <br><br>Flags:<br> -h, --help<br> Display this help message<br><br>Parameters:<br> (optional) path: the path to change to<br><br>Examples:<br> Change to your home directory<br> > cd ~<br><br> Change to a directory via abbreviations<br> > cd d/s/9<br><br></body></html>"
);
}
| {
let actual = nu!(
cwd: ".", pipeline(
r#"
echo 3 | to html -p
"#
));
assert_eq!(
actual.out,
"<div style=\"background-color:white;color:black;\">3</div>"
);
} |
main.go | package main
import (
"net/http"
"os"
"github.com/poximy/url-shortener-backend/api"
"github.com/go-chi/chi/v5"
"github.com/go-chi/chi/v5/middleware"
"github.com/go-chi/cors"
)
func | () {
r := chi.NewRouter()
r.Use(middleware.Logger)
r.Use(cors.Handler(cors.Options{
AllowedOrigins: []string{"https://*", "http://*"},
AllowedMethods: []string{"GET", "POST", "PUT", "DELETE", "OPTIONS"},
AllowedHeaders: []string{"Accept", "Authorization", "Content-Type", "application/json"},
}))
r.Mount("/", api.UrlRouter())
err := http.ListenAndServe(port(), r)
if err != nil {
panic(err)
}
}
func port() string {
portNum := os.Getenv("PORT")
if portNum == "" {
portNum = "8080" // Default port if not specified
}
return ":" + portNum
}
| main |
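// Hedged usage sketch (not part of main.go): running the service on a custom
// port and exercising it; the request path and JSON body are illustrative,
// since the real routes are defined in api.UrlRouter().
//
//   PORT=9090 go run .
//   curl -X POST http://localhost:9090 -H "Content-Type: application/json" \
//        -d '{"url":"https://example.com"}'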
take.rs | use std::time::Duration;
use Sample;
use Source;
/// Internal function that builds a `TakeDuration` object.
pub fn take_duration<I>(input: I, duration: Duration) -> TakeDuration<I>
where
I: Source,
I::Item: Sample,
{
TakeDuration {
current_frame_len: input.current_frame_len(),
duration_per_sample: TakeDuration::get_duration_per_sample(&input),
input: input,
remaining_duration: duration,
requested_duration: duration,
filter: None,
}
}
/// A filter that requires duration information
#[derive(Clone,Debug)]
enum DurationFilter {
FadeOut,
}
impl DurationFilter
{
fn apply<I: Iterator>(&self, sample: <I as Iterator>::Item, parent: &TakeDuration<I>) -> <I as Iterator>::Item
where
I::Item: Sample,
{
use self::DurationFilter::*;
match self {
FadeOut => {
let remaining = parent.remaining_duration.as_millis() as f32;
let total = parent.requested_duration.as_millis() as f32;
sample.amplify(remaining / total)
},
}
}
}
const NANOS_PER_SEC: u64 = 1_000_000_000;
/// A source that truncates the given source to a certain duration.
#[derive(Clone, Debug)]
pub struct TakeDuration<I> {
input: I,
remaining_duration: Duration,
requested_duration: Duration,
filter: Option<DurationFilter>,
// Remaining samples in current frame.
current_frame_len: Option<usize>,
    // Only updated when the current frame len is exhausted.
duration_per_sample: Duration,
}
impl<I> TakeDuration<I>
where
I: Source,
I::Item: Sample,
{
/// Returns the duration elapsed for each sample extracted.
#[inline]
fn get_duration_per_sample(input: &I) -> Duration {
        let ns = NANOS_PER_SEC / (input.sample_rate() as u64 * input.channels() as u64);
// \|/ the maximum value of `ns` is one billion, so this can't fail
Duration::new(0, ns as u32)
}
/// Returns a reference to the inner source.
#[inline]
pub fn inner(&self) -> &I {
&self.input
}
/// Returns a mutable reference to the inner source.
#[inline]
pub fn inner_mut(&mut self) -> &mut I {
&mut self.input
}
/// Returns the inner source.
#[inline]
pub fn into_inner(self) -> I {
self.input
}
pub fn set_filter_fadeout(&mut self) {
self.filter = Some(DurationFilter::FadeOut);
}
pub fn clear_filter(&mut self) |
}
impl<I> Iterator for TakeDuration<I>
where
I: Source,
I::Item: Sample,
{
type Item = <I as Iterator>::Item;
fn next(&mut self) -> Option<<I as Iterator>::Item> {
if let Some(frame_len) = self.current_frame_len.take() {
if frame_len > 0 {
self.current_frame_len = Some(frame_len - 1);
} else {
self.current_frame_len = self.input.current_frame_len();
// Sample rate might have changed
self.duration_per_sample = Self::get_duration_per_sample(&self.input);
}
}
if self.remaining_duration <= self.duration_per_sample {
None
} else {
if let Some(sample) = self.input.next() {
let sample = match &self.filter {
Some(filter) => filter.apply(sample, &self),
None => sample,
};
self.remaining_duration = self.remaining_duration - self.duration_per_sample;
Some(sample)
} else {
None
}
}
}
// TODO: size_hint
}
impl<I> Source for TakeDuration<I>
where
I: Iterator + Source,
I::Item: Sample,
{
#[inline]
fn current_frame_len(&self) -> Option<usize> {
let remaining_nanos = self.remaining_duration.as_secs() * NANOS_PER_SEC
+ self.remaining_duration.subsec_nanos() as u64;
let nanos_per_sample = self.duration_per_sample.as_secs() * NANOS_PER_SEC
+ self.duration_per_sample.subsec_nanos() as u64;
let remaining_samples = (remaining_nanos / nanos_per_sample) as usize;
self.input.current_frame_len()
.filter(|value| *value < remaining_samples)
.or(Some(remaining_samples))
}
#[inline]
fn channels(&self) -> u16 {
self.input.channels()
}
#[inline]
fn sample_rate(&self) -> u32 {
self.input.sample_rate()
}
#[inline]
fn total_duration(&self) -> Option<Duration> {
if let Some(duration) = self.input.total_duration() {
if duration < self.requested_duration {
Some(duration)
} else {
Some(self.requested_duration)
}
} else {
None
}
}
}
| {
self.filter = None;
} |
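// Hedged usage sketch (not part of take.rs): truncating a source to five
// seconds with a fade-out. `source` stands for any value implementing
// `Source` whose items implement `Sample`; `sink` is an assumed rodio Sink.
//
//   let mut taken = take_duration(source, Duration::from_secs(5));
//   taken.set_filter_fadeout(); // scales each sample by remaining/total
//   sink.append(taken);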
3aa95a42561c_this_is_a_migration.py | """this is a migration
Revision ID: 3aa95a42561c
Revises: 98fef64846fe
Create Date: 2021-10-04 10:49:46.832296
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = "3aa95a42561c"
down_revision = "98fef64846fe"
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
|
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
with op.batch_alter_table("user", schema=None) as batch_op:
batch_op.drop_column("avatar_style")
with op.batch_alter_table("belong", schema=None) as batch_op:
batch_op.drop_constraint("owner_id", type_="foreignkey")
# ### end Alembic commands ###
| with op.batch_alter_table("belong", schema=None) as batch_op:
batch_op.create_foreign_key("owner_id", "user", ["owner_id"], ["id"])
with op.batch_alter_table("user", schema=None) as batch_op:
batch_op.add_column(sa.Column("avatar_style", sa.String(length=1024)))
# ### end Alembic commands ### |
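# Hedged usage sketch (not part of the migration): applying and rolling back
# this revision with the Alembic CLI; the revision ids come from the header.
#
#   alembic upgrade 3aa95a42561c     # runs upgrade() above
#   alembic downgrade 98fef64846fe   # runs downgrade() back to the parent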
argument.py | try:
from zcrmsdk.src.com.zoho.crm.api.exception import SDKException
from zcrmsdk.src.com.zoho.crm.api.util import Constants
except Exception:
from ..exception import SDKException
from ..util import Constants
class Argument(object):
def __init__(self):
"""Creates an instance of Argument"""
self.__name = None
self.__value = None
self.__key_modified = dict()
def | (self):
"""
The method to get the name
Returns:
string: A string representing the name
"""
return self.__name
def set_name(self, name):
"""
The method to set the value to name
Parameters:
name (string) : A string representing the name
"""
if name is not None and not isinstance(name, str):
raise SDKException(Constants.DATA_TYPE_ERROR, 'KEY: name EXPECTED TYPE: str', None, None)
self.__name = name
self.__key_modified['name'] = 1
def get_value(self):
"""
The method to get the value
Returns:
string: A string representing the value
"""
return self.__value
def set_value(self, value):
"""
The method to set the value to value
Parameters:
value (string) : A string representing the value
"""
if value is not None and not isinstance(value, str):
raise SDKException(Constants.DATA_TYPE_ERROR, 'KEY: value EXPECTED TYPE: str', None, None)
self.__value = value
self.__key_modified['value'] = 1
def is_key_modified(self, key):
"""
The method to check if the user has modified the given key
Parameters:
key (string) : A string representing the key
Returns:
int: An int representing the modification
"""
if key is not None and not isinstance(key, str):
raise SDKException(Constants.DATA_TYPE_ERROR, 'KEY: key EXPECTED TYPE: str', None, None)
if key in self.__key_modified:
return self.__key_modified.get(key)
return None
def set_key_modified(self, key, modification):
"""
The method to mark the given key as modified
Parameters:
key (string) : A string representing the key
modification (int) : An int representing the modification
"""
if key is not None and not isinstance(key, str):
raise SDKException(Constants.DATA_TYPE_ERROR, 'KEY: key EXPECTED TYPE: str', None, None)
if modification is not None and not isinstance(modification, int):
raise SDKException(Constants.DATA_TYPE_ERROR, 'KEY: modification EXPECTED TYPE: int', None, None)
self.__key_modified[key] = modification
| get_name |
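# Hedged usage sketch (not part of the generated SDK file): building an
# Argument and checking the key-modification bookkeeping the setters keep.
# The 'module'/'Leads' values are illustrative.
arg = Argument()
arg.set_name('module')
arg.set_value('Leads')
assert arg.is_key_modified('name') == 1   # marked by set_name
assert arg.is_key_modified('value') == 1  # marked by set_value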
0011_auto_20180103_0243.py | # Generated by Django 2.0 on 2018-01-03 02:43
from django.db import migrations, models
| dependencies = [
('ecweb', '0010_event'),
]
operations = [
migrations.AlterField(
model_name='event',
name='end_event',
field=models.DateTimeField(blank=True),
),
migrations.AlterField(
model_name='event',
name='start_event',
field=models.DateTimeField(),
),
] |
class Migration(migrations.Migration):
|
create_def_file.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""create_def_file.py - tool to create a windows def file.
The def file can be used to export symbols from the tensorflow dll to enable
tf.load_library().
Because the linker allows only 64K symbols to be exported per dll
we filter the symbols down to the essentials. The regular expressions
we use for this are specific to tensorflow.
TODO: this works fine but there is an issue with exporting
'const char * const' and importing it from a user_ops. The problem is
on the importing end and using __declspec(dllimport) works around it.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import codecs
import os
import re
import subprocess
import sys
import tempfile
# External tools we use that come with visual studio sdk and
# we assume that the caller has the correct PATH to the sdk
UNDNAME = "undname.exe"
DUMPBIN = "dumpbin.exe"
# Exclude if matched
EXCLUDE_RE = re.compile(r"RTTI|deleting destructor|::internal::|Internal|"
r"python_op_gen_internal|grappler")
# Include if matched before exclude
INCLUDEPRE_RE = re.compile(r"google::protobuf::internal::ExplicitlyConstructed|"
r"tensorflow::internal::LogMessage|"
r"tensorflow::internal::LogString|"
r"tensorflow::internal::CheckOpMessageBuilder|"
r"tensorflow::internal::PickUnusedPortOrDie|"
r"tensorflow::internal::ValidateDevice|"
r"tensorflow::ops::internal::Enter|"
r"tensorflow::strings::internal::AppendPieces|"
r"tensorflow::strings::internal::CatPieces|"
r"tensorflow::errors::Internal|"
r"tensorflow::Tensor::CopyFromInternal|"
r"tensorflow::kernel_factory::"
r"OpKernelRegistrar::InitInternal|"
r"tensorflow::io::internal::JoinPathImpl")
# Include if matched after exclude
INCLUDE_RE = re.compile(r"^(TF_\w*)$|"
r"^(TFE_\w*)$|"
r"tensorflow::|"
r"functor::|"
r"\?nsync_|"
r"stream_executor::")
# We want to identify data members explicitly in the DEF file, so that no one
# can implicitly link against the DLL if they use one of the variables exported
# from the DLL and the header they use does not decorate the symbol with
# __declspec(dllimport). It is easier to detect what a data symbol does
# NOT look like, so doing it with the below regex.
DATA_EXCLUDE_RE = re.compile(r"[)(]|"
r"vftable|"
r"vbtable|"
r"vcall|"
r"RTTI|"
r"protobuf::internal::ExplicitlyConstructed")
def get_args():
|
def main():
"""main."""
args = get_args()
# Pipe dumpbin to extract all linkable symbols from libs.
# Good symbols are collected in candidates and also written to
# a temp file.
candidates = []
tmpfile = tempfile.NamedTemporaryFile(mode="w", delete=False)
for lib_path in args.input:
proc = subprocess.Popen([DUMPBIN, "/nologo", "/linkermember:1", lib_path],
stdout=subprocess.PIPE)
for line in codecs.getreader("utf-8")(proc.stdout):
cols = line.split()
if len(cols) < 2:
continue
sym = cols[1]
tmpfile.file.write(sym + "\n")
candidates.append(sym)
exit_code = proc.wait()
if exit_code != 0:
print("{} failed, exit={}".format(DUMPBIN, exit_code))
return exit_code
tmpfile.file.close()
# Run the symbols through undname to get their undecorated name
# so we can filter on something readable.
with open(args.output, "w") as def_fp:
# track dupes
taken = set()
# Header for the def file.
def_fp.write("LIBRARY " + args.target + "\n")
def_fp.write("EXPORTS\n")
if args.bitness == "64":
def_fp.write("\t??1OpDef@tensorflow@@UEAA@XZ\n")
else:
def_fp.write("\t??1OpDef@tensorflow@@UAE@XZ\n")
# Each symbols returned by undname matches the same position in candidates.
# We compare on undname but use the decorated name from candidates.
dupes = 0
proc = subprocess.Popen([UNDNAME, tmpfile.name], stdout=subprocess.PIPE)
for idx, line in enumerate(codecs.getreader("utf-8")(proc.stdout)):
decorated = candidates[idx]
if decorated in taken:
# Symbol is already in output, done.
dupes += 1
continue
if not INCLUDEPRE_RE.search(line):
if EXCLUDE_RE.search(line):
continue
if not INCLUDE_RE.search(line):
continue
if "deleting destructor" in line:
        # Some of the symbols covered by INCLUDEPRE_RE export deleting
# destructor symbols, which is a bad idea.
# So we filter out such symbols here.
continue
if DATA_EXCLUDE_RE.search(line):
def_fp.write("\t" + decorated + "\n")
else:
def_fp.write("\t" + decorated + " DATA\n")
taken.add(decorated)
exit_code = proc.wait()
if exit_code != 0:
print("{} failed, exit={}".format(UNDNAME, exit_code))
return exit_code
os.unlink(tmpfile.name)
print("symbols={}, taken={}, dupes={}"
.format(len(candidates), len(taken), dupes))
return 0
if __name__ == "__main__":
sys.exit(main())
| """Parse command line."""
filename_list = lambda x: x.split(";")
parser = argparse.ArgumentParser()
parser.add_argument("--input", type=filename_list,
help="paths to input libraries separated by semicolons",
required=True)
parser.add_argument("--output", help="output deffile", required=True)
parser.add_argument("--target", help="name of the target", required=True)
parser.add_argument("--bitness", help="build target bitness", required=True)
args = parser.parse_args()
return args |
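# Hedged usage sketch (not part of the original script): a typical invocation,
# run from a Visual Studio developer prompt so dumpbin.exe and undname.exe are
# on PATH. The library paths and target name are illustrative.
#
#   python create_def_file.py ^
#     --input "core.lib;ops.lib" ^
#     --output tensorflow.def ^
#     --target tensorflow.dll ^
#     --bitness 64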