text
stringlengths
184
4.48M
import Buyable from './Buyable';

/**
 * A shopping cart of Buyable items, deduplicated by item id.
 */
export default class Cart {
    private _items: Buyable[] = [];

    /**
     * Adds an item to the cart. If an item with the same id is already
     * present, a stackable item (canAddMore) has its stored count
     * incremented; a non-stackable duplicate is silently ignored.
     */
    add(item: Buyable): void {
        // Find the stored instance so the increment survives even when the
        // caller passes a fresh object with the same id (the original code
        // incremented the argument's count, losing the update in that case).
        const existing = this._items.find((el) => el.id === item.id);
        if (existing) {
            if (existing.canAddMore) {
                existing.count += 1;
            }
            return;
        }
        this._items.push(item);
    }

    /** A defensive copy of the cart contents. */
    get items(): Buyable[] {
        return [...this._items];
    }

    /**
     * Total cost of all items (price * count each).
     * NOTE(review): the method name contains a Cyrillic 'С' (U+0421), not a
     * Latin 'C'. It is kept as-is to preserve the public interface, but a
     * caller typing a Latin-C `totalCost` will get a compile error.
     */
    totalСost(): number {
        return this._items.reduce((acc, item) => acc + item.price * item.count, 0);
    }

    /** Total cost after applying a percentage discount (e.g. 10 for 10%). */
    totalСostDiscount(discount: number): number {
        const total = this.totalСost();
        return total - (total / 100) * discount;
    }

    /**
     * Removes one unit of the item with the given id; the entry is dropped
     * entirely once its count reaches zero (or if it is non-stackable).
     */
    removeItem(id: number): void {
        const item = this._items.find((el) => el.id === id);
        if (item === undefined) return;
        if (item.canAddMore) {
            item.count -= 1;
            if (item.count > 0) return;
        }
        this._items = this._items.filter((el) => el.id !== id);
    }
}
package com.examen.forge.app.infraestructure.webApi.controllers;

import java.util.List;

import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Controller;
import org.springframework.ui.Model;
import org.springframework.validation.BindingResult;
import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.ModelAttribute;
import org.springframework.web.bind.annotation.PostMapping;

import com.examen.forge.app.application.services.SongService;
import com.examen.forge.app.application.services.UserService;
import com.examen.forge.app.domain.entities.SongEntity;
import com.examen.forge.app.domain.entities.UserEntity;
import com.examen.forge.config.AppConfig;

import jakarta.servlet.http.HttpSession;
import jakarta.validation.Valid;

/**
 * MVC controller handling user registration, login/logout and the home page.
 * The authenticated user's id is kept in the HTTP session under
 * {@code AppConfig.SESSION_USER}.
 */
@Controller
public class UserController {

    @Autowired
    UserService userService;

    @Autowired
    SongService songService;

    // --- User registration ---

    /** Renders the registration page with an empty user model attribute. */
    @GetMapping({ AppConfig.ROUTE_REGISTRATION })
    public String pageRegister(@ModelAttribute(AppConfig.MA_USER) UserEntity user) {
        return AppConfig.JSP_REGISTRATION;
    }

    /**
     * Creates a new user after bean validation, uniqueness-of-email and
     * password-confirmation checks; on success stores the new user id in the
     * session and redirects home.
     */
    @PostMapping({ AppConfig.POST_CREATE_USER })
    public String newUser(
            @Valid @ModelAttribute(AppConfig.MA_USER) UserEntity user,
            BindingResult result,
            HttpSession session,
            Model model) {
        if (result.hasErrors()) {
            model.addAttribute("globalErrors", result.getGlobalErrors());
            return AppConfig.JSP_REGISTRATION;
        }
        if (userService.existsByEmail(user.getEmail())) {
            model.addAttribute("emailError", "El email ya está en uso");
            return AppConfig.JSP_REGISTRATION;
        }
        if (!user.getPassword().equals(user.getConfirm())) {
            model.addAttribute("confirmError", "Las contraseñas no coinciden");
            return AppConfig.JSP_REGISTRATION;
        }
        UserEntity newUser = userService.create(user);
        session.setAttribute(AppConfig.SESSION_USER, newUser.getId());
        return "redirect:/" + AppConfig.ROUTE_HOME;
    }

    // --- Login ---

    /**
     * Renders the login page. NOTE(review): mapped with a bare @GetMapping
     * (no path), i.e. the application root — confirm this is intentional.
     */
    @GetMapping
    public String pageLogin() {
        return AppConfig.JSP_LOGIN;
    }

    /**
     * Authenticates a user by email/password. Null-safe: a form posted
     * without the email or password field is treated like a blank value
     * (the original code threw a NullPointerException on trim()).
     */
    @PostMapping({ AppConfig.POST_LOGIN_USER })
    public String loginUser(@ModelAttribute UserEntity user, Model model, HttpSession session) {
        String email = user.getEmail();
        String password = user.getPassword();
        if (email == null || email.trim().isEmpty()) {
            model.addAttribute("errorEmail", "El correo no puede estar vacia.");
            return AppConfig.JSP_LOGIN;
        }
        if (password == null || password.trim().isEmpty()) {
            model.addAttribute("errorPassword", "La contraseña no puede estar vacia.");
            return AppConfig.JSP_LOGIN;
        }
        if (userService.authenticateUser(email, password)) {
            UserEntity userByEmail = userService.getByEmail(email);
            session.setAttribute(AppConfig.SESSION_USER, userByEmail.getId());
            return "redirect:/" + AppConfig.ROUTE_HOME;
        } else {
            model.addAttribute("error", "Credenciales no válidas. Inténtalo de nuevo.");
            return AppConfig.JSP_LOGIN;
        }
    }

    // --- Home ---

    /**
     * Shows the home page with all songs; when a user id is present in the
     * session, the corresponding user is exposed to the view as well.
     */
    @GetMapping({ AppConfig.ROUTE_HOME })
    public String pageHome(HttpSession session, Model model) {
        Long userId = (Long) session.getAttribute(AppConfig.SESSION_USER);
        List<SongEntity> songs = songService.getAll();
        model.addAttribute("songs", songs);
        // Flag used by the view to switch between guest and logged-in layout.
        boolean isRegistration = userId != null;
        model.addAttribute("isRegistration", isRegistration);
        if (userId != null) {
            UserEntity user = userService.getById(userId);
            model.addAttribute(AppConfig.MA_USER, user);
        }
        return AppConfig.JSP_HOME;
    }

    // --- Logout ---

    /** Drops the session user attribute and returns to the root page. */
    @GetMapping("logout")
    public String logoutUser(HttpSession session) {
        session.removeAttribute(AppConfig.SESSION_USER);
        return "redirect:/";
    }
}
This repository contains the files needed to complete the LIFO FIFO project in ALX. The files and their functions are recorded within this readme. # Monty Scripting Language Monty 0.98 is a scripting language that undergoes an initial compilation into Monty bytecode, similar to Python. It operates based on a distinct stack and includes specific instructions for stack manipulation. The primary objective of this project is to develop an interpreter for Monty ByteCodes files. ## Monty Byte Code Files Monty bytecode files typically employ the .m extension, a widely adopted convention in the industry, although it is not mandated by the language specification. Each line contains only one instruction, and any number of spaces before or after the opcode and its associated argument are permissible. Here's an example: ```plaintext push 0$ push 1$ push 2$ push 3$ pall$ push 4$ push 5$ push 6$ pall$ ``` Monty bytecode files can also include blank lines or lines filled with spaces, and any additional text following the opcode or its required argument is ignored. For instance: ```plaintext push 0 Push 0 onto the stack$ push 1 Push 1 onto the stack$ push 2$ push 3$ pall$ $ $ $ push 4$ push 5$ push 6$ pall This is the end of our program. Monty is awesome!$ ``` # Usage To compile all the files, use the following command: ```shell gcc -Wall -Werror -Wextra -pedantic *.c -o monty ``` To execute the program, run: ```shell ./monty bytecode_file ``` Available operation codes are as follows: | Opcode | Description | | ------- | ----------- | | push | Pushes an element onto the stack. For example, `push 1` pushes 1 onto the stack. | | pall | Prints all the values on the stack, starting from the top of the stack. | | pint | Prints the value at the top of the stack. | | pop | Removes the top element from the stack. | | swap | Swaps the top two elements of the stack. | | add | Adds the top two elements of the stack. The result is stored in the second node, and the first node is removed. 
| | nop | This opcode has no effect. | | sub | Subtracts the top element of the stack from the second top element. The result is stored in the second node, and the first node is removed. | | div | Divides the second top element of the stack by the top element. The result is stored in the second node, and the first node is removed. | | mul | Multiplies the second top element of the stack by the top element. The result is stored in the second node, and the first node is removed. | | mod | Computes the remainder of the division of the second top element of the stack by the top element. The result is stored in the second node, and the first node is removed. | | # | When a line begins with a # character, the entire line is treated as a comment. | | pchar | Prints the integer stored at the top of the stack as its ASCII value representation. | | pstr | Prints the integers stored in the stack as their ASCII value representations. It stops printing when the value is 0, when the stack is empty, or when the value of an element is a non-ASCII value. | | rotl | Rotates the top of the stack to the bottom of the stack. | | rotr | Rotates the bottom of the stack to the top of the stack. | | stack | This represents the default behavior, setting the format of the data into a stack (Last In, First Out - LIFO). | | queue | Sets the format of the data into a queue (First In, First Out - FIFO). |
import { conn } from "../../../db/mysql.connection";
import { APIError, HTTPStatusCode } from "../../error/api-error.model";
import { FlatTenancy } from "../models/flat-tenancy.model";

/**
 * Formats a date value as `YYYY-MM-DD` for the stored procedures.
 *
 * Accepts a real Date as well as an ISO-8601 string (the usual shape after
 * JSON deserialization). The original code called Date#toString(), whose
 * output ("Mon Jan 01 2024 ...") contains no "T", so the whole verbose
 * string was sent to MySQL whenever an actual Date instance was passed.
 *
 * NOTE(review): toISOString() is UTC-based — confirm tenancy dates are
 * intended to be interpreted in UTC.
 */
const toSqlDate = (value: Date | string): string =>
  (value instanceof Date ? value.toISOString() : String(value)).split("T")[0];

/** Data-access layer for flat tenancies, backed by MySQL stored procedures. */
export class FlatTenancyDatabase {
  /**
   * Looks up the tenancy of a tenant, scoped to the given owner.
   * Rejects with a 500 APIError on a database error.
   */
  fetchFlatTenancyByTenantId = async (tenantId: string, ownerId: string): Promise<FlatTenancy> => {
    const queryString = "CALL `pFlatTenancy_Get_By_TenantId`(?, ?);";
    return new Promise((res, rej) => {
      conn.query(queryString, [tenantId, ownerId], (error, result) => {
        if (error) {
          rej(new APIError(HTTPStatusCode.INTERNAL_SERVER_ERROR, error));
        } else {
          // Stored procedures return an array of result sets; the first row
          // of the first set is the tenancy record.
          const flatTenancy = result[0][0] as FlatTenancy;
          res(flatTenancy);
        }
      });
    });
  };

  /** Creates or updates a tenancy and resolves with the stored record. */
  createOrUpdate = async (flatTenancy: FlatTenancy): Promise<FlatTenancy> => {
    const queryString = "CALL `pFlatTenancy_Create_Update`(?, ?, ?, ?, ?, ?);";
    const date = toSqlDate(flatTenancy.startDate);
    return new Promise((res, rej) => {
      conn.query(
        queryString,
        [
          flatTenancy.flatId,
          flatTenancy.userId,
          flatTenancy.rentAmount,
          flatTenancy.securityDeposit,
          flatTenancy.dueDayOfMonth,
          date,
        ],
        (error, result) => {
          if (error) {
            rej(new APIError(HTTPStatusCode.INTERNAL_SERVER_ERROR, error));
          } else {
            const userTableData = result[0][0] as FlatTenancy;
            res(userTableData);
          }
        }
      );
    });
  };

  /** Ends a tenancy; resolves true when a row was actually updated. */
  endFlatTenancy = async (flatTenancyId: number, endDate: Date): Promise<boolean> => {
    const queryString = "CALL `pFlatTenancy_End_Tenancy`(?, ?);";
    const date = toSqlDate(endDate);
    return new Promise((res, rej) => {
      conn.query(queryString, [flatTenancyId, date], (error, result) => {
        if (error) {
          rej(new APIError(HTTPStatusCode.INTERNAL_SERVER_ERROR, error));
        } else {
          res(result.affectedRows > 0);
        }
      });
    });
  };
}
# CONTRIBUTING Thank you for your interest in contributing to Clash Verge Rev! This document provides guidelines and instructions to help you set up your development environment and start contributing. ## Development Setup Before you start contributing to the project, you need to set up your development environment. Here are the steps you need to follow: ### Prerequisites 1. **Install Rust and Node.js**: Our project requires both Rust and Node.js. Please follow the instructions provided [here](https://tauri.app/v1/guides/getting-started/prerequisites) to install them on your system. ### Setup for Windows Users If you're a Windows user, you may need to perform some additional steps: - Make sure to add Rust and Node.js to your system's PATH. This is usually done during the installation process, but you can verify and manually add them if necessary. - The gnu `patch` tool should be installed ### Install Node.js Packages After installing Rust and Node.js, install the necessary Node.js packages: ```shell pnpm i ``` ### Download the Clash Binary You have two options for downloading the clash binary: - Automatically download it via the provided script: ```shell pnpm run check # Use '--force' to force update to the latest version # pnpm run check --force ``` - Manually download it from the [Clash Meta release](https://github.com/MetaCubeX/Clash.Meta/releases). After downloading, rename the binary according to the [Tauri configuration](https://tauri.app/v1/api/config#bundleconfig.externalbin). ### Run the Development Server To run the development server, use the following command: ```shell pnpm dev # If an app instance already exists, use a different command pnpm dev:diff ``` ### Build the Project If you want to build the project, use: ```shell pnpm build ``` ## Contributing Your Changes Once you have made your changes: 1. Fork the repository. 2. Create a new branch for your feature or bug fix. 3. Commit your changes with clear and concise commit messages. 4. 
Push your branch to your fork and submit a pull request to our repository. We appreciate your contributions and look forward to your active participation in our project!
<?php

namespace App\Controller;

use App\Document\Manager\SongManager;
use App\Elastic\RomajiAnalyzer;
use App\Service\Music163LyricsFetcher;
use Symfony\Bundle\FrameworkBundle\Controller\AbstractController;
use Symfony\Component\Routing\Annotation\Route;

class LyricsController extends AbstractController
{
    /**
     * Returns the lyrics of a song, fetching and caching them in Elastic
     * on first access (original text plus a romaji transliteration).
     *
     * @Route("/lyrics/{songId}", name="lyrics_get", requirements={"songId"="\d+"})
     */
    public function fetch(int $songId, SongManager $songManager, Music163LyricsFetcher $lyricsFetcher, RomajiAnalyzer $romajiAnalyzer)
    {
        $song = $songManager->getSongById($songId);

        // Guard against an unknown song id: the original code called
        // getLyrics() on a possibly-null result, turning a missing song
        // into a 500 instead of a 404.
        // NOTE(review): assumes getSongById() returns null for unknown
        // ids rather than throwing — confirm against SongManager.
        if (!$song) {
            return $this->json('No lyrics found.', 404);
        }

        // Cached lyrics: serve straight from the document.
        if ($song->getLyrics()) {
            return $this->json([
                'lyrics' => $song->getLyrics(),
                'rawLyrics' => $song->getRawLyrics(),
            ]);
        }

        $candidates = $lyricsFetcher->findSongCandidates($song);
        if (empty($candidates)) {
            return $this->json('No lyrics found.', 404);
        }

        // Todo let user set his candidate in frontend
        $chosen = $candidates[0];
        $lyrics = $lyricsFetcher->findSongLyrics($chosen['id']);
        if (!$lyrics) {
            return $this->json('No lyrics found.', 404);
        }

        // Todo use timing in frontend. For now just strip them.
        $filtered = preg_replace('/\[.+?\]/', '', $lyrics);
        // Replace a few common Chinese credit labels and full-width
        // punctuation before transliteration.
        $filtered = str_replace('作词', 'Text', $filtered);
        $filtered = str_replace('作曲', 'Composition', $filtered);
        $filtered = str_replace("\n", '<br>', $filtered);
        $filtered = str_replace(" ", '', $filtered);
        $filtered = str_replace("!", '!', $filtered);

        // Todo somehow identify if lyrics are japanese :thinking:
        $romaji = $romajiAnalyzer->analyze($filtered);

        // Update Song in Elastic
        $songManager->getSongManager()->update($song->getId(), [
            'lyrics' => $romaji,
            'raw_lyrics' => $lyrics,
        ]);

        return $this->json([
            'lyrics' => $romaji,
            'rawLyrics' => $lyrics,
        ]);
    }
}
import React, { useEffect, useState } from "react"; import { Card } from "react-bootstrap"; import "./UseEffect.css"; import { useNavigate } from "react-router"; import { Link } from "react-router-dom"; const UseEffect = () => { const [users, setUsers] = useState([]); const navigate = useNavigate(); const getUsers = async () => { const response = await fetch("https://api.github.com/users"); setUsers(await response.json()); }; useEffect(() => { getUsers(); }, []); return ( <div> <h1>CARD</h1> <div className="card1"> <div className="row text-center"> {users.map((curElem) => { return <> <div className="col-10 col-md-4 mt-5"> <div> <Card style={{ margin: "4rem" }} > <Card.Img variant="top" src={curElem.avatar_url} /> <Card.Body> {/* <Card.Title onClick={""} >{curElem.login}</Card.Title> */} {/* <Card.Title onClick={""} >{curElem.login}</Card.Title> */} <Link to="/idpage">{curElem.login}</Link> {/* <Card.Text>har</Card.Text> */} {/* <Button variant="primary">Go somewhere</Button> */} </Card.Body> </Card> </div> </div> </>; })} </div> </div> </div> ); }; export default UseEffect;
<?php
/**
 * The template for displaying comments
 *
 * This is the template that displays the area of the page that contains both the current comments
 * and the comment form.
 *
 * @link https://developer.wordpress.org/themes/basics/template-hierarchy/
 *
 * @package singlecoaching
 */

/*
 * If the current post is protected by a password and
 * the visitor has not yet entered the password we will
 * return early without loading the comments.
 */
if ( post_password_required() ) {
	return;
}
?>
<div id="reactie"></div>
<div id="comments" class="comments-area">
<?php
/* For regular posts the comment form is rendered BEFORE the comment list;
 * for Tribe event posts only an anchor link is shown here and the form is
 * rendered after the list instead (see the second tribe_events block below). */
if (!is_singular('tribe_events') ) {
	comment_form();
}
if (is_singular('tribe_events') ) {
	echo '<h2 style="margin-bottom:2em;"><a href="#reactie1">Geef een reactie</a></h2>';
}
// You can start editing here -- including this comment!
if ( have_comments() ) :
	?>
	<h2 class="comments-title">
		<?php
		$singlecoaching_comment_count = get_comments_number();
		if ( '1' === $singlecoaching_comment_count ) {
			printf(
				/* translators: 1: title. */
				esc_html__( 'One thought on &ldquo;%1$s&rdquo;', 'singlecoaching' ),
				'<span>' . get_the_title() . '</span>'
			);
		} else {
			printf( // WPCS: XSS OK.
				/* translators: 1: comment count number, 2: title. */
				esc_html( _nx( '%1$s thought on &ldquo;%2$s&rdquo;', '%1$s thoughts on &ldquo;%2$s&rdquo;', $singlecoaching_comment_count, 'comments title', 'singlecoaching' ) ),
				number_format_i18n( $singlecoaching_comment_count ),
				'<span>' . get_the_title() . '</span>'
			);
		}
		?>
	</h2><!-- .comments-title -->
	<ol class="comment-list">
		<?php
		wp_list_comments( array(
			'style'      => 'ol',
			'short_ping' => true,
		) );
		?>
	</ol><!-- .comment-list -->
	<div id="reactie1"></div>
	<?php
	/* ************************************************************** */
	/* For trainings etc. the comment form goes after the comments    */
	/* (original Dutch: "Bij trainingen ed reactieformulier aan einde */
	/* van de reacties")                                              */
	/* ************************************************************** */
	if (is_singular('tribe_events') ) {
		comment_form();
	}
	// If comments are closed and there are comments, let's leave a little note, shall we?
	if ( ! comments_open() ) :
		?>
		<p class="no-comments"><?php esc_html_e( 'Comments are closed.', 'singlecoaching' ); ?></p>
		<?php
	endif;
endif; // Check for have_comments().
?>
</div><!-- #comments -->
<!DOCTYPE html>
<html lang="ja">
<head>
<meta charset="UTF-8">
<title>My TODO</title>
<link rel="stylesheet" href="css/style.css">
<script src="https://unpkg.com/react@16/umd/react.development.js"></script>
<script src="https://unpkg.com/react-dom@16/umd/react-dom.development.js"></script>
<script src="https://unpkg.com/[email protected]/babel.min.js"></script>
</head>
<body>
<div id="root"></div>
<script type="text/babel">
(() => {
  const todos = [];

  // Generates a reasonably unique id for new todos.
  // (Was referenced by addTodo but never defined in the original file,
  // causing a ReferenceError.)
  function getUniqueId() {
    return new Date().getTime().toString(36) + '-' + Math.random().toString(36).slice(2);
  }

  // One todo row: a done-checkbox plus a delete command.
  // (Was referenced by TodoList but never defined in the original file.)
  function TodoItem(props) {
    return (
      <li>
        <label>
          <input
            type="checkbox"
            checked={props.todo.isDone}
            onChange={() => props.checkTodo(props.todo)}
          />
          <span>{props.todo.title}</span>
        </label>
        <span onClick={() => props.deleteTodo(props.todo)}>[x]</span>
      </li>
    );
  }

  // Renders the todo collection, or a placeholder row when it is empty.
  function TodoList(props) {
    const todos = props.todos.map(todo => {
      return (
        <TodoItem
          key={todo.id}
          todo={todo}
          checkTodo={props.checkTodo}
          deleteTodo={props.deleteTodo}
        />
      );
    });
    return (
      <ul>
        {props.todos.length ? todos : <li>タスクなし</li>}
      </ul>
    );
  }

  // Controlled input + submit button for adding a todo.
  function TodoForm(props) {
    return (
      <form onSubmit={props.addTodo}>
        <input type="text" value={props.item} onChange={props.updateItem}/>
        <input type="submit" value="チャンネル情報"/>
      </form>
    );
  }

  // Header with a remaining/total counter and a purge button.
  // (Was referenced by App.render but never defined in the original file.)
  function TodoHeader(props) {
    const remaining = props.todos.filter(todo => !todo.isDone);
    return (
      <h1>
        <button onClick={props.purge}>Purge</button>
        {' My Todos '}
        <span>({remaining.length}/{props.todos.length})</span>
      </h1>
    );
  }

  class App extends React.Component {
    constructor() {
      super();
      this.state = {
        todos: todos,
        item: ''
      };
      // checkTodo/deleteTodo were bound and passed to children but never
      // implemented in the original file, crashing in this constructor
      // ("cannot read property 'bind' of undefined").
      this.checkTodo = this.checkTodo.bind(this);
      this.deleteTodo = this.deleteTodo.bind(this);
      this.updateItem = this.updateItem.bind(this);
      this.addTodo = this.addTodo.bind(this);
      this.purge = this.purge.bind(this);
    }

    // Toggles the isDone flag of the given todo without mutating state.
    checkTodo(todo) {
      const todos = this.state.todos.map(t => Object.assign({}, t));
      const pos = this.state.todos.map(t => t.id).indexOf(todo.id);
      todos[pos].isDone = !todos[pos].isDone;
      this.setState({ todos: todos });
    }

    // Removes the given todo after user confirmation.
    deleteTodo(todo) {
      if (!confirm('本当に削除しますか')) {
        return;
      }
      const todos = this.state.todos.slice();
      const pos = this.state.todos.indexOf(todo);
      todos.splice(pos, 1);
      this.setState({ todos: todos });
    }

    // Removes all completed todos after user confirmation.
    purge() {
      if (!confirm('本当に削除しますか')) {
        return;
      }
      const todos = this.state.todos.filter(todo => {
        return !todo.isDone;
      });
      this.setState({ todos: todos });
    }

    // Appends a new todo from the controlled input (ignores blank input).
    addTodo(e) {
      e.preventDefault();
      if (this.state.item.trim() === '') {
        return;
      }
      const item = {
        id: getUniqueId(),
        title: this.state.item,
        isDone: false
      };
      const todos = this.state.todos.slice();
      todos.push(item);
      this.setState({
        todos: todos,
        item: ''
      });
    }

    updateItem(e) {
      this.setState({ item: e.target.value });
    }

    // Persist todos on every change; restore them on mount.
    componentDidUpdate() {
      localStorage.setItem('todos', JSON.stringify(this.state.todos));
    }

    componentDidMount() {
      this.setState({
        todos: JSON.parse(localStorage.getItem('todos')) || []
      });
    }

    render() {
      return (
        <div className="container">
          <TodoHeader
            todos={this.state.todos}
            purge={this.purge}
          />
          <TodoList
            todos={this.state.todos}
            checkTodo={this.checkTodo}
            deleteTodo={this.deleteTodo}
          />
          <TodoForm
            item={this.state.item}
            updateItem={this.updateItem}
            addTodo={this.addTodo}
          />
        </div>
      );
    }
  }

  ReactDOM.render(
    <App/>,
    document.getElementById('root')
  );
})();
</script>
</body>
</html>
type Admin = { name: string; privileges: string[]; }; type Employee = { name: string; startDate: Date; }; type ElevatedEmployee = Admin & Employee; const e1: ElevatedEmployee = { name: "Chandler", privileges: ["create-server"], startDate: new Date(), }; type Combine = string | number; type Numeric = number | boolean; type Universal = Combine & Numeric; function addCombine(a: number, b: number): number function addCombine(a: string, b: string): string function addCombine(a: Combine, b: Combine) { if (typeof a === "string" || typeof b === "string") { return a.toString() + b.toString(); } return a + b; } const results = addCombine('Chandler', ' Bowman') results.split(' ') const fetchedUserData = { id: 'u1', name: 'Chandler', job: {title: 'CTO', description: 'My Company'} } console.log(fetchedUserData?.job?.title) const userInputs = undefined const storedData = userInputs ?? 'DEFAULT' console.log(storedData) // type UnknownEmployee = Employee | Admin; // function printEmployeeInfo(emp: UnknownEmployee) { // console.log("Name: " + emp.name); // if ("privileges" in emp) { // console.log("Privileges: " + emp.privileges); // } // if ("startDate" in emp) { // console.log("Starting Date: " + emp.startDate); // } // } // printEmployeeInfo(e1); // class Car { // drive() { // console.log("Car"); // } // } // class Truck { // drive() { // console.log("Truck"); // } // loadCargo(amount: number) { // console.log(`Loading ${amount} Lbs. 
of cargo.`); // } // } // type Vehicle = Car | Truck; // const v1 = new Car(); // const v2 = new Truck(); // function useVehicle(vehicle: Vehicle) { // vehicle.drive(); // if (vehicle instanceof Truck) { // vehicle.loadCargo(1000); // } // } // useVehicle(v1); // useVehicle(v2); // interface Bird { // type: "bird"; // flyingSpeed: number; // } // interface Horse { // type: "horse"; // runningSpeed: number; // } // type Animal = Bird | Horse; // function moveAnimal(animal: Animal) { // let speed; // switch (animal.type) { // case "bird": // speed = animal.flyingSpeed; // break; // case "horse": // speed = animal.runningSpeed; // } // console.log(`A ${animal.type} moves at ${speed} mi/hr`); // } // moveAnimal({ type: "bird", flyingSpeed: 10 }); // moveAnimal({ type: "horse", runningSpeed: 25 }); // // const userInputElement = <HTMLInputElement>document.getElementById('user-output')! // const userInputElement = document.getElementById('user-input')! as HTMLInputElement // userInputElement.value = 'Hello.' // interface ErrorContainer { // [prop: string]: string // } // const errorBag: ErrorContainer = { // email: 'Email Invalid.', // username: 'Start with a capital letter.' // }
import React, { Component } from "react"; import PropTypes from "prop-types"; import { ProductConsumer } from "../Context"; import { Link } from "react-router-dom"; import { ButtonContainer } from "./Button"; /** * @author * @class Details **/ class Details extends Component { state = {}; render() { return ( <ProductConsumer> {(value) => { const { id, company, img, info, price, title, inCart, } = value.detailProduct; return ( <div className="container py-5"> {/* title */} <div className="row"> <div className="col-10 mx-auto text-center text-slanted text-blue my-5"> <h1>{title}</h1> </div> </div> {/* end of title */} {/* Product Info */} <div className="row"> <div className="col-10 mx-auto col-md-6 my-3"> <img src={img} alt="product" className="img-fluid" /> </div> {/* Product Text */} <div className="col-10 mx-auto col-md-6 my-3 text-capitalize"> <h1>modal : {title}</h1> <h4 className="text-title text-uppercase text-muted mt-3 mb-2"> made by : <span className="text-uppercase">{company}</span> </h4> <h4 className="tex-blue"> <strong> Price : <span>$</span> {price} </strong> </h4> <p className="text-capitalize font-weight-bold mt-3 mb-0"> some info about product : </p> <p className="text-muted lead">{info}</p> {/* Buttons */} <div> <Link to="/"> <ButtonContainer>Back to product</ButtonContainer> </Link> <ButtonContainer cart disabled={inCart ? true : false} onClick={() => { value.addToCart(id); value.openModal(id); }} > {inCart ? "inCart" : "add to cart"} </ButtonContainer> </div> </div> </div> </div> ); }} </ProductConsumer> ); } } Details.propTypes = {}; export default Details;
/*
 * Copyright (c) 2023 OceanBase.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.oceanbase.odc.service.flow;

import java.util.Collections;
import java.util.List;
import java.util.Objects;
import java.util.Optional;
import java.util.function.BiFunction;
import java.util.stream.Collectors;

import org.flowable.engine.FormService;
import org.flowable.engine.TaskService;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.data.jpa.domain.Specification;
import org.springframework.stereotype.Service;

import com.oceanbase.odc.common.event.EventPublisher;
import com.oceanbase.odc.core.authority.util.SkipAuthorize;
import com.oceanbase.odc.core.flow.model.FlowableElement;
import com.oceanbase.odc.core.flow.model.FlowableElementType;
import com.oceanbase.odc.core.shared.PreConditions;
import com.oceanbase.odc.core.shared.Verify;
import com.oceanbase.odc.core.shared.constant.ResourceType;
import com.oceanbase.odc.metadb.flow.FlowInstanceEntity;
import com.oceanbase.odc.metadb.flow.FlowInstanceRepository;
import com.oceanbase.odc.metadb.flow.FlowInstanceSpecs;
import com.oceanbase.odc.metadb.flow.GateWayInstanceEntity;
import com.oceanbase.odc.metadb.flow.GateWayInstanceRepository;
import com.oceanbase.odc.metadb.flow.NodeInstanceEntity;
import com.oceanbase.odc.metadb.flow.NodeInstanceEntityRepository;
import com.oceanbase.odc.metadb.flow.SequenceInstanceRepository;
import com.oceanbase.odc.metadb.flow.ServiceTaskInstanceEntity;
import com.oceanbase.odc.metadb.flow.ServiceTaskInstanceRepository;
import com.oceanbase.odc.metadb.flow.UserTaskInstanceEntity;
import com.oceanbase.odc.metadb.flow.UserTaskInstanceRepository;
import com.oceanbase.odc.service.flow.instance.BaseFlowNodeInstance;
import com.oceanbase.odc.service.flow.instance.FlowApprovalInstance;
import com.oceanbase.odc.service.flow.instance.FlowGatewayInstance;
import com.oceanbase.odc.service.flow.instance.FlowTaskInstance;
import com.oceanbase.odc.service.flow.model.FlowNodeType;
import com.oceanbase.odc.service.flow.task.mapper.OdcRuntimeDelegateMapper;
import com.oceanbase.odc.service.iam.auth.AuthenticationFacade;

import lombok.NonNull;
import lombok.extern.slf4j.Slf4j;

/**
 * Default implementation for {@link FlowableAdaptor}, Implementation based on database query
 *
 * @author yh263208
 * @date 2022-02-17 20:16
 * @since ODC_release_3.3.0
 * @see FlowableAdaptor
 */
@Slf4j
@Service
@SkipAuthorize("odc internal usage")
public class FlowableAdaptorImpl implements FlowableAdaptor {

    // NOTE: the original class autowired NodeInstanceEntityRepository twice
    // (as `nodeInstanceRepository` and `nodeRepository`); both referred to the
    // same singleton bean, so they have been consolidated into one field.
    @Autowired
    private NodeInstanceEntityRepository nodeInstanceRepository;
    @Autowired
    private ServiceTaskInstanceRepository serviceTaskInstanceRepository;
    @Autowired
    private EventPublisher eventPublisher;
    @Autowired
    private SequenceInstanceRepository sequenceRepository;
    @Autowired
    private FlowInstanceRepository flowInstanceRepository;
    @Autowired
    private UserTaskInstanceRepository userTaskInstanceRepository;
    @Autowired
    private TaskService taskService;
    @Autowired
    private FormService formService;
    @Autowired
    private AuthenticationFacade authenticationFacade;
    @Autowired
    private GateWayInstanceRepository gateWayInstanceRepository;

    /**
     * Binds a Flowable process instance id to an existing flow instance.
     * Exactly one row must be updated, otherwise verification fails.
     */
    @Override
    public void setProcessInstanceId(@NonNull Long flowInstanceId, @NonNull String processInstanceId) {
        Optional<FlowInstanceEntity> optional = flowInstanceRepository.findById(flowInstanceId);
        PreConditions.validExists(ResourceType.ODC_FLOW_INSTANCE, "Id", flowInstanceId, optional::isPresent);
        int affectRows = flowInstanceRepository.updateProcessInstanceIdById(flowInstanceId, processInstanceId);
        // Original message said "exception" on this success path, which was misleading.
        log.info("Bound flow instance to process instance, flowInstanceId={}, processInstanceId={}, affectRows={}",
                flowInstanceId, processInstanceId, affectRows);
        Verify.verify(affectRows == 1, "AffectRows has to be equal to one");
    }

    @Override
    public Optional<Long> getFlowInstanceIdByProcessInstanceId(@NonNull String processInstanceId) {
        return findUniqueFlowInstanceId(
                Specification.where(FlowInstanceSpecs.processInstanceIdEquals(processInstanceId)),
                "process instance Id " + processInstanceId);
    }

    /**
     * Binds a Flowable process definition id to an existing flow instance.
     * Exactly one row must be updated, otherwise verification fails.
     */
    @Override
    public void setProcessDefinitionId(@NonNull Long flowInstanceId, @NonNull String processDefinitionId) {
        Optional<FlowInstanceEntity> optional = flowInstanceRepository.findById(flowInstanceId);
        PreConditions.validExists(ResourceType.ODC_FLOW_INSTANCE, "Id", flowInstanceId, optional::isPresent);
        int affectRows = flowInstanceRepository.updateProcessDefinitionIdById(flowInstanceId, processDefinitionId);
        // Original message said "exception" and "Instance Id" while logging a definition id.
        log.info("Bound flow instance to process definition, flowInstanceId={}, processDefinitionId={}, affectRows={}",
                flowInstanceId, processDefinitionId, affectRows);
        Verify.verify(affectRows == 1, "AffectRows has to be equal to one");
    }

    @Override
    public Optional<Long> getFlowInstanceIdByProcessDefinitionId(@NonNull String processDefinitionId) {
        return findUniqueFlowInstanceId(
                Specification.where(FlowInstanceSpecs.processDefinitionIdEquals(processDefinitionId)),
                "process definition Id " + processDefinitionId);
    }

    @Override
    public Optional<String> getProcessInstanceIdByFlowInstanceId(@NonNull Long flowInstanceId) {
        Optional<FlowInstanceEntity> optional = flowInstanceRepository.findById(flowInstanceId);
        if (!optional.isPresent()) {
            return Optional.empty();
        }
        String processInstanceId = optional.get().getProcessInstanceId();
        if (processInstanceId == null) {
            return Optional.empty();
        }
        return Optional.of(processInstanceId);
    }

    /**
     * Returns all flowable elements recorded for the given node instance,
     * filtered by element type; empty list when none are found.
     */
    @Override
    public List<FlowableElement> getFlowableElementByType(@NonNull Long instanceId,
            @NonNull FlowNodeType instanceType, @NonNull FlowableElementType flowableElementType) {
        List<NodeInstanceEntity> entities = nodeInstanceRepository
                .findByInstanceIdAndInstanceTypeAndFlowableElementType(instanceId, instanceType, flowableElementType);
        if (entities.isEmpty()) {
            return Collections.emptyList();
        }
        return entities.stream()
                .map(entity -> new FlowableElement(entity.getActivityId(), entity.getName(),
                        entity.getFlowableElementType()))
                .collect(Collectors.toList());
    }

    @Override
    public Optional<FlowTaskInstance> getTaskInstanceByActivityId(@NonNull String activityId,
            @NonNull Long flowInstanceId) {
        Optional<ServiceTaskInstanceEntity> optional = serviceTaskInstanceRepository
                .findByInstanceTypeAndActivityId(FlowNodeType.SERVICE_TASK, activityId, flowInstanceId);
        return innerConvert(optional, this,
                (entity, flowService) -> new FlowTaskInstance(entity, new OdcRuntimeDelegateMapper(), flowService,
                        eventPublisher, taskService, nodeInstanceRepository, sequenceRepository,
                        serviceTaskInstanceRepository));
    }

    @Override
    public Optional<FlowApprovalInstance> getApprovalInstanceByActivityId(@NonNull String activityId,
            @NonNull Long flowInstanceId) {
        Optional<UserTaskInstanceEntity> optional = userTaskInstanceRepository
                .findByInstanceTypeAndActivityId(FlowNodeType.APPROVAL_TASK, activityId, flowInstanceId);
        return innerConvert(optional, this,
                (entity, flowService) -> new FlowApprovalInstance(entity, flowService, taskService, formService,
                        eventPublisher, authenticationFacade, nodeInstanceRepository, sequenceRepository,
                        userTaskInstanceRepository));
    }

    @Override
    public Optional<FlowApprovalInstance> getApprovalInstanceByName(@NonNull String name,
            @NonNull Long flowInstanceId) {
        Optional<UserTaskInstanceEntity> optional = userTaskInstanceRepository
                .findByInstanceTypeAndName(FlowNodeType.APPROVAL_TASK, name, flowInstanceId);
        return innerConvert(optional, this,
                (entity, flowService) -> new FlowApprovalInstance(entity, flowService, taskService, formService,
                        eventPublisher, authenticationFacade, nodeInstanceRepository, sequenceRepository,
                        userTaskInstanceRepository));
    }

    @Override
    public Optional<FlowGatewayInstance> getGatewayInstanceByActivityId(@NonNull String activityId,
            @NonNull Long flowInstanceId) {
        Optional<GateWayInstanceEntity> optional = gateWayInstanceRepository
                .findByInstanceTypeAndActivityId(FlowNodeType.GATEWAY, activityId, flowInstanceId);
        return innerConvert(optional, this,
                (gateway, flowService) -> new FlowGatewayInstance(gateway, flowService, nodeInstanceRepository,
                        sequenceRepository, gateWayInstanceRepository));
    }

    /**
     * Records a flowable element for a node instance; no-op when an entity
     * with the same element type and matching name or activity id already
     * exists for that node.
     */
    @Override
    public void setFlowableElement(@NonNull BaseFlowNodeInstance nodeInstance,
            @NonNull FlowableElement flowableElement) {
        List<NodeInstanceEntity> entities =
                nodeInstanceRepository.findByInstanceIdAndInstanceTypeAndFlowableElementType(nodeInstance.getId(),
                        nodeInstance.getNodeType(), flowableElement.getType());
        for (NodeInstanceEntity entity : entities) {
            if (Objects.equals(entity.getFlowableElementType(), flowableElement.getType())
                    && (Objects.equals(entity.getName(), flowableElement.getName())
                            || Objects.equals(entity.getActivityId(), flowableElement.getActivityId()))) {
                return;
            }
        }
        NodeInstanceEntity entity = new NodeInstanceEntity();
        entity.setActivityId(flowableElement.getActivityId());
        entity.setName(flowableElement.getName());
        entity.setFlowableElementType(flowableElement.getType());
        entity.setInstanceType(nodeInstance.getNodeType());
        entity.setInstanceId(nodeInstance.getId());
        entity.setFlowInstanceId(nodeInstance.getFlowInstanceId());
        nodeInstanceRepository.save(entity);
    }

    /**
     * Finds the single flow instance matching the specification. Verifies the
     * match is unique; {@code description} names the lookup key in the error.
     * (Extracted from two identical method bodies in the original class.)
     */
    private Optional<Long> findUniqueFlowInstanceId(Specification<FlowInstanceEntity> specification,
            String description) {
        List<FlowInstanceEntity> entityList = flowInstanceRepository.findAll(specification);
        if (entityList.isEmpty()) {
            return Optional.empty();
        }
        Verify.verify(entityList.size() == 1, "A " + description + " is associated with multiple flow instance Ids "
                + entityList.stream().map(FlowInstanceEntity::getId).collect(Collectors.toList()));
        FlowInstanceEntity entity = entityList.get(0);
        if (entity.getId() == null) {
            return Optional.empty();
        }
        return Optional.of(entity.getId());
    }

    /** Maps a present Optional through {@code function}; empty stays empty. */
    private <T, V> Optional<T> innerConvert(@NonNull Optional<V> optional, @NonNull FlowableAdaptor flowableAdaptor,
            @NonNull BiFunction<V, FlowableAdaptor, T> function) {
        if (!optional.isPresent()) {
            return Optional.empty();
        }
        T value = function.apply(optional.get(), flowableAdaptor);
        if (value == null) {
            return Optional.empty();
        }
        return Optional.of(value);
    }
}
//
//  ViewController.swift
//  aulaMVVM_parte2
//
//  Created by Jessica Santana on 27/05/22.
//

import UIKit

/// Screen with two number text fields and a result label.
/// MVVM wiring: the sum itself is delegated to `SomaViewModel`; the result
/// comes back through the `SomaViewModelDelegate` callback below.
class ViewController: UIViewController {

    @IBOutlet weak var primeiroNumeroTextField: UITextField!
    @IBOutlet weak var segundoNumeroTextField: UITextField!
    @IBOutlet weak var resultadoLabel: UILabel!

    // View model that performs the sum; results arrive via its delegate.
    let viewModel = SomaViewModel()

    override func viewDidLoad() {
        super.viewDidLoad()
        // Register self so exibeResultado(_:) is called when the sum is done.
        viewModel.delegate = self
    }

    /// Forwards both raw text-field values (may be nil/non-numeric — validation
    /// is the view model's responsibility) to the view model.
    @IBAction func somarButtonAction(_ sender: Any) {
        // perform a sum
        viewModel.somar(
            primeiroValor: primeiroNumeroTextField.text,
            segundoValor: segundoNumeroTextField.text
        )
        // to perform a sum, both fields must be numeric
        // any field that is not numeric should get a red border
        // if a field is empty, it should get a red border
        // after the sum is performed, the result should be shown
        // in the result label
    }

    // private func soma() -> Int
}

extension ViewController: SomaViewModelDelegate {
    /// Delegate callback: renders the already-formatted sum in the result label.
    func exibeResultado(_ resultadoSoma: String) {
        resultadoLabel.text = resultadoSoma
    }
}
package com.example.task;

import androidx.appcompat.app.AlertDialog;
import androidx.appcompat.app.AppCompatActivity;

import android.content.Intent;
import android.database.Cursor;
import android.database.sqlite.SQLiteDatabase;
import android.os.Bundle;
import android.view.View;
import android.widget.Button;
import android.widget.GridView;

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

/**
 * Main screen: a 4-column grid of toggle buttons whose labels form a
 * "combination" string. The user can mirror each row, save the combination to
 * SQLite, check whether it already exists, and open a list of all saved
 * combinations.
 */
public class MainActivity extends AppCompatActivity {

    /** Backing labels for the grid cells, one entry per button. */
    List<String> buttons = new ArrayList<>();
    GridView gridView;
    GridAdapter adapter;
    SQLiteDatabase db;
    /** Row id of the last combination found by {@link #isCombinationExist}. */
    int id;

    /** Shows a simple modal alert with the given message and title. */
    public void showDialog(String text, String title) {
        AlertDialog alertDialog = new AlertDialog.Builder(MainActivity.this).create();
        alertDialog.setTitle(title);
        alertDialog.setMessage(text);
        alertDialog.show();
    }

    /** Opens (or creates) the database and resets the combinations table. */
    protected void createDB() {
        db = getBaseContext().openOrCreateDatabase("app.db", MODE_PRIVATE, null);
        // "IF EXISTS" avoids a crash on the very first launch, when the table
        // has not been created yet (plain DROP TABLE throws SQLiteException).
        db.execSQL("DROP TABLE IF EXISTS combinations");
        db.execSQL("CREATE TABLE IF NOT EXISTS combinations (id integer primary key autoincrement,combination text)");
    }

    /** Inserts the combination unless an identical one is already stored. */
    protected void insertCombination(String value) {
        if (!isCombinationExist(value)) {
            // Bind the value as a parameter instead of concatenating it into
            // the SQL text: safe against SQL injection and against values that
            // contain quotes.
            db.execSQL("INSERT INTO combinations(combination) VALUES(?)", new Object[]{value});
            showDialog("Insert complete", "Insert");
        } else {
            showDialog("Already exist number:" + id, "Yes");
        }
    }

    /**
     * Returns true when {@code value} is already stored; on a match the row id
     * is remembered in {@link #id}.
     */
    protected boolean isCombinationExist(String value) {
        Cursor query = db.rawQuery("SELECT * FROM combinations;", null);
        try {
            while (query.moveToNext()) {
                int rowId = query.getInt(0);
                String combination = query.getString(1);
                if (combination.equals(value)) {
                    this.id = rowId;
                    return true;
                }
            }
            return false;
        } finally {
            // The original leaked the cursor on the early "return true" path;
            // closing in finally covers every exit.
            query.close();
        }
    }

    @Override
    protected void onCreate(Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        setContentView(R.layout.activity_main);
        gridView = findViewById(R.id.grid);
        gridView.setNumColumns(4);
        // 3 rows x 4 columns, every cell starts as "0".
        for (int i = 0; i < 12; i++) {
            buttons.add("0");
        }
        adapter = new GridAdapter(this, buttons);
        gridView.setAdapter(adapter);

        Button button = findViewById(R.id.buttonMirror);
        button.setOnClickListener(new View.OnClickListener() {
            @Override
            public void onClick(View view) {
                createMirror();
            }
        });

        Button button1 = findViewById(R.id.buttonSave);
        button1.setOnClickListener(new View.OnClickListener() {
            @Override
            public void onClick(View view) {
                insertCombination(getCombination());
            }
        });

        Button button2 = findViewById(R.id.buttonCheck);
        button2.setOnClickListener(new View.OnClickListener() {
            @Override
            public void onClick(View view) {
                if (isCombinationExist(getCombination())) {
                    showDialog("Already exist number:" + id, "Yes");
                } else {
                    showDialog("Not exist", "No");
                }
            }
        });

        Button button3 = findViewById(R.id.buttonAll);
        button3.setOnClickListener(new View.OnClickListener() {
            @Override
            public void onClick(View view) {
                Intent intent = new Intent(MainActivity.this, AdditionalActivity.class);
                startActivity(intent);
            }
        });

        createDB();
    }

    /**
     * Concatenates the current button labels into one combination string.
     * NOTE(review): getChildAt only returns laid-out children — assumes all 12
     * cells are visible at once; verify for small screens.
     */
    protected String getCombination() {
        StringBuilder stringBuilder = new StringBuilder();
        for (int i = 0; i < gridView.getCount(); i++) {
            Button btn = (Button) gridView.getChildAt(i);
            stringBuilder.append(btn.getText().toString());
        }
        return stringBuilder.toString();
    }

    /** Reverses each 4-cell row, then clicks every button whose label changed. */
    protected void createMirror() {
        buttons.clear();
        List<String> listTm = new ArrayList<>();
        for (int i = 0; i < gridView.getCount(); i++) {
            Button btn = (Button) gridView.getChildAt(i);
            listTm.add(btn.getText().toString());
            if (listTm.size() == 4) {
                Collections.reverse(listTm);
                buttons.addAll(listTm);
                listTm.clear();
            }
        }
        for (int i = 0; i < gridView.getCount(); i++) {
            Button btn = (Button) gridView.getChildAt(i);
            // Compare string contents with equals(); the original used "!=",
            // which compares references and almost always evaluated true.
            if (!btn.getText().toString().equals(buttons.get(i))) {
                btn.callOnClick();
            }
        }
    }
}
import wandb
import torch
import copy
from datetime import datetime
from ultralytics.utils.torch_utils import de_parallel
import ultralytics

try:
    import dill as pickle
except ImportError:
    import pickle


def on_train_start(trainer):
    """Record the full set of training arguments in the W&B run config."""
    wandb.config.train = vars(trainer.args)


def on_fit_epoch_end(trainer):
    """Log learning rates and epoch metrics (mAP, val losses); checkpoint
    every 10th epoch and on the final epoch."""
    wandb.log({**trainer.lr, **trainer.metrics})
    if trainer.epoch % 10 == 0 or trainer.epoch == trainer.epochs:
        save_model(trainer)


def on_train_batch_end(trainer):
    """Log the per-batch training losses (box / cls / dfl)."""
    wandb.log({'train/box_loss': trainer.loss_items[0],
               'train/cls_loss': trainer.loss_items[1],
               'train/dfl_loss': trainer.loss_items[2]})


def save_model(trainer):
    """Save a resumable checkpoint to disk and upload it as a W&B artifact.

    Adapted from W&B's YOLOv8 integration. The checkpoint mirrors Ultralytics'
    own format (model/EMA halved to fp16, optimizer state, train args).
    """
    # Copy before adding 'pc': vars() returns trainer.args' live __dict__, so
    # the original code silently injected a 'pc' attribute into trainer.args.
    current_args = dict(vars(trainer.args))
    current_args['pc'] = trainer.pc
    model_checkpoint_artifact = wandb.Artifact(
        f"run_{wandb.run.id}_model", "model", metadata=current_args
    )
    checkpoint_dict = {
        "epoch": trainer.epoch,
        "best_fitness": trainer.best_fitness,
        # deepcopy + de_parallel so the saved weights are detached from any
        # DDP wrapper and from the live training model.
        "model": copy.deepcopy(de_parallel(trainer.model)).half(),
        "ema": copy.deepcopy(trainer.ema.ema).half(),
        "updates": trainer.ema.updates,
        "optimizer": trainer.optimizer.state_dict(),
        "train_args": current_args,
        "date": datetime.now().isoformat(),
        "version": ultralytics.__version__,
    }
    checkpoint_path = trainer.wdir / f"epoch{trainer.epoch}.pt"
    torch.save(checkpoint_dict, checkpoint_path, pickle_module=pickle)
    model_checkpoint_artifact.add_file(str(checkpoint_path))
    wandb.log_artifact(
        model_checkpoint_artifact, aliases=[f"epoch_{trainer.epoch}"]
    )
https://python.langchain.com/docs/integrations/tools/twilio ComponentsToolsTwilio On this page Twilio This notebook goes over how to use the Twilio API wrapper to send a message through SMS or Twilio Messaging Channels. Twilio Messaging Channels facilitates integrations with 3rd party messaging apps and lets you send messages through WhatsApp Business Platform (GA), Facebook Messenger (Public Beta) and Google Business Messages (Private Beta). Setup To use this tool you need to install the Python Twilio package twilio # !pip install twilio You'll also need to set up a Twilio account and get your credentials. You'll need your Account String Identifier (SID) and your Auth Token. You'll also need a number to send messages from. You can either pass these in to the TwilioAPIWrapper as named parameters account_sid, auth_token, from_number, or you can set the environment variables TWILIO_ACCOUNT_SID, TWILIO_AUTH_TOKEN, TWILIO_FROM_NUMBER. Sending an SMS From langchain.utilities.twilio import TwilioAPIWrapper twilio = TwilioAPIWrapper( # account_sid="foo", # auth_token="bar", # from_number="baz", ) twilio.run("hello world", "+16162904619") Sending a WhatsApp Message You'll need to link your WhatsApp Business Account with Twilio. You'll also need to make sure that the number to send messages from is configured as a WhatsApp Enabled Sender on Twilio and registered with WhatsApp. from langchain.utilities.twilio import TwilioAPIWrapper twilio = TwilioAPIWrapper( # account_sid="foo", # auth_token="bar", # from_number="whatsapp: baz", ) twilio.run("hello world", "whatsapp: +16162904619") Previous Tavily Search Next Wikipedia
import React from 'react'; import { Container, Row, Col } from 'react-bootstrap'; import Carousel from 'react-multi-carousel'; import 'react-multi-carousel/lib/styles.css'; import { CircularProgressbar } from 'react-circular-progressbar'; import 'react-circular-progressbar/dist/styl.css'; import colorSharp from '../assets/img/color-sharp.png'; import TrackVisibility from 'react-on-screen'; import 'animate.css'; const Skills = () => { const responsive = { superLargeDesktop: { // the naming can be any, depends on you. breakpoint: { max: 4000, min: 3000 }, items: 5, }, desktop: { breakpoint: { max: 3000, min: 1024 }, items: 3, }, tablet: { breakpoint: { max: 1024, min: 464 }, items: 2, }, mobile: { breakpoint: { max: 464, min: 0 }, items: 1, }, }; return ( <section className="skill" id="skills"> <Container> <Row> <Col> <div className="skill-bx"> <TrackVisibility> {({ isVisible }) => ( <div className={isVisible ? 'animate__animated animate__fadeInDown' : ''}> <h2>Skills</h2> <p> Front End Developer with 3 years experience in hand code in web and mobile applications development. using an array of technologies like HTML5,CSS3, JavaScript, React/Redux. Expert knowledge of the UI/UX design process and development.Fast learner, hard worker and team player who is proficient in an array of scripting languages and web tools. 
</p> </div> )} </TrackVisibility> <Carousel responsive={responsive} infinite={true} className="skill-slider"> <div className="item"> <CircularProgressbar value={90} text={`${90}%`} />;<h5>HTML|CSS|JS</h5> </div> <div className="item"> <CircularProgressbar value={85} text={`${85}%`} />;<h5>REACT|REDUX</h5> </div> <div className="item"> <CircularProgressbar value={70} text={`${70}%`} />;<h5>WEB Design</h5> </div> <div className="item"> <CircularProgressbar value={65} text={`${65}%`} />;<h5>ADOBE SUIT</h5> </div> </Carousel> </div> </Col> </Row> </Container> <img className="background-image-left" src={colorSharp} alt="bg-left" /> </section> ); }; export default Skills;
# 第五章:自回归模型 到目前为止,我们已经探讨了两种涉及潜变量的生成模型家族——变分自动编码器(VAEs)和生成对抗网络(GANs)。在这两种情况下,引入了一个新变量,其分布易于抽样,模型学习如何将此变量*解码*回原始领域。 现在我们将把注意力转向*自回归模型*——一类通过将生成建模问题简化为一个顺序过程的模型家族。自回归模型将预测条件放在序列中的先前值上,而不是在潜在随机变量上。因此,它们试图明确地对数据生成分布建模,而不是对其进行近似(如 VAEs 的情况)。 在本章中,我们将探讨两种不同的自回归模型:长短期记忆网络和 PixelCNN。我们将把 LSTM 应用于文本数据,将 PixelCNN 应用于图像数据。我们将在第九章中详细介绍另一个非常成功的自回归模型 Transformer。 # 介绍 为了理解 LSTM 的工作原理,我们将首先访问一个奇怪的监狱,那里的囚犯们组成了一个文学社团...​ Sopp 先生及其众包寓言的故事是对一种臭名昭著的用于文本等序列数据的自回归技术的类比:长短期记忆网络。 # 长短期记忆网络(LSTM) LSTM 是一种特殊类型的循环神经网络(RNN)。RNN 包含一个循环层(或*单元),能够通过使其在特定时间步的输出成为下一个时间步的输入的一部分来处理序列数据。 当 RNN 首次引入时,循环层非常简单,仅包含一个 tanh 运算符,确保在时间步之间传递的信息在-1 和 1 之间缩放。然而,这种方法被证明存在梯度消失问题,并且在处理长序列数据时不具备良好的可扩展性。 LSTM 单元最初是在 1997 年由 Sepp Hochreiter 和 Jürgen Schmidhuber 的一篇论文中首次引入的。¹在这篇论文中,作者描述了 LSTM 不会像普通 RNN 那样遭受梯度消失问题,并且可以在数百个时间步长的序列上进行训练。自那时以来,LSTM 架构已经被改进和改良,变体如门控循环单元(本章后面讨论)现在被广泛应用并作为 Keras 中的层可用。 LSTM 已经应用于涉及序列数据的各种问题,包括时间序列预测、情感分析和音频分类。在本章中,我们将使用 LSTM 来解决文本生成的挑战。 # 运行此示例的代码 此示例的代码可以在位于书籍存储库中的 Jupyter 笔记本中找到,路径为*notebooks/05_autoregressive/01_lstm/lstm.ipynb*。 ## 食谱数据集 我们将使用通过 Kaggle 提供的[Epicurious 食谱数据集](https://oreil.ly/laNUt)。这是一个包含超过 20,000 个食谱的数据集,附带有营养信息和配料清单等元数据。 您可以通过在书籍存储库中运行 Kaggle 数据集下载脚本来下载数据集,如示例 5-1 所示。这将把食谱和相关元数据保存到本地的*/data*文件夹中。 ##### 示例 5-1。下载 Epicurious 食谱数据集 ```py bash scripts/download_kaggle_data.sh hugodarwood epirecipes ``` `示例 5-2 展示了如何加载和过滤数据,以便只保留具有标题和描述的食谱。示例中给出了一个食谱文本字符串,详见示例 5-3。 ##### 示例 5-2。加载数据 ```py with open('/app/data/epirecipes/full_format_recipes.json') as json_data: recipe_data = json.load(json_data) filtered_data = [ 'Recipe for ' + x['title']+ ' | ' + ' '.join(x['directions']) for x in recipe_data if 'title' in x and x['title'] is not None and 'directions' in x and x['directions'] is not None ] ``` ##### 示例 5-3。来自食谱数据集的文本字符串 ```py Recipe for Ham Persillade with Mustard Potato Salad and Mashed Peas | Chop enough parsley leaves to measure 1 tablespoon; reserve. Chop remaining leaves and stems and simmer with broth and garlic in a small saucepan, covered, 5 minutes. 
Meanwhile, sprinkle gelatin over water in a medium bowl and let soften 1 minute. Strain broth through a fine-mesh sieve into bowl with gelatin and stir to dissolve. Season with salt and pepper. Set bowl in an ice bath and cool to room temperature, stirring. Toss ham with reserved parsley and divide among jars. Pour gelatin on top and chill until set, at least 1 hour. Whisk together mayonnaise, mustard, vinegar, 1/4 teaspoon salt, and 1/4 teaspoon pepper in a large bowl. Stir in celery, cornichons, and potatoes. Pulse peas with marjoram, oil, 1/2 teaspoon pepper, and 1/4 teaspoon salt in a food processor to a coarse mash. Layer peas, then potato salad, over ham. ``` 在看如何在 Keras 中构建 LSTM 网络之前,我们必须先快速了解文本数据的结构以及它与本书中迄今为止看到的图像数据有何不同。## 处理文本数据 文本和图像数据之间存在几个关键差异,这意味着许多适用于图像数据的方法并不适用于文本数据。特别是: + 文本数据由离散块(字符或单词)组成,而图像中的像素是连续色谱中的点。我们可以轻松地将绿色像素变成蓝色,但我们不清楚应该如何使单词“猫”更像单词“狗”,例如。这意味着我们可以轻松地将反向传播应用于图像数据,因为我们可以计算损失函数相对于单个像素的梯度,以确定像素颜色应该如何改变以最小化损失的方向。对于离散文本数据,我们不能明显地以同样的方式应用反向传播,因此我们需要找到解决这个问题的方法。 + 文本数据具有时间维度但没有空间维度,而图像数据具有两个空间维度但没有时间维度。文本数据中单词的顺序非常重要,单词倒过来就没有意义,而图像通常可以翻转而不影响内容。此外,单词之间通常存在长期的顺序依赖关系,模型需要捕捉这些依赖关系:例如,回答问题或延续代词的上下文。对于图像数据,所有像素可以同时处理。 + 文本数据对个体单位(单词或字符)的微小变化非常敏感。图像数据通常对个体像素单位的变化不太敏感——即使一些像素被改变,房子的图片仍然可以被识别为房子——但是对于文本数据,即使改变几个单词也可能极大地改变段落的含义,或使其毫无意义。这使得训练模型生成连贯文本非常困难,因为每个单词对段落的整体含义至关重要。 + 文本数据具有基于规则的语法结构,而图像数据不遵循有关如何分配像素值的固定规则。例如,在任何情况下写“猫坐在上面”都没有语法意义。还有一些语义规则极其难以建模;即使从语法上讲,“我在海滩上”这个陈述没有问题,但意义上是不通顺的。 # 基于文本的生成式深度学习的进展 直到最近,大多数最复杂的生成式深度学习模型都集中在图像数据上,因为前面列表中提到的许多挑战甚至超出了最先进技术的范围。然而,在过去的五年中,在基于文本的生成式深度学习领域取得了惊人的进展,这要归功于 Transformer 模型架构的引入,我们将在第九章中探讨。 考虑到这些要点,让我们现在来看看我们需要采取哪些步骤,以便将文本数据整理成适合训练 LSTM 网络的形式。 ## 标记化 第一步是清理和标记化文本。标记化是将文本分割成单独的单位,如单词或字符的过程。 如何对文本进行标记化取决于您尝试使用文本生成模型实现什么目标。使用单词和字符标记都有利弊,您的选择将影响您在建模之前需要如何清理文本以及模型输出。 如果使用单词标记: + 所有文本都可以转换为小写,以确保句子开头的大写单词与句子中间出现的相同单词以相同方式进行标记化。然而,在某些情况下,这可能不是理想的;例如,一些专有名词,如姓名或地点,可能受益于保持大写,以便它们被独立标记化。 + 文本*词汇*(训练集中不同单词的集合)可能非常庞大,有些单词可能非常稀疏,甚至可能只出现一次。将稀疏单词替换为*未知单词*的标记可能是明智的选择,而不是将它们作为单独的标记包含在内,以减少神经网络需要学习的权重数量。 + 
单词可以进行*词干处理*,意味着它们被简化为最简单的形式,以便动词的不同时态保持标记化在一起。例如,*browse*、*browsing*、*browses*和*browsed*都将被词干处理为*brows*。 + 您需要将标点标记化,或者完全删除它。 + 使用单词标记化意味着模型永远无法预测训练词汇表之外的单词。 如果您使用字符标记: + 模型可能生成字符序列,形成训练词汇表之外的新单词——在某些情况下,这可能是可取的,但在其他情况下则不是。 + 大写字母可以转换为它们的小写对应词,也可以保留为单独的标记。 + 使用字符标记时,词汇量通常较小。这对模型训练速度有益,因为最终输出层中需要学习的权重较少。 在这个示例中,我们将使用小写单词标记化,不进行词干处理。我们还将标记化标点符号,因为我们希望模型能够预测何时结束句子或使用逗号,例如。 示例 5-4 中的代码清理并标记文本。 ##### 示例 5-4。标记化 ```py def pad_punctuation(s): s = re.sub(f"([{string.punctuation}])", r' \1 ', s) s = re.sub(' +', ' ', s) return s text_data = [pad_punctuation(x) for x in filtered_data] # ① text_ds = tf.data.Dataset.from_tensor_slices(text_data).batch(32).shuffle(1000) # ② vectorize_layer = layers.TextVectorization( # ③ standardize = 'lower', max_tokens = 10000, output_mode = "int", output_sequence_length = 200 + 1, ) vectorize_layer.adapt(text_ds) # ④ vocab = vectorize_layer.get_vocabulary() # ⑤ ``` ① 填充标点符号,将它们视为单独的单词。 ② 转换为 TensorFlow 数据集。 ③ 创建一个 Keras `TextVectorization`层,将文本转换为小写,为最常见的 10,000 个单词分配相应的整数标记,并将序列修剪或填充到 201 个标记长。 ④ 将`TextVectorization`层应用于训练数据。 ⑤ `vocab`变量存储一个单词标记列表。 在标记化后,一个配方的示例显示在示例 5-5 中。我们用于训练模型的序列长度是训练过程的一个参数。在这个示例中,我们选择使用长度为 200 的序列长度,因此我们将配方填充或裁剪到比这个长度多一个,以便我们创建目标变量(在下一节中详细介绍)。为了实现这个期望的长度,向量的末尾用零填充。 # 停止标记 `0`标记被称为*停止标记*,表示文本字符串已经结束。 ##### 示例 5-5。示例 5-3 中的配方进行了标记化 ```py [ 26 16 557 1 8 298 335 189 4 1054 494 27 332 228 235 262 5 594 11 133 22 311 2 332 45 262 4 671 4 70 8 171 4 81 6 9 65 80 3 121 3 59 12 2 299 3 88 650 20 39 6 9 29 21 4 67 529 11 164 2 320 171 102 9 374 13 643 306 25 21 8 650 4 42 5 931 2 63 8 24 4 33 2 114 21 6 178 181 1245 4 60 5 140 112 3 48 2 117 557 8 285 235 4 200 292 980 2 107 650 28 72 4 108 10 114 3 57 204 11 172 2 73 110 482 3 298 3 190 3 11 23 32 142 24 3 4 11 23 32 142 33 6 9 30 21 2 42 6 353 3 3224 3 4 150 2 437 494 8 1281 3 37 3 11 23 15 142 33 3 4 11 23 32 142 24 6 9 291 188 5 9 412 572 2 230 494 3 46 335 189 3 20 557 2 0 0 0 0 0 0 0 0] ``` 在示例 5-6 中,我们可以看到一部分标记列表映射到它们各自的索引。该层将`0`标记保留为填充(即停止标记),将`1`标记保留为超出前 10000 
个单词的未知单词(例如,persillade)。其他单词按频率顺序分配标记。要包含在词汇表中的单词数量也是训练过程的一个参数。包含的单词越多,您在文本中看到的*未知*标记就越少;但是,您的模型需要更大以容纳更大的词汇量。 ##### 示例 5-6。`TextVectorization`层的词汇表 ```py 0: 1: [UNK] 2: . 3: , 4: and 5: to 6: in 7: the 8: with 9: a ``` ## 创建训练集 我们的 LSTM 将被训练以预测序列中的下一个单词,给定此点之前的一系列单词。例如,我们可以向模型提供*烤鸡配煮熟的*的标记,期望模型输出一个合适的下一个单词(例如*土豆*,而不是*香蕉*)。 因此,我们可以简单地将整个序列向后移动一个标记,以创建我们的目标变量。 数据集生成步骤可以通过示例 5-7 中的代码实现。 ##### 示例 5-7。创建训练数据集 ```py def prepare_inputs(text): text = tf.expand_dims(text, -1) tokenized_sentences = vectorize_layer(text) x = tokenized_sentences[:, :-1] y = tokenized_sentences[:, 1:] return x, y train_ds = text_ds.map(prepare_inputs) # ① ``` ① 创建包含食谱标记(输入)和相同向量向后移动一个标记(目标)的训练集。 ## LSTM 架构 整个 LSTM 模型的架构如表 5-1 所示。模型的输入是一系列整数标记,输出是 10,000 个词汇表中每个单词在序列中出现的概率。为了详细了解这是如何工作的,我们需要介绍两种新的层类型,即`Embedding`和`LSTM`。 表 5-1。LSTM 模型的摘要 | 层(类型) | 输出形状 | 参数 # | | --- | --- | --- | | InputLayer | (None, None) | 0 | | Embedding | (None, None, 100) | 1,000,000 | | LSTM | (None, None, 128) | 117,248 | | Dense | (None, None, 10000) | 1,290,000 | | 总参数 | 2,407,248 | | 可训练参数 | 2,407,248 | | 不可训练参数 | 0 | # LSTM 的输入层 请注意,`Input`层不需要我们提前指定序列长度。批处理大小和序列长度都是灵活的(因此形状为`(None, None)`)。这是因为所有下游层对通过的序列长度都是不可知的。 ## 嵌入层 *嵌入层*本质上是一个查找表,将每个整数标记转换为长度为`embedding_size`的向量,如图 5-2 所示。模型通过*权重*学习查找向量。因此,该层学习的权重数量等于词汇表的大小乘以嵌入向量的维度(即 10,000 × 100 = 1,000,000)。 ![](img/gdl2_0502.png) ###### 图 5-2。嵌入层是每个整数标记的查找表 我们将每个整数标记嵌入到连续向量中,因为这使得模型能够学习每个单词的表示,这些表示可以通过反向传播进行更新。我们也可以只对每个输入标记进行独热编码,但使用嵌入层更可取,因为它使得嵌入本身是可训练的,从而使模型在决定如何嵌入每个标记以提高性能时更加灵活。 因此,`Input`层将形状为`[batch_size, seq_length]`的整数序列张量传递给`Embedding`层,后者输出形状为`[batch_size, seq_length, embedding_size]`的张量。然后将其传递给`LSTM`层(图 5-3)。 ![](img/gdl2_0503.png) ###### 图 5-3。单个序列在嵌入层中流动 ## LSTM 层 要理解 LSTM 层,我们首先必须看一下通用循环层的工作原理。 循环层具有特殊属性,能够处理顺序输入数据<math alttext="x 1 comma ellipsis comma x Subscript n Baseline"><mrow><msub><mi>x</mi> <mn>1</mn></msub> <mo>,</mo> <mo>⋯</mo> <mo>,</mo> <msub><mi>x</mi> <mi>n</mi></msub></mrow></math>。随着序列中的每个元素<math alttext="x Subscript t"><msub><mi>x</mi> 
<mi>t</mi></msub></math>逐个时间步通过,它会更新其*隐藏状态*<math alttext="h Subscript t"><msub><mi>h</mi> <mi>t</mi></msub></math>。 隐藏状态是一个向量,其长度等于细胞中的*单元*数——它可以被视为细胞对序列的当前理解。在时间步<math alttext="t"><mi>t</mi></math>,细胞使用先前的隐藏状态值<math alttext="h Subscript t minus 1"><msub><mi>h</mi> <mrow><mi>t</mi><mo>-</mo><mn>1</mn></mrow></msub></math>,以及当前时间步的数据<math alttext="x Subscript t"><msub><mi>x</mi> <mi>t</mi></msub></math>,产生一个更新的隐藏状态向量<math alttext="h Subscript t"><msub><mi>h</mi> <mi>t</mi></msub></math>。这个循环过程持续到序列结束。一旦序列结束,该层输出细胞的最终隐藏状态<math alttext="h Subscript n"><msub><mi>h</mi> <mi>n</mi></msub></math>,然后传递给网络的下一层。这个过程在图 5-4 中显示。 ![](img/gdl2_0504.png) ###### 图 5-4。循环层的简单图示 为了更详细地解释这一点,让我们展开这个过程,这样我们就可以看到单个序列是如何通过该层传递的(图 5-5)。 # 细胞权重 重要的是要记住,这个图中的所有细胞共享相同的权重(因为它们实际上是相同的细胞)。这个图与图 5-4 没有区别;只是以不同的方式绘制了循环层的机制。 ![](img/gdl2_0505.png) ###### 图 5-5。单个序列如何流经循环层 在这里,我们通过在每个时间步绘制细胞的副本来表示循环过程,并展示隐藏状态如何在流经细胞时不断更新。我们可以清楚地看到先前的隐藏状态如何与当前的顺序数据点(即当前嵌入的单词向量)混合以产生下一个隐藏状态。该层的输出是细胞的最终隐藏状态,在输入序列中的每个单词都被处理后。 ###### 警告 细胞的输出被称为*隐藏*状态是一个不幸的命名惯例——它并不真正隐藏,你不应该这样认为。事实上,最后一个隐藏状态是该层的整体输出,我们将利用这一点,稍后在本章中我们可以访问每个时间步的隐藏状态。 ## LSTM 细胞 现在我们已经看到了一个通用循环层是如何工作的,让我们来看看单个 LSTM 细胞的内部。 LSTM 细胞的工作是输出一个新的隐藏状态,<math alttext="h Subscript t"><msub><mi>h</mi> <mi>t</mi></msub></math>,给定其先前的隐藏状态,<math alttext="h Subscript t minus 1"><msub><mi>h</mi> <mrow><mi>t</mi><mo>-</mo><mn>1</mn></mrow></msub></math>,和当前的单词嵌入,<math alttext="x Subscript t"><msub><mi>x</mi> <mi>t</mi></msub></math>。回顾一下,<math alttext="h Subscript t"><msub><mi>h</mi> <mi>t</mi></msub></math>的长度等于 LSTM 中的单元数。这是在定义层时设置的一个参数,与序列的长度无关。 ###### 警告 确保不要混淆术语*细胞*和*单元*。在 LSTM 层中有一个细胞,由它包含的单元数定义,就像我们早期故事中的囚犯细胞包含许多囚犯一样。我们经常将循环层绘制为展开的细胞链,因为这有助于可视化如何在每个时间步更新隐藏状态。 LSTM 单元格维护一个单元格状态,<math alttext="上标 C 下标 t"><msub><mi>C</mi> <mi>t</mi></msub></math>,可以被视为单元格对序列当前状态的内部信念。这与隐藏状态,<math alttext="h 下标 t"><msub><mi>h</mi> <mi>t</mi></msub></math>,是不同的,隐藏状态最终在最后一个时间步输出。单元格状态与隐藏状态相同长度(单元格中的单元数)。 让我们更仔细地看一下单个单元格以及隐藏状态是如何更新的(图 5-6)。 隐藏状态在六个步骤中更新: 1. 
上一个时间步的隐藏状态,<math alttext="h 下标 t 减 1"><msub><mi>h</mi> <mrow><mi>t</mi><mo>-</mo><mn>1</mn></mrow></msub></math>,和当前的单词嵌入,<math alttext="x 下标 t"><msub><mi>x</mi> <mi>t</mi></msub></math>,被连接起来并通过*遗忘*门传递。这个门只是一个带有权重矩阵 <math alttext="上标 W 下标 f"><msub><mi>W</mi> <mi>f</mi></msub></math>,偏置 <math alttext="b 下标 f"><msub><mi>b</mi> <mi>f</mi></msub></math> 和 sigmoid 激活函数的稠密层。得到的向量,<math alttext="f 下标 t"><msub><mi>f</mi> <mi>t</msub></math>,长度等于单元格中的单元数,并包含介于 0 和 1 之间的值,确定了应该保留多少先前的单元格状态,<math alttext="上标 C 下标 t 减 1"><msub><mi>C</mi> <mrow><mi>t</mi><mo>-</mo><mn>1</mn></mrow></msub></math>。 ![](img/gdl2_0506.png) ###### 图 5-6\. LSTM 单元格 1. 连接的向量也通过一个*输入*门传递,类似于遗忘门,它是一个带有权重矩阵 <math alttext="上标 W 下标 i"><msub><mi>W</mi> <mi>i</mi></msub></math>,偏置 <math alttext="b 下标 i"><msub><mi>b</mi> <mi>i</msub></math> 和 sigmoid 激活函数的稠密层。这个门的输出,<math alttext="i 下标 t"><msub><mi>i</mi> <mi>t</msub></math>,长度等于单元格中的单元数,并包含介于 0 和 1 之间的值,确定了新信息将被添加到先前单元格状态,<math alttext="上标 C 下标 t 减 1"><msub><mi>C</mi> <mrow><mi>t</mi><mo>-</mo><mn>1</mn></mrow></msub></math>,的程度。 1. 连接的向量也通过一个带有权重矩阵 <math alttext="上标 W 上标 C"><msub><mi>W</mi> <mi>C</mi></msub></math>,偏置 <math alttext="b 上标 C"><msub><mi>b</mi> <mi>C</mi></msub></math> 和 tanh 激活函数的稠密层,生成一个向量 <math alttext="上标 C overTilde 下标 t"><msub><mover accent="true"><mi>C</mi> <mo>˜</mo></mover> <mi>t</msub></math>,其中包含单元格希望考虑保留的新信息。它的长度也等于单元格中的单元数,并包含介于-1 和 1 之间的值。 1. <math alttext="f 下标 t"><msub><mi>f</mi> <mi>t</mi></msub></math> 和 <math alttext="上标 C 下标 t 减 1"><msub><mi>C</mi> <mrow><mi>t</mi><mo>-</mo><mn>1</mn></mrow></msub></math> 逐元素相乘并加到 <math alttext="i 下标 t"><msub><mi>i</mi> <mi>t</mi></msub></math> 和 <math alttext="上标 C overTilde 下标 t"><msub><mover accent="true"><mi>C</mi> <mo>˜</mo></mover> <mi>t</mi></msub></math> 的逐元素乘积中。这代表了遗忘先前单元格状态的部分,并添加新的相关信息以生成更新后的单元格状态,<math alttext="上标 C 下标 t"><msub><mi>C</mi> <mi>t</mi></msub></math>。 1. 
连接后的向量通过一个*输出*门传递:一个带有权重矩阵<math alttext="upper W Subscript o"><msub><mi>W</mi> <mi>o</mi></msub></math>、偏置<math alttext="b Subscript o"><msub><mi>b</mi> <mi>o</mi></msub></math>和 sigmoid 激活函数的稠密层。得到的向量<math alttext="o Subscript t"><msub><mi>o</mi> <mi>t</mi></msub></math>的长度等于单元格中的单元数,并存储介于 0 和 1 之间的值,确定要从单元格中输出的更新后的单元格状态<math alttext="upper C Subscript t"><msub><mi>C</mi> <mi>t</mi></msub></math>的多少。 1. <math alttext="o Subscript t"><msub><mi>o</mi> <mi>t</mi></msub></math>与更新后的单元格状态<math alttext="upper C Subscript t"><msub><mi>C</mi> <mi>t</mi></msub></math>进行逐元素相乘,然后应用 tanh 激活函数产生新的隐藏状态<math alttext="h Subscript t"><msub><mi>h</mi> <mi>t</mi></msub></math>。 # Keras LSTM 层 所有这些复杂性都包含在 Keras 的`LSTM`层类型中,因此您不必担心自己实现它! ## 训练 LSTM 构建、编译和训练 LSTM 的代码在 Example 5-8 中给出。 ##### Example 5-8\. 构建、编译和训练 LSTM ```py inputs = layers.Input(shape=(None,), dtype="int32") # ① x = layers.Embedding(10000, 100)(inputs) # ② x = layers.LSTM(128, return_sequences=True)(x) # ③ outputs = layers.Dense(10000, activation = 'softmax')(x) # ④ lstm = models.Model(inputs, outputs) # ⑤ loss_fn = losses.SparseCategoricalCrossentropy() lstm.compile("adam", loss_fn) # ⑥ lstm.fit(train_ds, epochs=25) # ⑦ ``` ① `Input`层不需要我们提前指定序列长度(可以是灵活的),所以我们使用`None`作为占位符。 ② `Embedding`层需要两个参数,词汇量的大小(10,000 个标记)和嵌入向量的维度(100)。 ③ LSTM 层要求我们指定隐藏向量的维度(128)。我们还选择返回完整的隐藏状态序列,而不仅仅是最终时间步的隐藏状态。 ④ `Dense`层将每个时间步的隐藏状态转换为下一个标记的概率向量。 ⑤ 整体的`Model`在给定一系列标记的输入序列时预测下一个标记。它为序列中的每个标记执行此操作。 ⑥ 该模型使用`SparseCategoricalCrossentropy`损失进行编译——这与分类交叉熵相同,但在标签为整数而不是独热编码向量时使用。 ⑦ 模型适合训练数据集。 在 Figure 5-7 中,您可以看到 LSTM 训练过程的前几个时期——请注意随着损失指标下降,示例输出变得更加易懂。Figure 5-8 显示了整个训练过程中交叉熵损失指标的下降。 ![](img/gdl2_0507.png) ###### Figure 5-7\. LSTM 训练过程的前几个时期 ![](img/gdl2_0508.png) ###### Figure 5-8\. LSTM 训练过程中的交叉熵损失指标按时期 ## LSTM 的分析 现在我们已经编译和训练了 LSTM,我们可以开始使用它通过以下过程生成长文本字符串: 1. 用现有的单词序列喂给网络,并要求它预测下一个单词。 1. 
将这个单词附加到现有序列并重复。 网络将为每个单词输出一组概率,我们可以从中进行采样。因此,我们可以使文本生成具有随机性,而不是确定性。此外,我们可以引入一个*温度*参数到采样过程中,以指示我们希望过程有多确定性。 # 温度参数 接近 0 的温度使采样更加确定性(即,具有最高概率的单词很可能被选择),而温度为 1 意味着每个单词都以模型输出的概率被选择。 这是通过在示例 5-9 中的代码实现的,该代码创建了一个回调函数,可以在每个训练周期结束时用于生成文本。 ##### 示例 5-9。`TextGenerator`回调函数 ```py class TextGenerator(callbacks.Callback): def __init__(self, index_to_word, top_k=10): self.index_to_word = index_to_word self.word_to_index = { word: index for index, word in enumerate(index_to_word) } # ① def sample_from(self, probs, temperature): # ② probs = probs ** (1 / temperature) probs = probs / np.sum(probs) return np.random.choice(len(probs), p=probs), probs def generate(self, start_prompt, max_tokens, temperature): start_tokens = [ self.word_to_index.get(x, 1) for x in start_prompt.split() ] # ③ sample_token = None info = [] while len(start_tokens) < max_tokens and sample_token != 0: # ④ x = np.array([start_tokens]) y = self.model.predict(x) # ⑤ sample_token, probs = self.sample_from(y[0][-1], temperature) # ⑥ info.append({'prompt': start_prompt , 'word_probs': probs}) start_tokens.append(sample_token) # ⑦ start_prompt = start_prompt + ' ' + self.index_to_word[sample_token] print(f"\ngenerated text:\n{start_prompt}\n") return info def on_epoch_end(self, epoch, logs=None): self.generate("recipe for", max_tokens = 100, temperature = 1.0) ``` ① 创建一个反向词汇映射(从单词到标记)。 ② 此函数使用`temperature`缩放因子更新概率。 ③ 起始提示是您想要给模型以开始生成过程的一串单词(例如,*recipe for*)。首先将这些单词转换为标记列表。 ④ 序列生成直到达到`max_tokens`长度或产生停止令牌(0)为止。 ⑤ 模型输出每个单词成为序列中下一个单词的概率。 ⑥ 概率通过采样器传递以输出下一个单词,由`temperature`参数化。 ⑦ 我们将新单词附加到提示文本中,准备进行生成过程的下一次迭代。 让我们看看这在实际中是如何运作的,使用两个不同的温度值(图 5-9)。 ![](img/gdl2_0509.png) ###### 图 5-9。在`temperature = 1.0`和`temperature = 0.2`时生成的输出 关于这两段文字有几点需要注意。首先,两者在风格上与原始训练集中的食谱相似。它们都以食谱标题开头,并包含通常语法正确的结构。不同之处在于,温度为 1.0 的生成文本更加冒险,因此比温度为 0.2 的示例不够准确。因此,使用温度为 1.0 生成多个样本将导致更多的变化,因为模型正在从具有更大方差的概率分布中进行抽样。 为了证明这一点,图 5-10 显示了一系列提示的前五个具有最高概率的标记,对于两个温度值。 ![](img/gdl2_0510.png) ###### 图 5-10。在不同序列后的单词概率分布,对于温度值为 1.0 和 0.2 
该模型能够在一系列上下文中生成下一个最可能的单词的适当分布。例如,即使模型从未被告知过名词、动词或数字等词类,它通常能够将单词分为这些类别并以语法正确的方式使用它们。 此外,该模型能够选择一个适当的动词来开始食谱说明,这取决于前面的标题。对于烤蔬菜,它选择`preheat`、`prepare`、`heat`、`put`或`combine`作为最可能的可能性,而对于冰淇淋,它选择`in`、`combine`、`stir`、`whisk`和`mix`。这表明该模型对于根据其成分而异的食谱之间的差异具有一定的上下文理解。 还要注意`temperature = 0.2`示例的概率更加倾向于第一个选择标记。这就是为什么当温度较低时,生成的变化通常较少的原因。 虽然我们的基本 LSTM 模型在生成逼真文本方面表现出色,但很明显它仍然难以理解所生成单词的一些语义含义。它引入了一些不太可能搭配在一起的成分(例如,酸味日本土豆、山核桃碎屑和果冻)!在某些情况下,这可能是可取的——比如,如果我们希望我们的 LSTM 生成有趣和独特的单词模式——但在其他情况下,我们需要我们的模型对单词如何组合在一起以及在文本中引入的想法有更深入的理解和更长的记忆。 在下一节中,我们将探讨如何改进我们的基本 LSTM 网络。在第九章中,我们将看一看一种新型的自回归模型,Transformer,将语言建模提升到一个新的水平。 前一节中的模型是一个简单的示例,展示了如何训练 LSTM 学习如何以给定风格生成文本。在本节中,我们将探讨这个想法的几个扩展。 ## 堆叠循环网络 我们刚刚看到的网络包含一个单独的 LSTM 层,但我们也可以训练具有堆叠 LSTM 层的网络,以便从文本中学习更深层次的特征。 为了实现这一点,我们只需在第一层之后引入另一层 LSTM。第二层 LSTM 可以使用第一层的隐藏状态作为其输入数据。这在图 5-11 中显示,整体模型架构在表 5-2 中显示。 ![](img/gdl2_0511.png) ###### 图 5-11。多层 RNN 的示意图:g[t]表示第一层的隐藏状态,h[t]表示第二层的隐藏状态 表 5-2。堆叠 LSTM 的模型摘要 | 层(类型) | 输出形状 | 参数 # | | --- | --- | --- | | 输入层 | (None, None) | 0 | | 嵌入 | (None, None, 100) | 1,000,000 | | LSTM | (None, None, 128) | 117,248 | | LSTM | (None, None, 128) | 131,584 | | 稠密 | (None, None, 10000) | 1,290,000 | | 总参数 | 2,538,832 | | 可训练参数 | 2,538,832 | | 不可训练参数 | 0 | 构建堆叠 LSTM 的代码在示例 5-10 中给出。 ##### 示例 5-10。构建堆叠 LSTM ```py text_in = layers.Input(shape = (None,)) embedding = layers.Embedding(total_words, embedding_size)(text_in) x = layers.LSTM(n_units, return_sequences = True)(x) x = layers.LSTM(n_units, return_sequences = True)(x) probabilites = layers.Dense(total_words, activation = 'softmax')(x) model = models.Model(text_in, probabilites) ``` ## 门控循环单元 另一种常用的 RNN 层是*门控循环单元*(GRU)。² 与 LSTM 单元的主要区别如下: 1. *遗忘*和*输入*门被*重置*和*更新*门替换。 1. 没有*细胞状态*或*输出*门,只有从细胞输出的*隐藏状态*。 隐藏状态通过四个步骤更新,如图 5-12 所示。 ![](img/gdl2_0512.png) ###### 图 5-12。单个 GRU 单元 过程如下: 1. 
上一个时间步的隐藏状态,<math alttext="h Subscript t minus 1"><msub><mi>h</mi> <mrow><mi>t</mi><mo>-</mo><mn>1</mn></mrow></msub></math>,和当前单词嵌入,<math alttext="x Subscript t"><msub><mi>x</mi> <mi>t</mi></msub></math>,被串联并用于创建*重置*门。这个门是一个密集层,带有权重矩阵<math alttext="upper W Subscript r"><msub><mi>W</mi> <mi>r</mi></msub></math>和一个 sigmoid 激活函数。得到的向量,<math alttext="r Subscript t"><msub><mi>r</mi> <mi>t</mi></msub></math>,长度等于细胞中的单元数,并存储介于 0 和 1 之间的值,确定应该将多少上一个隐藏状态,<math alttext="h Subscript t minus 1"><msub><mi>h</mi> <mrow><mi>t</mi><mo>-</mo><mn>1</mn></mrow></msub></math>,传递到新信念的计算中。 1. 重置门应用于隐藏状态,<math alttext="h Subscript t minus 1"><msub><mi>h</mi> <mrow><mi>t</mi><mo>-</mo><mn>1</mn></mrow></msub></math>,并与当前单词嵌入<math alttext="x Subscript t"><msub><mi>x</mi> <mi>t</mi></msub></math>连接。然后将该向量馈送到具有权重矩阵<math alttext="upper W"><mi>W</mi></math>和 tanh 激活函数的密集层,以生成一个向量<math alttext="h overTilde Subscript t"><msub><mover accent="true"><mi>h</mi> <mo>˜</mo></mover> <mi>t</mi></msub></math>,其中存储了细胞的新信念。它的长度等于细胞中的单元数,并存储在-1 和 1 之间的值。 1. 前一个时间步的隐藏状态<math alttext="h Subscript t minus 1"><msub><mi>h</mi> <mrow><mi>t</mi><mo>-</mo><mn>1</mn></mrow></msub></math>和当前单词嵌入<math alttext="x Subscript t"><msub><mi>x</mi> <mi>t</mi></msub></math>的连接也用于创建*更新*门。该门是一个具有权重矩阵<math alttext="upper W Subscript z"><msub><mi>W</mi> <mi>z</mi></msub></math>和 sigmoid 激活的密集层。生成的向量<math alttext="z Subscript t"><msub><mi>z</mi> <mi>t</msub></math>的长度等于细胞中的单元数,并存储在 0 和 1 之间的值,用于确定新信念<math alttext="h overTilde Subscript t"><msub><mover accent="true"><mi>h</mi> <mo>˜</mo></mover> <mi>t</mi></msub></math>的多少要混合到当前隐藏状态<math alttext="h Subscript t minus 1"><msub><mi>h</mi> <mrow><mi>t</mi><mo>-</mo><mn>1</mn></mrow></msub></math>中。 1. 
细胞的新信念<math alttext="h overTilde Subscript t"><msub><mover accent="true"><mi>h</mi> <mo>˜</mo></mover> <mi>t</mi></msub></math>和当前隐藏状态<math alttext="h Subscript t minus 1"><msub><mi>h</mi> <mrow><mi>t</mi><mo>-</mo><mn>1</mn></mrow></msub></math>按照更新门<math alttext="z Subscript t"><msub><mi>z</mi> <mi>t</msub></math>确定的比例混合,以产生更新后的隐藏状态<math alttext="h Subscript t"><msub><mi>h</mi> <mi>t</msub></math>,从细胞中输出。 ## 双向细胞 对于预测问题,在推断时模型可以获得整个文本,没有理由只在正向方向处理序列 - 它同样可以被反向处理。`Bidirectional`层通过存储两组隐藏状态来利用这一点:一组是由序列在通常的正向方向处理时产生的,另一组是在序列被反向处理时产生的。这样,该层可以从给定时间步之前和之后的信息中学习。 在 Keras 中,这被实现为对循环层的包装,如示例 5-11 所示。 ##### 示例 5-11。构建双向 GRU 层 ```py layer = layers.Bidirectional(layers.GRU(100)) ``` # 隐藏状态 结果层中的隐藏状态是长度等于包装细胞中单元数两倍的向量(正向和反向隐藏状态的连接)。因此,在此示例中,该层的隐藏状态是长度为 200 的向量。 到目前为止,我们只将自回归模型(LSTMs)应用于文本数据。在下一节中,我们将看到如何使用自回归模型来生成图像。 # PixelCNN 2016 年,van den Oord 等人³提出了一种通过预测下一个像素的可能性来逐像素生成图像的模型。该模型称为*PixelCNN*,可以训练以自回归方式生成图像。 我们需要介绍两个新概念来理解 PixelCNN - *掩码卷积层*和*残差块*。 # 运行此示例的代码 此示例的代码可以在位于书籍存储库中的 Jupyter 笔记本中找到,路径为*notebooks/05_autoregressive/02_pixelcnn/pixelcnn.ipynb*。 该代码改编自由 ADMoreau 创建的出色的[PixelCNN 教程](https://keras.io/examples/generative/pixelcnn),可在 Keras 网站上找到。 ## 掩码卷积层 正如我们在第二章中看到的,卷积层可以通过应用一系列滤波器从图像中提取特征。在特定像素处的层的输出是滤波器权重乘以围绕像素中心的小正方形上一层值的加权和。这种方法可以检测边缘和纹理,而在更深的层中,可以检测形状和更高级的特征。 虽然卷积层在特征检测方面非常有用,但不能直接以自回归的方式使用,因为像素上没有顺序。它们依赖于所有像素都被平等对待的事实——没有像素被视为图像的*开始*或*结束*。这与我们在本章中已经看到的文本数据形成对比,其中令牌有明确的顺序,因此可以轻松应用循环模型,如 LSTM。 为了能够以自回归的方式将卷积层应用于图像生成,我们必须首先对像素进行排序,并确保滤波器只能看到在问题像素之前的像素。然后,我们可以通过将卷积滤波器应用于当前图像来一次生成一个像素,以预测下一个像素的值。 我们首先需要为像素选择一个顺序——一个明智的建议是按照从左上到右下的顺序对像素进行排序,首先沿着行移动,然后沿着列向下移动。 然后,我们对卷积滤波器进行掩码处理,以便每个像素处的层的输出仅受到在问题像素之前的像素值的影响。这是通过将一个由 1 和 0 组成的掩码与滤波器权重矩阵相乘来实现的,以便在目标像素之后的任何像素的值都被置为零。 在 PixelCNN 中实际上有两种不同类型的掩码,如图 5-13 所示: + 类型 A,中心像素的值被掩码 + 类型 B,中心像素的值*未*被掩码 ![](img/gdl2_0513.png) ###### 图 5-13。左:卷积滤波器掩码;右:应用于一组像素以预测中心像素值分布的掩码(来源:[van den Oord 等人,2016](https://arxiv.org/pdf/1606.05328)) 初始的掩码卷积层(即直接应用于输入图像的层)不能使用中心像素,因为这正是我们希望网络猜测的像素!然而,后续层可以使用中心像素,因为这将仅根据原始输入图像中前面像素的信息计算出来。 我们可以在示例 5-12 
中看到如何使用 Keras 构建`MaskedConvLayer`。 ##### 示例 5-12。Keras 中的`MaskedConvLayer` ```py class MaskedConvLayer(layers.Layer): def __init__(self, mask_type, **kwargs): super(MaskedConvLayer, self).__init__() self.mask_type = mask_type self.conv = layers.Conv2D(**kwargs) # ① def build(self, input_shape): self.conv.build(input_shape) kernel_shape = self.conv.kernel.get_shape() self.mask = np.zeros(shape=kernel_shape) # ② self.mask[: kernel_shape[0] // 2, ...] = 1.0 # ③ self.mask[kernel_shape[0] // 2, : kernel_shape[1] // 2, ...] = 1.0 # ④ if self.mask_type == "B": self.mask[kernel_shape[0] // 2, kernel_shape[1] // 2, ...] = 1.0 # ⑤ def call(self, inputs): self.conv.kernel.assign(self.conv.kernel * self.mask) # ⑥ return self.conv(inputs) ``` ① `MaskedConvLayer`基于普通的`Conv2D`层。 ② 掩码初始化为全零。 ③ 前面行中的像素将被一个 1 解除掩码。 ④ 前面列中在同一行中的像素将被一个 1 解除掩码。 ⑤ 如果掩码类型为 B,则中心像素将被一个 1 解除掩码。 ⑥ 掩码与滤波器权重相乘。 请注意,这个简化的例子假设是灰度图像(即,只有一个通道)。如果是彩色图像,我们将有三个颜色通道,我们也可以对它们进行排序,例如,红色通道在蓝色通道之前,蓝色通道在绿色通道之前。 ## 残差块 现在我们已经看到如何对卷积层进行掩码,我们可以开始构建我们的 PixelCNN。我们将使用的核心构建块是残差块。 *残差块*是一组层,其中输出在传递到网络的其余部分之前添加到输入中。换句话说,输入有一条*快速通道*到输出,而无需经过中间层——这被称为*跳跃连接*。包含跳跃连接的理由是,如果最佳转换只是保持输入不变,这可以通过简单地将中间层的权重置零来实现。如果没有跳跃连接,网络将不得不通过中间层找到一个恒等映射,这要困难得多。 我们在 PixelCNN 中的残差块的图示在图 5-14 中显示。 ![](img/gdl2_0514.png) ###### 图 5-14。一个 PixelCNN 残差块(箭头旁边是滤波器的数量,层旁边是滤波器大小) 我们可以使用示例 5-13 中显示的代码构建一个`ResidualBlock`。 ##### 示例 5-13。一个`ResidualBlock` ```py class ResidualBlock(layers.Layer): def __init__(self, filters, **kwargs): super(ResidualBlock, self).__init__(**kwargs) self.conv1 = layers.Conv2D( filters=filters // 2, kernel_size=1, activation="relu" ) # ① self.pixel_conv = MaskedConv2D( mask_type="B", filters=filters // 2, kernel_size=3, activation="relu", padding="same", ) # ② self.conv2 = layers.Conv2D( filters=filters, kernel_size=1, activation="relu" ) # ③ def call(self, inputs): x = self.conv1(inputs) x = self.pixel_conv(x) x = self.conv2(x) return layers.add([inputs, x]) # ④ ``` ① 初始的`Conv2D`层将通道数量减半。 ② Type B `MaskedConv2D`层,核大小为 
3,仅使用来自五个像素的信息——上面一行中的三个像素,左边一个像素和焦点像素本身。 ③ 最终的`Conv2D`层将通道数量加倍,以再次匹配输入形状。 ④ 卷积层的输出与输入相加——这是跳跃连接。 ## 训练 PixelCNN 在示例 5-14 中,我们组合了整个 PixelCNN 网络,大致遵循原始论文中的结构。在原始论文中,输出层是一个有 256 个滤波器的`Conv2D`层,使用 softmax 激活。换句话说,网络试图通过预测正确的像素值来重新创建其输入,有点像自动编码器。不同之处在于,PixelCNN 受到限制,以便不允许来自早期像素的信息流通过影响每个像素的预测,这是由于网络设计方式,使用`MaskedConv2D`层。 这种方法的一个挑战是网络无法理解,比如说,像素值 200 非常接近像素值 201。它必须独立学习每个像素输出值,这意味着即使对于最简单的数据集,训练也可能非常缓慢。因此,在我们的实现中,我们简化输入,使每个像素只能取四个值之一。这样,我们可以使用一个有 4 个滤波器的`Conv2D`输出层,而不是 256 个。 ##### 示例 5-14。PixelCNN 架构 ```py inputs = layers.Input(shape=(16, 16, 1)) # ① x = MaskedConv2D(mask_type="A" , filters=128 , kernel_size=7 , activation="relu" , padding="same")(inputs)# ② for _ in range(5): x = ResidualBlock(filters=128)(x) # ③ for _ in range(2): x = MaskedConv2D( mask_type="B", filters=128, kernel_size=1, strides=1, activation="relu", padding="valid", )(x) # ④ out = layers.Conv2D( filters=4, kernel_size=1, strides=1, activation="softmax", padding="valid" )(x) # ⑤ pixel_cnn = models.Model(inputs, out) # ⑥ adam = optimizers.Adam(learning_rate=0.0005) pixel_cnn.compile(optimizer=adam, loss="sparse_categorical_crossentropy") pixel_cnn.fit( input_data , output_data , batch_size=128 , epochs=150 ) # ⑦ ``` ① 模型的`Input`是一个尺寸为 16×16×1 的灰度图像,输入值在 0 到 1 之间缩放。 ② 第一个 Type A `MaskedConv2D`层,核大小为 7,使用来自 24 个像素的信息——在焦点像素上面的三行中的 21 个像素和左边的 3 个像素(焦点像素本身不使用)。 ③ 五个`ResidualBlock`层组被顺序堆叠。 ④ 两个 Type B `MaskedConv2D`层,核大小为 1,作为每个像素通道数量的`Dense`层。 ⑤ 最终的`Conv2D`层将通道数减少到四——本示例中的像素级别数。 ⑥ `Model`被构建为接受一幅图像并输出相同尺寸的图像。 ⑦ 拟合模型——`input_data`在范围[0,1](浮点数)内缩放;`output_data`在范围[0,3](整数)内缩放。 ## PixelCNN 的分析 我们可以在我们在第三章中遇到的 Fashion-MNIST 数据集上训练我们的 PixelCNN。要生成新图像,我们需要要求模型根据所有先前像素预测下一个像素,逐个像素进行预测。与诸如变分自动编码器的模型相比,这是一个非常缓慢的过程!对于一幅 32×32 的灰度图像,我们需要使用模型进行 1,024 次顺序预测,而不是我们需要为 VAE 进行的单次预测。这是自回归模型如 PixelCNN 的主要缺点之一——由于采样过程的顺序性质,它们从中采样速度较慢。 因此,我们使用图像尺寸为 16×16,而不是 32×32,以加快生成新图像的速度。生成回调类如示例 5-15 所示。 ##### 示例 5-15。使用 PixelCNN 生成新图像 ```py class ImageGenerator(callbacks.Callback): def __init__(self, num_img): self.num_img = 
num_img def sample_from(self, probs, temperature): probs = probs ** (1 / temperature) probs = probs / np.sum(probs) return np.random.choice(len(probs), p=probs) def generate(self, temperature): generated_images = np.zeros( shape=(self.num_img,) + (pixel_cnn.input_shape)[1:] ) # ① batch, rows, cols, channels = generated_images.shape for row in range(rows): for col in range(cols): for channel in range(channels): probs = self.model.predict(generated_images)[ :, row, col, : ] # ② generated_images[:, row, col, channel] = [ self.sample_from(x, temperature) for x in probs ] # ③ generated_images[:, row, col, channel] /= 4 # ④ return generated_images def on_epoch_end(self, epoch, logs=None): generated_images = self.generate(temperature = 1.0) display( generated_images, save_to = "./output/generated_img_%03d.png" % (epoch) s) img_generator_callback = ImageGenerator(num_img=10) ``` ① 从一批空白图像(全零)开始。 ② 循环遍历当前图像的行、列和通道,预测下一个像素值的分布。 ③ 从预测分布中抽取一个像素级别(对于我们的示例,范围在[0,3]内)。 ④ 将像素级别转换为范围[0,1]并覆盖当前图像中的像素值,准备好进行下一次循环迭代。 在图 5-15 中,我们可以看到原始训练集中的几幅图像,以及由 PixelCNN 生成的图像。 ![](img/gdl2_0515.png) ###### 图 5-15。训练集中的示例图像和由 PixelCNN 模型生成的图像 该模型在重新创建原始图像的整体形状和风格方面做得很好!令人惊讶的是,我们可以将图像视为一系列令牌(像素值),并应用自回归模型如 PixelCNN 来生成逼真的样本。 如前所述,自回归模型的一个缺点是它们从中采样速度较慢,这就是为什么本书中提供了它们应用的一个简单示例。然而,正如我们将在第十章中看到的,更复杂形式的自回归模型可以应用于图像以产生最先进的输出。在这种情况下,缓慢的生成速度是为了获得卓越质量输出而必须付出的代价。 自原始论文发表以来,PixelCNN 的架构和训练过程已经进行了几项改进。以下部分介绍了其中一项变化——使用混合分布,并演示了如何使用内置的 TensorFlow 函数训练带有此改进的 PixelCNN 模型。 ## 混合分布 对于我们之前的示例,我们将 PixelCNN 的输出减少到只有 4 个像素级别,以确保网络不必学习 256 个独立像素值的分布,这将减慢训练过程。然而,这远非理想——对于彩色图像,我们不希望我们的画布仅限于少数可能的颜色。 为了解决这个问题,我们可以使网络的输出成为*混合分布*,而不是对 256 个离散像素值进行 softmax,遵循 Salimans 等人提出的想法。4 混合分布简单地是两个或更多其他概率分布的混合。例如,我们可以有五个具有不同参数的逻辑分布的混合分布。混合分布还需要一个离散分类分布,表示选择混合中包含的每个分布的概率。示例显示在图 5-16 中。 ![](img/gdl2_0516.png) ###### 图 5-16。三个具有不同参数的正态分布的混合分布——三个正态分布上的分类分布为`[0.5, 0.3, 0.2]` 要从混合分布中抽样,我们首先从分类分布中抽样以选择特定的子分布,然后以通常的方式从中抽样。这样,我们可以用相对较少的参数创建复杂的分布。例如,图 5-16 中的混合分布仅需要八个参数——两个用于分类分布,以及三个正态分布的均值和方差。这与定义整个像素范围上的分类分布所需的 255 个参数相比要少。 方便地,TensorFlow 
Probability 库提供了一个函数,允许我们用一行代码创建具有混合分布输出的 PixelCNN。示例 5-16 说明了如何使用此函数构建 PixelCNN。 # 运行此示例的代码 此示例的代码可以在书籍存储库中的 Jupyter 笔记本*notebooks/05_autoregressive/03_pixelcnn_md/pixelcnn_md.ipynb*中找到。 ##### 示例 5-16。使用 TensorFlow 函数构建 PixelCNN ```py import tensorflow_probability as tfp dist = tfp.distributions.PixelCNN( image_shape=(32, 32, 1), num_resnet=1, num_hierarchies=2, num_filters=32, num_logistic_mix=5, dropout_p=.3, ) # ① image_input = layers.Input(shape=(32, 32, 1)) # ② log_prob = dist.log_prob(image_input) model = models.Model(inputs=image_input, outputs=log_prob) # ③ model.add_loss(-tf.reduce_mean(log_prob)) # ④ ``` ① 将 PixelCNN 定义为一个分布——即,输出层是由五个逻辑分布组成的混合分布。 ② 输入是大小为 32×32×1 的灰度图像。 ③ `Model`以灰度图像作为输入,并输出在 PixelCNN 计算的混合分布下图像的对数似然。 ④ 损失函数是输入图像批次上的平均负对数似然。 该模型的训练方式与以前相同,但这次接受整数像素值作为输入,范围为[0, 255]。可以使用`sample`函数从分布中生成输出,如示例 5-17 所示。 ##### 示例 5-17。从 PixelCNN 混合分布中抽样 ```py dist.sample(10).numpy() ``` 示例生成的图像显示在图 5-17 中。与以前的示例不同的是,现在正在利用完整的像素值范围。 ![](img/gdl2_0517.png) ###### 图 5-17。使用混合分布输出的 PixelCNN 的输出 # 总结 在本章中,我们看到了自回归模型,如循环神经网络如何应用于生成模仿特定写作风格的文本序列,以及 PixelCNN 如何以顺序方式生成图像,每次一个像素。 我们探索了两种不同类型的循环层——长短期记忆(LSTM)和门控循环单元(GRU)——并看到这些单元如何可以堆叠或双向化以形成更复杂的网络架构。我们构建了一个 LSTM 来使用 Keras 生成逼真的食谱,并看到如何操纵采样过程的温度以增加或减少输出的随机性。 我们还看到了如何以自回归方式生成图像,使用了 PixelCNN。我们使用 Keras 从头开始构建了一个 PixelCNN,编写了掩膜卷积层和残差块,以允许信息在网络中流动,从而只能使用前面的像素来生成当前的像素。最后,我们讨论了 TensorFlow Probability 库提供了一个独立的 `PixelCNN` 函数,实现了混合分布作为输出层,使我们能够进一步改进学习过程。 在下一章中,我们将探讨另一种生成建模家族,明确地对数据生成分布进行建模—正规化流模型。 ¹ Sepp Hochreiter 和 Jürgen Schmidhuber, “长短期记忆,” *神经计算* 9 (1997): 1735–1780, [*https://www.bioinf.jku.at/publications/older/2604.pdf*](https://www.bioinf.jku.at/publications/older/2604.pdf). ² Kyunghyun Cho 等人, “使用 RNN 编码器-解码器学习短语表示进行统计机器翻译,” 2014 年 6 月 3 日, [*https://arxiv.org/abs/1406.1078*](https://arxiv.org/abs/1406.1078). ³ Aaron van den Oord 等人, “像素递归神经网络,” 2016 年 8 月 19 日, [*https://arxiv.org/abs/1601.06759*](https://arxiv.org/abs/1601.06759). 
⁴ Tim Salimans 等人, “PixelCNN++: 使用离散化逻辑混合似然和其他修改改进 PixelCNN,” 2017 年 1 月 19 日, [*http://arxiv.org/abs/1701.05517*](http://arxiv.org/abs/1701.05517).
package microfb.server; import java.util.ArrayList; import java.util.List; import java.util.Properties; import java.util.UUID; import javax.mail.Message; import javax.mail.Session; import javax.mail.Transport; import javax.mail.internet.InternetAddress; import javax.mail.internet.MimeMessage; import microfb.client.services.MicroFBService; import microfb.server.entities.Korisnik; import microfb.server.entities.Slike; import microfb.shared.Validator; import microfb.shared.IO.AktivnostIO; import microfb.shared.IO.ImageIO; import microfb.shared.IO.KorisnikIO; import microfb.shared.IO.TypeIO; import microfb.shared.exceptions.LogedOutException; import com.google.gwt.core.client.GWT; import com.google.gwt.user.server.rpc.RemoteServiceServlet; public class Service extends RemoteServiceServlet implements MicroFBService { private static final long serialVersionUID = 1L; private boolean loggedIn(String sessionID) { if( EntityHelper.sessionIDExists("sessionID") ) { try { EntityHelper.userActive(sessionID); } catch (Exception e) {e.printStackTrace();} return true; } return false; } @Override public String login(String mail, String password, Boolean keepMeLoggedIn ) throws IllegalArgumentException { String sessionID = EntityHelper.getSessionID(mail); if( !sessionID.equals("") ) return sessionID; sessionID = UUID.randomUUID().toString(); try { EntityHelper.setSessionID( mail, sessionID , keepMeLoggedIn ); }catch(Exception e) { java.util.logging.Logger log = java.util.logging.Logger.getLogger("Test"); log.severe("login failed"); } return sessionID; } @Override public void logout(String sessionID) throws IllegalArgumentException { if( !loggedIn(sessionID) ) throw new LogedOutException(); try { EntityHelper.setSessionID( EntityHelper.getKorisnikBySessionId(sessionID).getEmail() , "", false ); }catch(Exception e) { java.util.logging.Logger log = java.util.logging.Logger.getLogger("Test"); log.severe("logout failed"); } } public static void sendMail( String email, String subject, 
String msgBody ) throws IllegalArgumentException { try { Properties props = new Properties(); Session session = Session.getDefaultInstance(props, null); Message msg = new MimeMessage(session); msg.setFrom(new InternetAddress("[email protected]", "MicroFB")); msg.addRecipient(Message.RecipientType.TO, new InternetAddress(email, email)); msg.setSubject(subject); msg.setText(msgBody); Transport.send(msg); } catch (Exception ex) { GWT.log("Error while sending an e-mail to '" + email + "'", ex); throw new IllegalArgumentException("greska sa mailom"); } } @Override public void register(String email, String password, String name, String birthdate, String city, String workplace, String quote, String sex ) throws Exception { if( !Validator.email(email) ) throw new IllegalArgumentException("not a valid email"); if( !Validator.password(password) ) throw new IllegalArgumentException("not a valid password"); if( EntityHelper.userExists(email) ) throw new IllegalArgumentException("user with that email already exists"); String actCode = UUID.randomUUID().toString(); try { sendMail(email, "MicroFB User Registration", "Dear User,\n\n" + "Thank you for registering with our service!\n\n" + "In order to complete the registration procedure, please visit the following link:\n\n" + "http://mikrofejs.appspot.com/activation?code=" + actCode + "\n\n" + "The link will expire in 24h. 
After that, you will have to repeat the registration procedure.\n\n" + "Thank you once again for choosing our service!"); EntityHelper.createNewKorisnik( email, password,name, birthdate, city, workplace, quote,sex, actCode ); } catch (Exception e) { GWT.log("create new korisnik failed", e); throw new IllegalArgumentException("FATAL ERROR when creating user"); } } @Override public KorisnikIO getKorisnik(String sessionID) throws IllegalArgumentException { if( !loggedIn(sessionID) ) throw new LogedOutException(); Korisnik k = EntityHelper.getKorisnikBySessionId(sessionID); if( k == null ) throw new IllegalArgumentException("no user with that sessionID"); return new KorisnikIO( k.getName(),k.getPassword(),k.getEmail(), k.getBirthdate().toString(),k.getCity(), k.getWorkplace(),k.getQuote(),k.getSex()); } @Override public List<KorisnikIO> getAllKorisnik(String sessionID) throws IllegalArgumentException { if( !loggedIn(sessionID) ) throw new LogedOutException(); List<KorisnikIO> lista = new ArrayList<KorisnikIO>(); for( Korisnik k : EntityHelper.getAllKorisnik() ) lista.add( new KorisnikIO( k.getName(),k.getPassword(),k.getEmail(), k.getBirthdate().toString(),k.getCity(), k.getWorkplace(),k.getQuote(),k.getSex())); return lista; } @Override public List<KorisnikIO> searchForKorisnik(String sessionID, String queryString) throws IllegalArgumentException { if( !loggedIn(sessionID) ) throw new LogedOutException(); List<KorisnikIO> lista = new ArrayList<KorisnikIO>(); for( Korisnik k : EntityHelper.findKirisnikByQueryString(queryString) ) lista.add( new KorisnikIO( k.getName(),k.getPassword(),k.getEmail(), k.getBirthdate().toString(),k.getCity(), k.getWorkplace(),k.getQuote(),k.getSex())); return lista; } @Override public void updateKorisnik(String sessionID, KorisnikIO korisnik) throws IllegalArgumentException { if( !loggedIn(sessionID) ) throw new LogedOutException(); //EntityHelper.updateKorinik(korisnik); } @Override public boolean post(String sessionID, String value, 
TypeIO tip) throws IllegalArgumentException { if( !loggedIn(sessionID) ) throw new LogedOutException(); // TODO Auto-generated method stub return false; } @Override public boolean updatePost(String sessionID, AktivnostIO aktivnostIO, String value, TypeIO tip) throws IllegalArgumentException { if( !loggedIn(sessionID) ) throw new LogedOutException(); // TODO Auto-generated method stub return false; } @Override public boolean deletePost(String sessionID, AktivnostIO aktivnostIO) throws IllegalArgumentException { if( !loggedIn(sessionID) ) throw new LogedOutException(); // TODO Auto-generated method stub return false; } @Override public List<AktivnostIO> getWallItemsCount(String email, long from, long to) throws IllegalArgumentException { // TODO Auto-generated method stub return null; } @Override public List<AktivnostIO> getWallItems(String email, long from, int to) throws IllegalArgumentException { // TODO Auto-generated method stub return null; } @Override public boolean likeEnabled(String sessionID, AktivnostIO aktivnostIO) throws IllegalArgumentException { if( !loggedIn(sessionID) ) throw new LogedOutException(); // TODO Auto-generated method stub return false; } @Override public boolean like(String sessionID, AktivnostIO aktivnostIO) throws IllegalArgumentException { if( !loggedIn(sessionID) ) throw new LogedOutException(); // TODO Auto-generated method stub return false; } @Override public boolean unlike(String sessionID, AktivnostIO aktivnostIO) throws IllegalArgumentException { if( !loggedIn(sessionID) ) throw new LogedOutException(); // TODO Auto-generated method stub return false; } }
# Copyright (C) 2021 Nippon Telegraph and Telephone Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import gettext

# BUGFIX: `_` was used throughout this module without ever being defined or
# imported, so every `message = _("...")` below raised NameError at import
# time.  gettext.gettext is the stdlib translation marker that returns the
# message unchanged unless a translation catalog is installed.
_ = gettext.gettext


class SolException(Exception):
    """Exception for SOL ProblemDetails

    Generally status, title and message should be defined in derived class.
    detail is constructed from message and kwargs.

    Attributes in ProblemDetails can be specified in kwargs of object
    initialization. Use `sol_*` (ex. `sol_instance`) to avoid confliction
    with kwargs.
    """

    # Defaults for the ProblemDetails fields; derived classes override them.
    status = 500
    title = None
    message = 'Internal Server Error'

    def __init__(self, **kwargs):
        self.status = kwargs.pop('sol_status', self.status)
        self.title = kwargs.pop('sol_title', self.title)
        self.type = kwargs.pop('sol_type', None)
        self.instance = kwargs.pop('sol_instance', None)
        # BUGFIX: interpolate the message lazily.  The original passed
        # `self.message % kwargs` as the default argument to pop(), which was
        # evaluated even when 'sol_detail' was supplied and raised KeyError
        # whenever the caller did not also provide the message placeholders.
        detail = kwargs.pop('sol_detail', None)
        if detail is None:
            detail = self.message % kwargs
        self.detail = detail
        super().__init__(self.detail)

    def make_problem_details(self):
        """Return this exception as a SOL ProblemDetails dict.

        Optional fields (title, type, instance) are included only when set.
        """
        res = {'status': self.status,
               'detail': self.detail}
        if self.title is not None:
            res['title'] = self.title
        if self.type is not None:
            res['type'] = self.type
        if self.instance is not None:
            res['instance'] = self.instance

        return res


# --- Generic HTTP error bases; derived classes supply the message. ---

class SolHttpError400(SolException):
    status = 400
    title = 'Bad Request'


class SolHttpError403(SolException):
    status = 403
    title = 'Forbidden'


class SolHttpError404(SolException):
    status = 404
    title = 'Not Found'


class SolHttpError405(SolException):
    status = 405
    title = 'Method Not Allowed'


class SolHttpError406(SolException):
    status = 406
    title = 'Not Acceptable'


class SolHttpError409(SolException):
    status = 409
    title = 'Conflict'


class SolHttpError422(SolException):
    status = 422
    title = 'Unprocessable Entity'


# --- Concrete SOL API errors. ---

class MethodNotAllowed(SolHttpError405):
    message = _("Method %(method)s is not supported.")


class SolValidationError(SolHttpError400):
    message = _("%(detail)s")


class InvalidAPIVersionString(SolHttpError400):
    message = _("Version String %(version)s is of invalid format. Must "
                "be of format Major.Minor.Patch.")


class APIVersionMissing(SolHttpError400):
    message = _("'Version' HTTP header missing.")


class APIVersionNotSupported(SolHttpError406):
    message = _("Version %(version)s not supported.")


class VnfdIdNotEnabled(SolHttpError422):
    message = _("VnfId %(vnfd_id)s not ENABLED.")


class VnfInstanceNotFound(SolHttpError404):
    message = _("VnfInstance %(inst_id)s not found.")


class NotSupportUpgradeType(SolHttpError400):
    message = _("not support upgrade_type %(upgrade_type)s")


class VnfInstanceIsInstantiated(SolHttpError409):
    message = _("VnfInstance %(inst_id)s is instantiated.")


class VnfInstanceIsNotInstantiated(SolHttpError409):
    message = _("VnfInstance %(inst_id)s isn't instantiated.")


class VnfInstanceIsNotChanged(SolHttpError409):
    message = _("VnfInstance %(inst_id)s isn't changed.")


class LccnSubscriptionNotFound(SolHttpError404):
    message = _("LccnSubscription %(subsc_id)s not found.")


class VnfLcmOpOccNotFound(SolHttpError404):
    message = _("VnfLcmOpOcc %(lcmocc_id)s not found.")


class VnfdIdNotFound(SolHttpError422):
    message = _("VnfPackage of vnfdId %(vnfd_id)s is not found or "
                "not operational.")


class FlavourIdNotFound(SolHttpError400):
    message = _("FlavourId %(flavour_id)s not found in the vnfd.")


class NoVimConnectionInfo(SolHttpError422):
    message = _("No VimConnectionInfo set to the VnfInstance.")


class InvalidVnfdFormat(SolHttpError400):
    message = _("Vnfd is unexpected format.")


class StackOperationFailed(SolHttpError422):
    # title and detail are set in the code from stack_status_reason
    pass


class MgmtDriverExecutionFailed(SolHttpError422):
    title = 'Mgmt driver execution failed'
    # detail set in the code


class BaseHOTNotDefined(SolHttpError400):
    message = _("BaseHOT is not defined.")


class UserdataMissing(SolHttpError400):
    message = _("'lcm-operation-user-data' or "
                "'lcm-operation-user-data-class' missing.")


class UserdataExecutionFailed(SolHttpError422):
    title = 'Userdata execution failed'
    # detail set in the code


class TestNotificationFailed(SolHttpError422):
    message = _("Can't get from notification callback Uri.")


class VimNotFound(SolHttpError404):
    message = _("VIM %(vim_id)s not found.")


class OtherOperationInProgress(SolHttpError409):
    message = _("Other LCM operation of vnfInstance %(inst_id)s "
                "is in progress.")


class UserDataClassNotImplemented(SolHttpError400):
    message = _("Userdata class not implemented.")


class InvalidAttributeFilter(SolHttpError400):
    message = _("Attribute filter expression is invalid.")


class InvalidAttributeSelector(SolHttpError400):
    message = _("Attribute selector expression is invalid.")


class InvalidSubscription(SolHttpError400):
    # detail set in the code
    pass


class ResponseTooBig(SolHttpError400):
    title = 'Response too big'
    message = _("Content length of the response is larger "
                "than %(size)d bytes.")


class LocalNfvoGrantFailed(SolHttpError403):
    title = 'Grant failed'
    # detail set in the code


class LcmOpOccNotFailedTemp(SolHttpError409):
    message = _("LCM operation %(lcmocc_id)s not FAILED_TEMP.")


class GrantRequestOrGrantNotFound(SolHttpError404):
    message = _("GrantRequest or Grant for LCM operation "
                "%(lcmocc_id)s not found.")


class RollbackNotSupported(SolHttpError422):
    message = _("Rollback of %(op)s is not supported.")


class UnexpectedParentResourceDefinition(SolHttpError422):
    message = _("Parent resource is necessary for VDU definition.")


class InvalidScaleAspectId(SolHttpError400):
    message = _("Invalid aspectId '%(aspect_id)s'.")


class InvalidScaleNumberOfSteps(SolHttpError400):
    message = _("Invalid numberOfSteps '%(num_steps)d'.")


class DeltaMissingInVnfd(SolHttpError400):
    message = _("Delta '%(delta)s' is not defined in "
                "VduScalingAspectDeltas.")


class ConductorProcessingError(SolException):
    title = 'Internal Server Error'
    message = _("Failure due to conductor processing error.")


class InvalidVolumeSize(SolHttpError400):
    message = _("The volume size set in VNFD is invalid.")


class VduIdNotFound(SolHttpError404):
    message = _("This vdu_id '%(vdu_id)s' does not exist"
                " in current VnfInstance.")


class SshIpNotFoundException(SolHttpError404):
    message = _("Ssh ip not found.")


class CoordinateVNFExecutionFailed(SolHttpError422):
    message = _('CoordinateVNF execution failed.')


class VmRunningFailed(SolHttpError422):
    message = _("VM is running incorrectly. Reason: '%(error_info)s'")


class CnfDefinitionNotFound(SolHttpError400):
    message = _("'%(diff_files)s' do not exist in vnf artifact files")


class NamespaceNotUniform(SolHttpError400):
    message = _("There are multiple namespaces in the manifest file. Only one"
                "namespace can be used in one VNF.")


class ExecuteK8SResourceCreateApiFailed(SolHttpError400):
    message = _("An error occurred when creating k8s resource.")


class CreateK8SResourceFailed(SolHttpError400):
    message = _("An error occurred when creating k8s resource.")


class ReadEndpointsFalse(SolHttpError400):
    message = _("read endpoints failed. kind:'%(kind)s'.")


class DeleteK8SResourceFailed(SolHttpError400):
    message = _("An error occurred when deleting k8s resource.")


class UnmatchedFileException(SolHttpError400):
    message = _("The updated file '%(new_file_path)s' does not match the"
                " original file. Some resources may be missing.")


class UnSupportedKindException(SolHttpError400):
    message = _("The update file '%(new_file_path)s' does not contain"
                " 'Deployment' resource and other types of updates are not"
                " currently supported.")


class NotFoundUpdateFileException(SolHttpError400):
    message = _("No original file matching the update file"
                " '%(new_file_path)s' was found.")


class MissingParameterException(SolHttpError400):
    message = _("If you set vdu_params parameter in request body,"
                " the 'vdu_id' is necessary.")


class UpdateK8SResourceFailed(SolHttpError400):
    message = _("An error occurred when updating k8s resource.")


class NotSupportOperationType(SolHttpError404):
    message = _("This operation is not currently supported.")
import React from 'react'; import { Routes, Route } from 'react-router-dom'; import Layout from '../components/Layout'; import MainSection from '../sections/MainSection'; import ScudSection from '../sections/ScudSection'; import CabinetSection from '../sections/CabinetSection'; import SettingsSection from '../sections/SettingsSection'; import LoginSection from '../sections/LoginSection'; import VocabularySection from '../sections/VocabularySection'; import RequireAuth from '../components/Auth/RequireAuth'; import NotFoundSection from '../sections/NotFoundSection'; export default function useRoutes() { return ( <Routes> <Route element={<Layout />}> <Route path="/" element={<MainSection />} /> <Route path="login" element={<LoginSection />} /> <Route path="scud" element={ <RequireAuth> <ScudSection /> </RequireAuth> } /> <Route path="cabinet" element={ <RequireAuth> <CabinetSection /> </RequireAuth> } /> <Route path="vocabulary" element={ <RequireAuth> <VocabularySection /> </RequireAuth> } /> <Route path="settings" element={ <RequireAuth> <SettingsSection /> </RequireAuth> } /> <Route path="*" element={<NotFoundSection />} /> </Route> </Routes> ); }
//
//  ViewController.swift
//  QuizApp_Completed
//

import UIKit

/// Presents one quiz question at a time with two answer buttons, a
/// background image, and a running score.
class ViewController: UIViewController {

    @IBOutlet weak var questionLabel: UILabel!
    @IBOutlet weak var background: UIImageView!
    @IBOutlet weak var buttonOne: UIButton!
    @IBOutlet weak var buttonTwo: UIButton!
    @IBOutlet weak var scoreLabel: UILabel!

    /// Owns the question data, scoring, and progression through the quiz.
    var quizLogic = QuizLogic()

    override func viewDidLoad() {
        super.viewDidLoad()
        updateUI()
    }

    /// Grades the tapped answer, flashes green/red feedback on the button,
    /// advances the quiz, and refreshes the screen a moment later.
    @IBAction func answerSubmitted(_ sender: UIButton) {
        let chosenAnswer = sender.titleLabel!.text!
        sender.backgroundColor = quizLogic.compareUserResponse(chosenAnswer) ? UIColor.green : UIColor.red
        quizLogic.increaseIndex()
        Timer.scheduledTimer(timeInterval: 0.1, target: self, selector: #selector(updateUI), userInfo: nil, repeats: false)
    }

    /// Pushes the current quiz state into the labels, buttons, and backdrop,
    /// and clears any leftover answer-feedback colors.
    @objc func updateUI() {
        scoreLabel.text = "Score: \(quizLogic.getScore())"
        questionLabel.text = quizLogic.getNextQuestion()
        background.image = UIImage(named: String(quizLogic.getImageName() + 1))
        buttonOne.setTitle(quizLogic.getChoiceOne(), for: .normal)
        buttonTwo.setTitle(quizLogic.getChoiceTwo(), for: .normal)
        [buttonOne, buttonTwo].forEach { $0?.backgroundColor = UIColor.clear }
    }
}
//
//  ViewController.swift
//  RecordApp
//
//  Created by Terry Jason on 2024/1/3.
//

import UIKit
import AVFoundation

/// Records audio to Documents/MyAudio.m4a, plays it back, and shows the
/// elapsed recording/playback time in mm:ss.
class RecordViewController: UIViewController {

    private var audioRecorder: AVAudioRecorder!
    private var audioPlayer: AVAudioPlayer?

    // Fires once per second to drive the time label.
    private var timer: Timer?
    private var elapsedTimeInSecond: Int = 0

    // MARK: - @IBOutlet

    @IBOutlet private var stopButton: UIButton!
    @IBOutlet private var playButton: UIButton!
    @IBOutlet private var recordButton: UIButton!
    @IBOutlet private var timeLabel: UILabel!

    deinit {
        // A repeating Timer is retained by the run loop until invalidated;
        // never let it outlive the controller.
        timer?.invalidate()
    }
}

// MARK: - Life Cycle

extension RecordViewController {

    override func viewDidLoad() {
        super.viewDidLoad()
        configure()
    }
}

// MARK: - @IBAction

extension RecordViewController {

    /// Stops recording, deactivates the audio session, and resets the UI.
    @IBAction func stop(sender: UIButton) {
        recordButton.setImage(UIImage(named: "Record"), for: .normal)
        recordButton.isEnabled = true
        stopButton.isEnabled = false
        playButton.isEnabled = true

        audioRecorder.stop()
        resetTimer()

        let audioSession = AVAudioSession.sharedInstance()

        do {
            try audioSession.setActive(false)
        } catch {
            print(error)
        }
    }

    /// Plays back the last recording (only when not currently recording).
    @IBAction func play(sender: UIButton) {
        if !(audioRecorder.isRecording) {
            guard let player = try? AVAudioPlayer(contentsOf: audioRecorder.url) else { return }
            audioPlayer = player
            audioPlayer?.delegate = self
            audioPlayer?.play()
            startTimer()
        }
    }

    /// Starts a new recording, or pauses/resumes an in-progress one.
    @IBAction func record(sender: UIButton) {
        // Stop any playback before touching the recorder.
        if let player = audioPlayer, player.isPlaying {
            player.stop()
        }

        if !(audioRecorder.isRecording) {
            let audioSession = AVAudioSession.sharedInstance()

            do {
                try audioSession.setActive(true)
                audioRecorder.record()
                startTimer()
                recordButton.setImage(UIImage(named: "Pause"), for: .normal)
            } catch {
                print(error)
            }
        } else {
            audioRecorder.pause()
            pauseTimer()
            recordButton.setImage(UIImage(named: "Record"), for: .normal)
        }

        stopButton.isEnabled = true
        playButton.isEnabled = false
    }
}

// MARK: - Set Timer

extension RecordViewController {

    private func startTimer() {
        // FIX: the original closure captured `self` strongly, so the
        // repeating timer kept the view controller alive for as long as it
        // ran. A weak capture breaks that retain.
        timer = Timer.scheduledTimer(withTimeInterval: 1.0, repeats: true) { [weak self] _ in
            guard let self = self else { return }
            self.elapsedTimeInSecond += 1
            self.updateTimeLabel()
        }
    }

    private func pauseTimer() {
        timer?.invalidate()
    }

    private func resetTimer() {
        timer?.invalidate()
        elapsedTimeInSecond = 0
        updateTimeLabel()
    }

    private func updateTimeLabel() {
        let seconds = elapsedTimeInSecond % 60
        let minutes = (elapsedTimeInSecond / 60) % 60

        timeLabel.text = String(format: "%02d:%02d", minutes, seconds)
    }
}

// MARK: - Helper Method

extension RecordViewController {

    /// One-time setup: disables buttons and prepares the recorder file URL.
    private func configure() {
        stopButton.isEnabled = false
        playButton.isEnabled = false

        guard let directoryURL = FileManager.default.urls(for: .documentDirectory, in: .userDomainMask).first else {
            alertController(title: "Error", message: "Failed to get the document directory for recording the audio. Please try again later.")
            return
        }

        let audioFileURL = directoryURL.appendingPathComponent("MyAudio.m4a")
        let audioSession = AVAudioSession.sharedInstance()

        recordAudio(audioFileURL, audioSession)
    }

    /// Configures the shared audio session and creates the AVAudioRecorder
    /// (AAC, 44.1 kHz, stereo, high quality).
    private func recordAudio(_ fileURL: Foundation.URL, _ session: AVAudioSession) {
        do {
            try session.setCategory(.playAndRecord, options: .defaultToSpeaker)

            let recorderSetting: [String: Any] = [
                AVFormatIDKey: Int(kAudioFormatMPEG4AAC),
                AVSampleRateKey: 44100.0,
                AVNumberOfChannelsKey: 2,
                AVEncoderAudioQualityKey: AVAudioQuality.high.rawValue
            ]

            audioRecorder = try AVAudioRecorder(url: fileURL, settings: recorderSetting)
            audioRecorder.delegate = self
            audioRecorder.isMeteringEnabled = true
            audioRecorder.prepareToRecord()
        } catch {
            print(error)
        }
    }
}

// MARK: - Alert

extension RecordViewController {

    /// Presents a one-button ("OK") alert.
    private func alertController(title: String, message: String) {
        let alert = UIAlertController(title: title, message: message, preferredStyle: .alert)
        let okAction = UIAlertAction(title: "OK", style: .cancel)
        alert.addAction(okAction)
        present(alert, animated: true)
    }
}

// MARK: - AVAudioRecorderDelegate

extension RecordViewController: AVAudioRecorderDelegate {

    func audioRecorderDidFinishRecording(_ recorder: AVAudioRecorder, successfully flag: Bool) {
        if flag {
            alertController(title: "Finish Recording", message: "Successfully recorded the audio!")
        }
    }
}

// MARK: - AVAudioPlayerDelegate

extension RecordViewController: AVAudioPlayerDelegate {

    func audioPlayerDidFinishPlaying(_ player: AVAudioPlayer, successfully flag: Bool) {
        playButton.isSelected = false
        alertController(title: "Finish Playing", message: "Finish playing the recording!")
        resetTimer()
    }
}
import { Body, Controller, Post, Get, Req, UseFilters, HttpStatus, Param, Patch, Query } from '@nestjs/common';
import { HttpService } from '@nestjs/axios';
import { ApplicationServiceURL } from '../app.config';
import { Request } from 'express';
import { AxiosExceptionFilter } from '../filters/axios-exception.filter';
import { ApiResponse, ApiTags } from '@nestjs/swagger';
import { CreateUserCoachDto } from '../dto/create-user-coach.dto';
import { CreateUserSimpleDto } from '../dto/create-user-simple.dto';
import { LoginUserDto } from '../dto/login-user.dto';
import { UserCoachRdo } from '../rdo/user-coach.rdo';
import { UserSimpleRdo } from '../rdo/user-simple.rdo';
import { LoggedUserRdo } from '../rdo/logged-user.rdo';
import { UserQuery } from '../query/user.query';
import { UpdateUserCoachDto } from '../dto/update-user-coach.dto';
import { UpdateUserSimpleDto } from '../dto/update-user-simple.dto';
import { ChangeFriendDto } from '../dto/change-friend.dto';
import { UpdateBalanceDto } from '../dto/update-balance.dto';
import { DeleteNotificationDto } from '../dto/delete-notification.dto';

/**
 * API-gateway controller: proxies user-related requests to the Users (and,
 * for balance updates, Workouts) microservices, forwarding the caller's
 * Authorization header on every downstream call.
 */
@ApiTags('users')
@Controller('users')
@UseFilters(AxiosExceptionFilter)
export class UsersController {
  constructor(
    private readonly httpService: HttpService
  ) {}

  /**
   * Axios request options that forward the caller's Authorization header,
   * plus optional query params. Extracted because every handler repeated
   * this literal.
   */
  private withAuth(req: Request, params?: unknown) {
    return {
      params,
      headers: { 'Authorization': req.headers['authorization'] }
    };
  }

  @ApiResponse({
    type: UserCoachRdo || UserSimpleRdo,
    status: HttpStatus.CREATED,
    description: 'The new user has been successfully created.'
  })
  @ApiResponse({
    status: HttpStatus.CONFLICT,
    description: 'The user has already existed.'
  })
  @Post('register')
  public async create(@Body() createUserDto: CreateUserCoachDto | CreateUserSimpleDto) {
    const { data } = await this.httpService.axiosRef.post(`${ApplicationServiceURL.Users}/register`, createUserDto);
    return data;
  }

  @ApiResponse({
    type: LoggedUserRdo,
    status: HttpStatus.OK,
    description: 'User has been successfully logged.'
  })
  @ApiResponse({
    status: HttpStatus.UNAUTHORIZED,
    description: 'Password or Login is wrong.',
  })
  @Post('login')
  public async login(@Body() loginUserDto: LoginUserDto) {
    const { data } = await this.httpService.axiosRef.post(`${ApplicationServiceURL.Users}/login`, loginUserDto);
    return data;
  }

  @ApiResponse({
    status: HttpStatus.OK,
    description: 'Get a new access/refresh tokens'
  })
  @Post('refresh')
  public async refreshToken(@Req() req: Request) {
    const { data } = await this.httpService.axiosRef.post(`${ApplicationServiceURL.Users}/refresh`, null, this.withAuth(req));
    return data;
  }

  @ApiResponse({
    status: HttpStatus.OK,
    description: 'User found.'
  })
  @Get(':id')
  public async show(@Req() req: Request, @Param('id') id: string) {
    const { data } = await this.httpService.axiosRef.get(`${ApplicationServiceURL.Users}/${id}`, this.withAuth(req));
    return data;
  }

  @ApiResponse({
    status: HttpStatus.OK,
    description: 'User found.'
  })
  @Get('/')
  public async index(@Req() req: Request, @Query() query: UserQuery) {
    const { data } = await this.httpService.axiosRef.get(`${ApplicationServiceURL.Users}/`, this.withAuth(req, query));
    return data;
  }

  @ApiResponse({
    status: HttpStatus.OK,
    description: 'User has been successfully updated.'
  })
  @Patch(':id')
  public async update(@Req() req: Request, @Param('id') id: string, @Body() dto: UpdateUserCoachDto | UpdateUserSimpleDto) {
    const { data } = await this.httpService.axiosRef.patch(`${ApplicationServiceURL.Users}/${id}`, dto, this.withAuth(req));
    return data;
  }

  @ApiResponse({
    status: HttpStatus.OK,
    description: 'User\'s friends found.'
  })
  @Get(':id/friends')
  public async indexFriends(@Req() req: Request, @Param('id') id: string, @Query() query: UserQuery) {
    const { data } = await this.httpService.axiosRef.get(`${ApplicationServiceURL.Users}/${id}/friends`, this.withAuth(req, query));
    return data;
  }

  @ApiResponse({
    status: HttpStatus.OK,
    description: 'Friend has been successfully added.'
  })
  @Patch(':id/friends/add')
  public async addFriend(@Req() req: Request, @Param('id') id: string, @Body() dto: ChangeFriendDto) {
    const { data } = await this.httpService.axiosRef.patch(`${ApplicationServiceURL.Users}/${id}/friends/add`, dto, this.withAuth(req));
    return data;
  }

  @ApiResponse({
    status: HttpStatus.OK,
    // FIX: description previously said "added" (copy-paste from addFriend).
    description: 'Friend has been successfully removed.'
  })
  @Patch(':id/friends/remove')
  public async removeFriend(@Req() req: Request, @Param('id') id: string, @Body() dto: ChangeFriendDto) {
    const { data } = await this.httpService.axiosRef.patch(`${ApplicationServiceURL.Users}/${id}/friends/remove`, dto, this.withAuth(req));
    return data;
  }

  @ApiResponse({
    status: HttpStatus.OK,
    description: 'User balance has been successfully updated.'
  })
  @Patch(':id/balance/add')
  public async incBalance(@Req() req: Request, @Param('id') id: string, @Body() dto: UpdateBalanceDto) {
    const { workoutId } = dto;
    // Validates the workout exists (the filter maps a 404 from Workouts).
    await this.httpService.axiosRef.get(`${ApplicationServiceURL.Workouts}/${workoutId}`, this.withAuth(req));
    const { data } = await this.httpService.axiosRef.patch(`${ApplicationServiceURL.Users}/${id}/balance/add`, dto, this.withAuth(req));
    return data;
  }

  @ApiResponse({
    status: HttpStatus.OK,
    description: 'User balance has been successfully updated.'
  })
  @Patch(':id/balance/sub')
  public async decBalance(@Req() req: Request, @Param('id') id: string, @Body() dto: UpdateBalanceDto) {
    const { workoutId } = dto;
    // Validates the workout exists (the filter maps a 404 from Workouts).
    await this.httpService.axiosRef.get(`${ApplicationServiceURL.Workouts}/${workoutId}`, this.withAuth(req));
    const { data } = await this.httpService.axiosRef.patch(`${ApplicationServiceURL.Users}/${id}/balance/sub`, dto, this.withAuth(req));
    return data;
  }

  @ApiResponse({
    status: HttpStatus.OK,
    description: 'Notification has been successfully deleted.'
  })
  @Patch(':id/notification')
  public async removeNotification(@Req() req: Request, @Param('id') id: string, @Body() dto: DeleteNotificationDto) {
    // NOTE(review): this forwards to `/users/${id}` rather than a
    // `/notification` subpath — confirm the Users service really handles
    // notification removal on its generic PATCH :id endpoint.
    const { data } = await this.httpService.axiosRef.patch(`${ApplicationServiceURL.Users}/${id}`, dto, this.withAuth(req));
    return data;
  }
}
package net.minecraft.command;

import java.util.List;

import net.minecraft.server.MinecraftServer;
import net.minecraft.util.BlockPos;
import net.minecraft.util.ChatComponentTranslation;
import net.minecraft.world.EnumDifficulty;

/**+
 * This portion of EaglercraftX contains deobfuscated Minecraft 1.8 source code.
 * 
 * Minecraft 1.8.8 bytecode is (c) 2015 Mojang AB. "Do not distribute!"
 * Mod Coder Pack v9.18 deobfuscation configs are (c) Copyright by the MCP Team
 * 
 * EaglercraftX 1.8 patch files (c) 2022-2024 lax1dude, ayunami2000. All Rights Reserved.
 * 
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 * 
 */
public class CommandDifficulty extends CommandBase {
	/**+
	 * Gets the name of the command
	 */
	public String getCommandName() {
		return "difficulty";
	}

	/**+
	 * Return the required permission level for this command.
	 */
	public int getRequiredPermissionLevel() {
		return 2;
	}

	/**+
	 * Gets the usage string for the command.
	 */
	public String getCommandUsage(ICommandSender sender) {
		return "commands.difficulty.usage";
	}

	/**+
	 * Callback when the command is invoked
	 */
	public void processCommand(ICommandSender sender, String[] args) throws CommandException {
		// Require at least one argument (the desired difficulty).
		if (args.length <= 0) {
			throw new WrongUsageException("commands.difficulty.usage", new Object[0]);
		}

		EnumDifficulty difficulty = this.getDifficultyFromCommand(args[0]);
		MinecraftServer.getServer().setDifficultyForAllWorlds(difficulty);
		notifyOperators(sender, this, "commands.difficulty.success",
				new Object[] { new ChatComponentTranslation(difficulty.getDifficultyResourceKey(), new Object[0]) });
	}

	/**+
	 * Resolves the first command argument to a difficulty. Accepts the
	 * full name, its one-letter abbreviation, or a numeric id in [0, 3].
	 */
	protected EnumDifficulty getDifficultyFromCommand(String arg) throws NumberInvalidException {
		if (arg.equalsIgnoreCase("peaceful") || arg.equalsIgnoreCase("p")) {
			return EnumDifficulty.PEACEFUL;
		}
		if (arg.equalsIgnoreCase("easy") || arg.equalsIgnoreCase("e")) {
			return EnumDifficulty.EASY;
		}
		if (arg.equalsIgnoreCase("normal") || arg.equalsIgnoreCase("n")) {
			return EnumDifficulty.NORMAL;
		}
		if (arg.equalsIgnoreCase("hard") || arg.equalsIgnoreCase("h")) {
			return EnumDifficulty.HARD;
		}
		// Fall back to a numeric difficulty id; throws NumberInvalidException
		// when the argument is not an integer in [0, 3].
		return EnumDifficulty.getDifficultyEnum(parseInt(arg, 0, 3));
	}

	/**+
	 * Return a list of options when the user types TAB
	 */
	public List<String> addTabCompletionOptions(ICommandSender sender, String[] args, BlockPos pos) {
		if (args.length == 1) {
			return getListOfStringsMatchingLastWord(args,
					new String[] { "peaceful", "easy", "normal", "hard" });
		}
		return null;
	}
}
// Copyright 2022 The Chromium Authors // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "content/browser/attribution_reporting/attribution_interop_parser.h" #include <cmath> #include <functional> #include <optional> #include <string> #include <utility> #include <vector> #include "base/functional/overloaded.h" #include "base/strings/strcat.h" #include "base/test/gmock_expected_support.h" #include "base/test/values_test_util.h" #include "base/time/time.h" #include "base/types/expected.h" #include "base/values.h" #include "components/attribution_reporting/source_type.mojom.h" #include "components/attribution_reporting/suitable_origin.h" #include "content/browser/attribution_reporting/attribution_config.h" #include "content/browser/attribution_reporting/attribution_reporting.mojom.h" #include "content/browser/attribution_reporting/attribution_test_utils.h" #include "testing/gmock/include/gmock/gmock.h" #include "testing/gtest/include/gtest/gtest.h" #include "third_party/abseil-cpp/absl/types/variant.h" namespace content { namespace { using ::base::test::ErrorIs; using ::base::test::ValueIs; using ::testing::AllOf; using ::testing::ElementsAre; using ::testing::Eq; using ::testing::Field; using ::testing::HasSubstr; using ::testing::IsEmpty; using ::attribution_reporting::SuitableOrigin; // Pick an arbitrary offset time to test correct handling. 
// All times in these tests are expressed relative to this arbitrary offset to
// verify that the parser applies the caller-supplied offset correctly.
constexpr base::Time kOffsetTime = base::Time::UnixEpoch() + base::Days(5);

// An empty top-level dict or an empty "registrations" list is valid input and
// yields an empty event sequence.
TEST(AttributionInteropParserTest, EmptyInputParses) {
  const char* const kTestCases[] = {
      R"json({})json",
      R"json({"registrations":[]})json",
  };

  for (const char* json : kTestCases) {
    base::Value::Dict value = base::test::ParseJsonDict(json);
    EXPECT_THAT(ParseAttributionInteropInput(std::move(value), kOffsetTime),
                base::test::ValueIs(IsEmpty()))
        << json;
  }
}

// Parses two well-formed source registrations (navigation and event) and
// checks every parsed field, including the offset-adjusted timestamps and the
// default-false debug_permission.
TEST(AttributionInteropParserTest, ValidSourceParses) {
  constexpr char kJson[] = R"json({"registrations": [
    {
      "timestamp": "1643235573123",
      "registration_request": {
        "source_type": "navigation",
        "attribution_src_url": "https://a.r.test",
        "context_origin": "https://a.s.test"
      },
      "responses": [{
        "url": "https://a.r.test",
        "debug_permission": true,
        "response": {
          "Attribution-Reporting-Register-Source": 123
        }
      }]
    },
    {
      "timestamp": "1643235574123",
      "registration_request": {
        "source_type": "event",
        "attribution_src_url": "https://b.r.test",
        "context_origin": "https://b.s.test",
      },
      "responses": [{
        "url": "https://b.r.test",
        "response": {
          "Attribution-Reporting-Register-Source": 456
        }
      }]
    }
  ]})json";

  base::Value::Dict value = base::test::ParseJsonDict(kJson);
  ASSERT_OK_AND_ASSIGN(
      auto result, ParseAttributionInteropInput(std::move(value), kOffsetTime));
  ASSERT_EQ(result.size(), 2u);

  EXPECT_EQ(result.front().time,
            kOffsetTime + base::Milliseconds(1643235573123));
  EXPECT_EQ(result.front().source_type,
            attribution_reporting::mojom::SourceType::kNavigation);
  EXPECT_EQ(result.front().reporting_origin,
            *SuitableOrigin::Deserialize("https://a.r.test"));
  EXPECT_EQ(result.front().context_origin,
            *SuitableOrigin::Deserialize("https://a.s.test"));
  EXPECT_EQ(result.front().registration, base::Value(123));
  EXPECT_TRUE(result.front().debug_permission);

  EXPECT_EQ(result.back().time,
            kOffsetTime + base::Milliseconds(1643235574123));
  EXPECT_EQ(result.back().source_type,
            attribution_reporting::mojom::SourceType::kEvent);
  EXPECT_EQ(result.back().reporting_origin,
            *SuitableOrigin::Deserialize("https://b.r.test"));
  EXPECT_EQ(result.back().context_origin,
            *SuitableOrigin::Deserialize("https://b.s.test"));
  EXPECT_EQ(result.back().registration, base::Value(456));
  EXPECT_FALSE(result.back().debug_permission);
}

// A registration without a source_type is treated as a trigger
// (source_type == nullopt).
TEST(AttributionInteropParserTest, ValidTriggerParses) {
  // NOTE(review): "context_origin" below has a leading space inside the
  // string; the expectation further down is the trimmed origin. Presumably
  // URL parsing strips leading whitespace — confirm this is intentional
  // rather than a typo.
  constexpr char kJson[] = R"json({"registrations": [
    {
      "timestamp": "1643235575123",
      "registration_request": {
        "attribution_src_url": "https://a.r.test",
        "context_origin": " https://b.d.test",
      },
      "responses": [{
        "url": "https://a.r.test",
        "debug_permission": true,
        "response": {
          "Attribution-Reporting-Register-Trigger": 789
        }
      }]
    }
  ]})json";

  base::Value::Dict value = base::test::ParseJsonDict(kJson);
  ASSERT_OK_AND_ASSIGN(
      auto result, ParseAttributionInteropInput(std::move(value), kOffsetTime));
  ASSERT_EQ(result.size(), 1u);

  EXPECT_EQ(result.front().time,
            kOffsetTime + base::Milliseconds(1643235575123));
  EXPECT_EQ(result.front().reporting_origin,
            *SuitableOrigin::Deserialize("https://a.r.test"));
  EXPECT_EQ(result.front().context_origin,
            *SuitableOrigin::Deserialize("https://b.d.test"));
  EXPECT_EQ(result.front().source_type, std::nullopt);
  EXPECT_EQ(result.front().registration, base::Value(789));
  EXPECT_TRUE(result.front().debug_permission);
}

// Pairs an invalid input JSON with a substring expected in the parse error.
struct ParseErrorTestCase {
  const char* expected_failure_substr;
  const char* json;
};

class AttributionInteropParserInputErrorTest
    : public testing::TestWithParam<ParseErrorTestCase> {};

TEST_P(AttributionInteropParserInputErrorTest, InvalidInputFails) {
  const ParseErrorTestCase& test_case = GetParam();

  base::Value::Dict value = base::test::ParseJsonDict(test_case.json);
  auto result = ParseAttributionInteropInput(std::move(value), kOffsetTime);
  EXPECT_THAT(result, base::test::ErrorIs(
                          HasSubstr(test_case.expected_failure_substr)));
}

// Each case exercises one structural validation rule; the expected error
// substring pins both the failing JSON path and the reason.
const ParseErrorTestCase kParseErrorTestCases[] = {
    {
        R"(["registrations"]: must be a list)",
        R"json({"registrations": ""})json",
    },
    {
        R"(["registrations"][0]: must be a dictionary)",
        R"json({"registrations": [""]})json",
    },
    {
        R"(["registrations"][0]["timestamp"]: must be an integer number of)",
        R"json({"registrations": [{}]})json",
    },
    {
        R"(["registrations"][0]["registration_request"]: must be present)",
        R"json({"registrations":[{}]})json",
    },
    {
        R"(["registrations"][0]["registration_request"]: must be a dictionary)",
        R"json({"registrations": [{
          "registration_request": ""
        }]})json",
    },
    {
        R"(["registrations"][0]["registration_request"]["attribution_src_url"]: must be a valid, secure origin)",
        R"json({"registrations": [{
          "registration_request": {}
        }]})json",
    },
    {
        R"(["registrations"][0]["registration_request"]["attribution_src_url"]: must be a valid, secure origin)",
        R"json({"registrations": [{
          "registration_request": {
            "attribution_src_url": "http://r.test"
          }
        }]})json",
    },
    {
        R"(["registrations"][0]["registration_request"]["context_origin"]: must be a valid, secure origin)",
        R"json({"registrations": [{
          "registration_request": {}
        }]})json",
    },
    {
        R"(["registrations"][0]["registration_request"]["context_origin"]: must be a valid, secure origin)",
        R"json({"registrations": [{
          "registration_request": {
            "context_origin": "http://s.test"
          }
        }]})json",
    },
    {
        R"(["registrations"][0]["responses"]: must be present)",
        R"json({"registrations": [{
          "timestamp": "1643235574000",
          "registration_request": {
            "source_type": "navigation",
            "attribution_src_url": "https://a.r.test",
            "context_origin": "https://a.s.test"
          }
        }]})json",
    },
    {
        R"(["registrations"][0]["responses"]: must be a list)",
        R"json({"registrations": [{
          "timestamp": "1643235574000",
          "registration_request": {
            "source_type": "navigation",
            "attribution_src_url": "https://a.r.test",
            "context_origin": "https://a.s.test"
          },
          "responses": ""
        }]})json",
    },
    {
        R"(["registrations"][0]["responses"]: must have size 1)",
        R"json({"registrations": [{
          "timestamp": "1643235574000",
          "registration_request": {
            "source_type": "navigation",
            "attribution_src_url": "https://a.r.test",
            "context_origin": "https://a.s.test"
          },
          "responses": [{}, {}]
        }]})json",
    },
    {
        R"(["registrations"][0]["responses"][0]: must be a dictionary)",
        R"json({"registrations": [{
          "timestamp": "1643235574000",
          "registration_request": {
            "source_type": "navigation",
            "attribution_src_url": "https://a.r.test",
            "context_origin": "https://a.s.test"
          },
          "responses": [""]
        }]})json",
    },
    {
        R"(["registrations"][0]["responses"][0]["url"]: must be a valid, secure origin)",
        R"json({"registrations": [{
          "timestamp": "1643235574000",
          "registration_request": {
            "source_type": "navigation",
            "attribution_src_url": "https://a.r.test",
            "context_origin": "https://a.s.test"
          },
          "responses": [{}]
        }]})json",
    },
    {
        R"(["registrations"][0]["responses"][0]["url"]: must match https://a.r.test)",
        R"json({"registrations": [{
          "timestamp": "1643235574000",
          "registration_request": {
            "source_type": "navigation",
            "attribution_src_url": "https://a.r.test",
            "context_origin": "https://a.s.test"
          },
          "responses": [{
            "url": "https://b.r.test"
          }]
        }]})json",
    },
    {
        R"(["registrations"][0]["responses"][0]["response"]: must be present)",
        R"json({"registrations": [{
          "timestamp": "1643235574000",
          "registration_request": {
            "source_type": "navigation",
            "attribution_src_url": "https://a.r.test",
            "context_origin": "https://a.s.test"
          },
          "responses": [{
            "url": "https://a.r.test"
          }]
        }]})json",
    },
    {
        R"(["registrations"][0]["responses"][0]["response"]: must be a dictionary)",
        R"json({"registrations": [{
          "timestamp": "1643235574000",
          "registration_request": {
            "source_type": "navigation",
            "attribution_src_url": "https://a.r.test",
            "context_origin": "https://a.s.test"
          },
          "responses": [{
            "url": "https://a.r.test",
            "response": ""
          }]
        }]})json",
    },
    {
        R"(["registrations"][0]["responses"][0]["response"]: must contain either source or trigger)",
        R"json({"registrations": [{
          "timestamp": "1643235574000",
          "registration_request": {
            "source_type": "navigation",
            "attribution_src_url": "https://a.r.test",
            "context_origin": "https://a.s.test"
          },
          "responses": [{
            "url": "https://a.r.test",
            "response": {}
          }]
        }]})json",
    },
    {
        R"(["registrations"][0]["responses"][0]["response"]: must contain either source or trigger)",
        R"json({"registrations": [{
          "timestamp": "1643235574000",
          "registration_request": {
            "source_type": "navigation",
            "attribution_src_url": "https://a.r.test",
            "context_origin": "https://a.s.test"
          },
          "responses": [{
            "url": "https://a.r.test",
            "response": {
              "Attribution-Reporting-Register-Source": {},
              "Attribution-Reporting-Register-Trigger": {}
            }
          }]
        }]})json",
    },
    {
        R"(["registrations"][0]["registration_request"]["source_type"]: must be either)",
        R"json({"registrations": [{
          "timestamp": "1643235574000",
          "registration_request": {
            "source_type": "NAVIGATION"
          }
        }]})json",
    },
    {
        R"(["registrations"][0]["registration_request"]["source_type"]: must be present)",
        R"json({"registrations": [{
          "timestamp": "1643235574000",
          "registration_request": {
            "attribution_src_url": "https://a.r.test",
            "context_origin": "https://a.s.test"
          },
          "responses": [{
            "url": "https://a.r.test",
            "response": {
              "Attribution-Reporting-Register-Source": {}
            }
          }]
        }]})json",
    },
    {
        R"(["registrations"][0]["registration_request"]["source_type"]: must not be present)",
        R"json({"registrations": [{
          "timestamp": "1643235574000",
          "registration_request": {
            "source_type": "navigation",
            "attribution_src_url": "https://a.r.test",
            "context_origin": "https://a.s.test"
          },
          "responses": [{
            "url": "https://a.r.test",
            "response": {
              "Attribution-Reporting-Register-Trigger": {}
            }
          }]
        }]})json",
    },
    {
        R"(["registrations"][1]["timestamp"]: must be greater than previous time)",
        R"json({"registrations": [
          {
            "timestamp": "1",
            "registration_request": {
              "context_origin": "https://a.d1.test",
              "attribution_src_url": "https://a.r.test"
            },
            "responses": [{
              "url": "https://a.r.test",
              "response": {
                "Attribution-Reporting-Register-Trigger": {}
              }
            }]
          },
          {
            "timestamp": "0",
            "registration_request": {
              "context_origin": "https://a.d1.test",
              "attribution_src_url": "https://a.r.test"
            },
            "responses": [{
              "url": "https://a.r.test",
              "response": {
                "Attribution-Reporting-Register-Trigger": {}
              }
            }]
          },
        ]})json",
    },
};

INSTANTIATE_TEST_SUITE_P(AttributionInteropParserInvalidInputs,
                         AttributionInteropParserInputErrorTest,
                         ::testing::ValuesIn(kParseErrorTestCases));

// Each config field is tested in isolation via MergeAttributionInteropConfig
// (required = false) and once all together via ParseAttributionInteropConfig
// (required = true, where every field must be present).
TEST(AttributionInteropParserTest, ValidConfig) {
  // Captureless lambdas below decay to one of these two function-pointer
  // types, selecting which expected struct the case mutates.
  typedef void (*MakeAttributionConfigFunc)(AttributionConfig&);
  typedef void (*MakeInteropConfigFunc)(AttributionInteropConfig&);

  using MakeExpectedFunc =
      absl::variant<MakeAttributionConfigFunc, MakeInteropConfigFunc>;

  const struct {
    const char* json;
    bool required;
    MakeExpectedFunc make_expected;
  } kTestCases[] = {
      {R"json({})json", false, [](AttributionConfig&) {}},
      {R"json({"max_sources_per_origin":"100"})json", false,
       [](AttributionConfig& c) { c.max_sources_per_origin = 100; }},
      {R"json({"max_destinations_per_source_site_reporting_site":"100"})json",
       false,
       [](AttributionConfig& c) {
         c.max_destinations_per_source_site_reporting_site = 100;
       }},
      {R"json({"max_destinations_per_rate_limit_window_reporting_site":"100"})json",
       false,
       [](AttributionConfig& c) {
         c.destination_rate_limit = {.max_per_reporting_site = 100};
       }},
      {R"json({"max_destinations_per_rate_limit_window":"100"})json", false,
       [](AttributionConfig& c) {
         c.destination_rate_limit = {.max_total = 100};
       }},
      {R"json({"destination_rate_limit_window_in_minutes":"5"})json", false,
       [](AttributionConfig& c) {
         c.destination_rate_limit = {.rate_limit_window = base::Minutes(5)};
       }},
      {R"json({"rate_limit_time_window_in_days":"30"})json", false,
       [](AttributionConfig& c) { c.rate_limit.time_window = base::Days(30); }},
      {R"json({"rate_limit_max_source_registration_reporting_origins":"10"})json",
       false,
       [](AttributionConfig& c) {
         c.rate_limit.max_source_registration_reporting_origins = 10;
       }},
      {R"json({"rate_limit_max_attribution_reporting_origins":"10"})json",
       false,
       [](AttributionConfig& c) {
         c.rate_limit.max_attribution_reporting_origins = 10;
       }},
      {R"json({"rate_limit_max_attributions":"10"})json", false,
       [](AttributionConfig& c) {
         c.rate_limit.max_attributions = 10;
       }},
      {R"json({"rate_limit_max_reporting_origins_per_source_reporting_site":"2"})json",
       false,
       [](AttributionConfig& c) {
         c.rate_limit.max_reporting_origins_per_source_reporting_site = 2;
       }},
      {R"json({"rate_limit_origins_per_site_window_in_days":"2"})json", false,
       [](AttributionConfig& c) {
         c.rate_limit.origins_per_site_window = base::Days(2);
       }},
      // "inf" is accepted for the epsilon.
      // NOTE(review): std::numeric_limits is used but <limits> is not among
      // the visible includes — presumably pulled in transitively; confirm.
      {R"json({"randomized_response_epsilon":"inf"})json", false,
       [](AttributionInteropConfig& c) {
         c.max_event_level_epsilon = std::numeric_limits<double>::infinity();
       }},
      {R"json({"max_event_level_reports_per_destination":"10"})json", false,
       [](AttributionConfig& c) {
         c.event_level_limit.max_reports_per_destination = 10;
       }},
      {R"json({"max_navigation_info_gain":"0.2"})json", false,
       [](AttributionConfig& c) {
         c.event_level_limit.max_navigation_info_gain = 0.2;
       }},
      {R"json({"max_event_info_gain":"0.2"})json", false,
       [](AttributionConfig& c) {
         c.event_level_limit.max_event_info_gain = 0.2;
       }},
      {R"json({"max_aggregatable_reports_per_destination":"10"})json", false,
       [](AttributionConfig& c) {
         c.aggregate_limit.max_reports_per_destination = 10;
       }},
      {R"json({"aggregatable_report_min_delay":"0"})json", false,
       [](AttributionConfig& c) {
         c.aggregate_limit.min_delay = base::TimeDelta();
       }},
      {R"json({"aggregatable_report_delay_span":"0"})json", false,
       [](AttributionConfig& c) {
         c.aggregate_limit.delay_span = base::TimeDelta();
       }},
      {R"json({
        "max_sources_per_origin":"10",
        "max_destinations_per_source_site_reporting_site":"10",
        "max_destinations_per_rate_limit_window_reporting_site": "1",
        "max_destinations_per_rate_limit_window": "2",
        "destination_rate_limit_window_in_minutes": "10",
        "rate_limit_time_window_in_days":"10",
        "rate_limit_max_source_registration_reporting_origins":"20",
        "rate_limit_max_attribution_reporting_origins":"15",
        "rate_limit_max_attributions":"10",
        "rate_limit_max_reporting_origins_per_source_reporting_site":"5",
        "rate_limit_origins_per_site_window_in_days":"5",
        "randomized_response_epsilon":"0.2",
        "max_event_level_reports_per_destination":"10",
        "max_navigation_info_gain":"5.5",
        "max_event_info_gain":"0.5",
        "max_aggregatable_reports_per_destination":"10",
        "aggregatable_report_min_delay":"10",
        "aggregatable_report_delay_span":"20"
      })json",
       true, [](AttributionInteropConfig& config) {
         AttributionConfig& c = config.attribution_config;
         c.max_sources_per_origin = 10;
         c.max_destinations_per_source_site_reporting_site = 10;
         c.rate_limit.time_window = base::Days(10);
         c.rate_limit.max_source_registration_reporting_origins = 20;
         c.rate_limit.max_attribution_reporting_origins = 15;
         c.rate_limit.max_attributions = 10;
         c.rate_limit.max_reporting_origins_per_source_reporting_site = 5;
         c.rate_limit.origins_per_site_window = base::Days(5);
         config.max_event_level_epsilon = 0.2;
         c.event_level_limit.max_reports_per_destination = 10;
         c.event_level_limit.max_navigation_info_gain = 5.5;
         c.event_level_limit.max_event_info_gain = 0.5;
         c.aggregate_limit.max_reports_per_destination = 10;
         c.aggregate_limit.min_delay = base::Minutes(10);
         c.aggregate_limit.delay_span = base::Minutes(20);
         c.destination_rate_limit = {.max_total = 2,
                                     .max_per_reporting_site = 1,
                                     .rate_limit_window = base::Minutes(10)};
       }}};

  for (const auto& test_case : kTestCases) {
    AttributionInteropConfig expected;
    absl::visit(base::Overloaded{
                    [&](MakeAttributionConfigFunc f) {
                      f(expected.attribution_config);
                    },
                    [&](MakeInteropConfigFunc f) { f(expected); },
                },
                test_case.make_expected);

    base::Value::Dict json = base::test::ParseJsonDict(test_case.json);

    if (test_case.required) {
      EXPECT_THAT(ParseAttributionInteropConfig(json),
                  base::test::ValueIs(expected))
          << json;
    } else {
      AttributionInteropConfig config;
      EXPECT_EQ("", MergeAttributionInteropConfig(json, config)) << json;
      EXPECT_EQ(config, expected) << json;
    }
  }
}

// Fields that must be positive, base-10-string-formatted integers: missing
// (parse) and zero (merge) both produce the same error message per field.
TEST(AttributionInteropParserTest, InvalidConfigPositiveIntegers) {
  const char* const kFields[] = {
      "max_sources_per_origin",
      "max_destinations_per_source_site_reporting_site",
      "max_destinations_per_rate_limit_window_reporting_site",
      "max_destinations_per_rate_limit_window",
      "destination_rate_limit_window_in_minutes",
      "rate_limit_time_window_in_days",
      "rate_limit_max_source_registration_reporting_origins",
      "rate_limit_max_attribution_reporting_origins",
      "rate_limit_max_attributions",
      "rate_limit_max_reporting_origins_per_source_reporting_site",
      "rate_limit_origins_per_site_window_in_days",
      "max_event_level_reports_per_destination",
      "max_aggregatable_reports_per_destination",
  };

  {
    // Full parse: every field is required, so an empty dict reports them all.
    auto result = ParseAttributionInteropConfig(base::Value::Dict());
    for (const char* field : kFields) {
      EXPECT_THAT(result, base::test::ErrorIs(HasSubstr(
                              base::StrCat({"[\"", field,
                                            "\"]: must be a positive integer "
                                            "formatted as base-10 string"}))))
          << field;
    }
  }

  {
    // Merge: zero is rejected as non-positive.
    AttributionInteropConfig config;
    base::Value::Dict dict;
    for (const char* field : kFields) {
      dict.Set(field, "0");
    }
    std::string error = MergeAttributionInteropConfig(dict, config);
    for (const char* field : kFields) {
      EXPECT_THAT(
          error,
          HasSubstr(base::StrCat(
              {"[\"", field,
               "\"]: must be a positive integer formatted as base-10 string"})))
          << field;
    }
  }
}

// Fields that allow zero but not negative values.
TEST(AttributionInteropParserTest, InvalidConfigNonNegativeIntegers) {
  const char* const kFields[] = {
      "aggregatable_report_min_delay",
      "aggregatable_report_delay_span",
  };

  {
    auto result = ParseAttributionInteropConfig(base::Value::Dict());
    for (const char* field : kFields) {
      EXPECT_THAT(result, base::test::ErrorIs(HasSubstr(base::StrCat(
                              {"[\"", field,
                               "\"]: must be a non-negative integer "
                               "formatted as base-10 string"}))))
          << field;
    }
  }

  {
    AttributionInteropConfig config;
    base::Value::Dict dict;
    for (const char* field : kFields) {
      dict.Set(field, "-10");
    }
    std::string error = MergeAttributionInteropConfig(dict, config);
    for (const char* field : kFields) {
      EXPECT_THAT(error,
                  HasSubstr(base::StrCat({"[\"", field,
                                          "\"]: must be a non-negative integer "
                                          "formatted as base-10 string"})))
          << field;
    }
  }
}

// The epsilon accepts "inf" or a non-negative double; the expected substrings
// intentionally match the production error text (including its "formated"
// spelling).
TEST(AttributionInteropParserTest, InvalidConfigRandomizedResponseEpsilon) {
  {
    auto result = ParseAttributionInteropConfig(base::Value::Dict());
    EXPECT_THAT(result,
                base::test::ErrorIs(HasSubstr(
                    "[\"randomized_response_epsilon\"]: must be \"inf\" or a "
                    "non-negative double formated as a base-10 string")));
  }

  {
    AttributionInteropConfig config;
    base::Value::Dict dict;
    dict.Set("randomized_response_epsilon", "-1.5");
    std::string error = MergeAttributionInteropConfig(dict, config);
    EXPECT_THAT(
        error,
        HasSubstr("[\"randomized_response_epsilon\"]: must be \"inf\" or a "
                  "non-negative double formated as a base-10 string"));
  }
}

// Info-gain limits follow the same "inf or non-negative double" rule.
TEST(AttributionInteropParserTest, InvalidConfigMaxInfGain) {
  {
    auto result = ParseAttributionInteropConfig(base::Value::Dict());
    ASSERT_FALSE(result.has_value());
    EXPECT_THAT(
        result.error(),
        HasSubstr("[\"randomized_response_epsilon\"]: must be \"inf\" or a "
                  "non-negative double formated as a base-10 string"));
  }

  {
    AttributionInteropConfig config;
    base::Value::Dict dict;
    dict.Set("max_navigation_info_gain", "-1.5");
    std::string error = MergeAttributionInteropConfig(dict, config);
    EXPECT_THAT(
        error,
        HasSubstr("[\"max_navigation_info_gain\"]: must be \"inf\" or a "
                  "non-negative double formated as a base-10 string"));
  }

  {
    AttributionInteropConfig config;
    base::Value::Dict dict;
    dict.Set("max_event_info_gain", "-1.5");
    std::string error = MergeAttributionInteropConfig(dict, config);
    EXPECT_THAT(error,
                HasSubstr("[\"max_event_info_gain\"]: must be \"inf\" or a "
                          "non-negative double formated as a base-10 string"));
  }
}

// Covers AttributionInteropOutput::Parse: unknown-field rejection, per-entry
// field validation, required chronological ordering, and a fully-valid case.
TEST(AttributionInteropParserTest, ParseOutput) {
  const base::Value kExpectedPayload("abc");

  const struct {
    const char* desc;
    const char* json;
    ::testing::Matcher<base::expected<AttributionInteropOutput, std::string>>
        matches;
  } kTestCases[] = {
      {
          "top_level_errors",
          R"json({"foo": []})json",
          ErrorIs(AllOf(
              HasSubstr(R"(["reports"]: must be present)"),
              HasSubstr(R"(["unparsable_registrations"]: must be present)"),
              HasSubstr(R"(["foo"]: unknown field)"))),
      },
      {
          "second_level_errors",
          R"json({
            "reports": [{"foo": null}],
            "unparsable_registrations": [{"bar": 123}]
          })json",
          ErrorIs(AllOf(
              HasSubstr(R"(["reports"][0]["report_time"]: must be an integer)"),
              HasSubstr(R"(["reports"][0]["report_url"]: must be a valid URL)"),
              HasSubstr(R"(["reports"][0]["payload"]: required)"),
              HasSubstr(R"(["reports"][0]["foo"]: unknown field)"),
              HasSubstr(
                  R"(["unparsable_registrations"][0]["time"]: must be an integer)"),
              HasSubstr(
                  R"(["unparsable_registrations"][0]["type"]: must be either)"),
              HasSubstr(
                  R"(["unparsable_registrations"][0]["bar"]: unknown field)"))),
      },
      {
          "unsorted_reports",
          R"json({
            "reports": [
              {
                "report_time": "2",
                "report_url": "https://a.test/x",
                "payload": "abc"
              },
              {
                "report_time": "1",
                "report_url": "https://a.test/y",
                "payload": "def"
              }
            ],
            "unparsable_registrations": []
          })json",
          ErrorIs(HasSubstr(
              R"(["reports"][1]["report_time"]: must be greater than or equal)")),
      },
      {
          "unsorted_unparsable_registrations",
          R"json({
            "unparsable_registrations": [
              {"time": "4", "type": "source"},
              {"time": "3", "type": "trigger"}
            ],
            "reports": []
          })json",
          ErrorIs(HasSubstr(
              R"(["unparsable_registrations"][1]["time"]: must be greater than or equal)")),
      },
      {
          "ok",
          R"json({
            "reports": [{
              "report_time": "123",
              "report_url": "https://a.test/x",
              "payload": "abc"
            }],
            "unparsable_registrations": [{
              "time": "456",
              "type": "trigger"
            }]
          })json",
          ValueIs(AllOf(
              Field(
                  &AttributionInteropOutput::reports,
                  ElementsAre(AllOf(
                      Field(&AttributionInteropOutput::Report::time,
                            base::Time::UnixEpoch() + base::Milliseconds(123)),
                      Field(&AttributionInteropOutput::Report::url,
                            GURL("https://a.test/x")),
                      Field(&AttributionInteropOutput::Report::payload,
                            // `std::ref` needed because `base::Value` isn't
                            // copyable
                            Eq(std::ref(kExpectedPayload)))))),
              Field(
                  &AttributionInteropOutput::unparsable_registrations,
                  ElementsAre(AllOf(
                      Field(&AttributionInteropOutput::UnparsableRegistration::
                                time,
                            base::Time::UnixEpoch() + base::Milliseconds(456)),
                      Field(&AttributionInteropOutput::UnparsableRegistration::
                                type,
                            attribution_reporting::mojom::RegistrationType::
                                kTrigger)))))),
      },
  };

  for (const auto& test_case : kTestCases) {
    SCOPED_TRACE(test_case.desc);
    base::Value::Dict value = base::test::ParseJsonDict(test_case.json);
    EXPECT_THAT(AttributionInteropOutput::Parse(std::move(value)),
                test_case.matches);
  }
}

}  // namespace
}  // namespace content
/** * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.hadoop.yarn.server.timelineservice.storage.reader; import java.io.IOException; import java.util.Map; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.Result; import org.apache.hadoop.hbase.client.ResultScanner; import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.filter.FilterList; import org.apache.hadoop.hbase.filter.PageFilter; import org.apache.hadoop.yarn.api.records.timelineservice.FlowActivityEntity; import org.apache.hadoop.yarn.api.records.timelineservice.FlowRunEntity; import org.apache.hadoop.yarn.api.records.timelineservice.TimelineEntity; import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineDataToRetrieve; import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineEntityFilters; import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderContext; import org.apache.hadoop.yarn.server.timelineservice.reader.TimelineReaderUtils; import org.apache.hadoop.yarn.server.timelineservice.storage.common.BaseTable; import org.apache.hadoop.yarn.server.timelineservice.storage.common.KeyConverter; import 
org.apache.hadoop.yarn.server.timelineservice.storage.common.LongKeyConverter;
import
org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowActivityColumnPrefix;
import
org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowActivityRowKey;
import
org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowActivityRowKeyPrefix;
import
org.apache.hadoop.yarn.server.timelineservice.storage.flow.FlowActivityTable;
import org.apache.hadoop.yarn.webapp.BadRequestException;

import com.google.common.base.Preconditions;

/**
 * Timeline entity reader for flow activity entities that are stored in the
 * flow activity table.
 */
class FlowActivityEntityReader extends TimelineEntityReader {
  // Shared, stateless table descriptor; safe to reuse across reader instances.
  private static final FlowActivityTable FLOW_ACTIVITY_TABLE =
      new FlowActivityTable();

  /**
   * Used to convert Long key components to and from storage format.
   */
  private final KeyConverter<Long> longKeyConverter = new LongKeyConverter();

  // Constructor for multi-entity (list) queries.
  public FlowActivityEntityReader(TimelineReaderContext ctxt,
      TimelineEntityFilters entityFilters, TimelineDataToRetrieve toRetrieve) {
    super(ctxt, entityFilters, toRetrieve);
  }

  // Constructor for single-entity queries (unsupported here; see getResult).
  public FlowActivityEntityReader(TimelineReaderContext ctxt,
      TimelineDataToRetrieve toRetrieve) {
    super(ctxt, toRetrieve);
  }

  /**
   * Uses the {@link FlowActivityTable}.
   */
  @Override
  protected BaseTable<?> getTable() {
    return FLOW_ACTIVITY_TABLE;
  }

  @Override
  protected void validateParams() {
    Preconditions.checkNotNull(getContext().getClusterId(),
        "clusterId shouldn't be null");
  }

  @Override
  protected void augmentParams(Configuration hbaseConf, Connection conn)
      throws IOException {
    createFiltersIfNull();
  }

  // Flow activity queries are row-key driven; no column filters are needed.
  @Override
  protected FilterList constructFilterListBasedOnFilters() throws IOException {
    return null;
  }

  @Override
  protected FilterList constructFilterListBasedOnFields() {
    return null;
  }

  @Override
  protected Result getResult(Configuration hbaseConf, Connection conn,
      FilterList filterList) throws IOException {
    throw new UnsupportedOperationException(
        "we don't support a single entity query");
  }

  /**
   * Builds the HBase scan. Three cases:
   * (1) no fromId and no time range: prefix scan over all rows for the
   *     cluster; (2) fromId set: resume pagination from that exact row key,
   *     stopping at the createdTimeBegin boundary; (3) time range only:
   *     scan between the createdTimeEnd and createdTimeBegin prefixes (row
   *     keys store inverted timestamps, so "end" is the start row).
   */
  @Override
  protected ResultScanner getResults(Configuration hbaseConf, Connection conn,
      FilterList filterList) throws IOException {
    Scan scan = new Scan();
    String clusterId = getContext().getClusterId();
    if (getFilters().getFromId() == null
        && getFilters().getCreatedTimeBegin() == 0L
        && getFilters().getCreatedTimeEnd() == Long.MAX_VALUE) {
      // All records have to be chosen.
      scan.setRowPrefixFilter(new FlowActivityRowKeyPrefix(clusterId)
          .getRowKeyPrefix());
    } else if (getFilters().getFromId() != null) {
      FlowActivityRowKey key = null;
      try {
        key =
            FlowActivityRowKey.parseRowKeyFromString(getFilters().getFromId());
      } catch (IllegalArgumentException e) {
        throw new BadRequestException("Invalid filter fromid is provided.");
      }
      if (!clusterId.equals(key.getClusterId())) {
        throw new BadRequestException(
            "fromid doesn't belong to clusterId=" + clusterId);
      }
      scan.setStartRow(key.getRowKey());
      // The "- 1" makes the createdTimeBegin boundary inclusive in the
      // inverted-timestamp row-key ordering; clamp at 0 for non-positive
      // begin times.
      scan.setStopRow(
          new FlowActivityRowKeyPrefix(clusterId,
              (getFilters().getCreatedTimeBegin() <= 0 ? 0
                  : (getFilters().getCreatedTimeBegin() - 1)))
                      .getRowKeyPrefix());
    } else {
      scan.setStartRow(new FlowActivityRowKeyPrefix(clusterId, getFilters()
          .getCreatedTimeEnd()).getRowKeyPrefix());
      scan.setStopRow(new FlowActivityRowKeyPrefix(clusterId, (getFilters()
          .getCreatedTimeBegin() <= 0 ? 0
              : (getFilters().getCreatedTimeBegin() - 1))).getRowKeyPrefix());
    }
    // use the page filter to limit the result to the page size
    // the scanner may still return more than the limit; therefore we need to
    // read the right number as we iterate
    scan.setFilter(new PageFilter(getFilters().getLimit()));
    return getTable().getResultScanner(hbaseConf, conn, scan);
  }

  /**
   * Converts one HBase row into a FlowActivityEntity, attaching one
   * FlowRunEntity per (runId, version) pair stored under the RUN_ID column
   * prefix.
   */
  @Override
  protected TimelineEntity parseEntity(Result result) throws IOException {
    FlowActivityRowKey rowKey = FlowActivityRowKey.parseRowKey(result.getRow());

    Long time = rowKey.getDayTimestamp();
    String user = rowKey.getUserId();
    String flowName = rowKey.getFlowName();

    FlowActivityEntity flowActivity = new FlowActivityEntity(
        getContext().getClusterId(), time, user, flowName);
    // set the id
    flowActivity.setId(flowActivity.getId());
    // get the list of run ids along with the version that are associated with
    // this flow on this day
    Map<Long, Object> runIdsMap =
        FlowActivityColumnPrefix.RUN_ID.readResults(result, longKeyConverter);
    for (Map.Entry<Long, Object> e : runIdsMap.entrySet()) {
      Long runId = e.getKey();
      String version = (String)e.getValue();
      FlowRunEntity flowRun = new FlowRunEntity();
      flowRun.setUser(user);
      flowRun.setName(flowName);
      flowRun.setRunId(runId);
      flowRun.setVersion(version);
      // set the id
      flowRun.setId(flowRun.getId());
      flowActivity.addFlowRun(flowRun);
    }
    // Expose the serialized row key so clients can paginate via "fromid".
    flowActivity.getInfo().put(TimelineReaderUtils.FROMID_KEY,
        rowKey.getRowKeyAsString());
    return flowActivity;
  }
}
# References and Borrowing
- We use `&` to borrow a reference to a value.
- We can have multiple references to the same value.
- References must always be valid.
- References are immutable by default.
```
fn main() {
    let s = String::from("Hello");
    let len = calculate_length(&s);
    println!("The size of `{}` is {}", s, len);
}
//Function to calculate length
fn calculate_length(s: &String) -> usize {
    let len = s.len();
    len
}
```
- We can make a reference mutable by using `&mut`.
```
fn main() {
    //Mutable References
    let mut s = String::from("Hello");
    change(&mut s);
    println!("{}", s);
}
fn change(some_string: &mut String) {
    some_string.push_str(", world");
}
```
- We can have only one mutable reference to a value at a time.
```
fn main(){
    //Multiple mutable references
    let mut s = String::from("Hello");
    let r1 = &mut s;
    let r2 = &mut s;
    println!("{}, {}", r1, r2);
}
```
> We cannot do this, because both mutable borrows are used at the same time. Instead, we can end the first borrow's scope before creating the second:
```
fn main() {
    //Multiple mutable references
    let mut s = String::from("Hello");
    {
        let r1 = &mut s;
        r1.push_str(" World");
    }
    let r2 = &mut s;
    println!("{}", r2);
}
```
- We can have multiple immutable references to a value at the same time.
- We can mix immutable and mutable references only if every immutable reference's last use occurs *before* the mutable reference is created (the compiler tracks this via non-lexical lifetimes).
```
fn main() {
    let mut s = String::from("Hello");
    let r1 = &s;
    let r2 = &s;
    println!("{} and {}", r1, r2);
    let r3 = &mut s;
    println!("{}", r3);
}
```
```
fn main() {
    let mut s = String::from("Hello");
    let r1 = &s;
    let r2 = &s;
    let r3 = &mut s;
    println!("{} and {}", r1, r2);
    println!("{}", r3);
}
```
> The second version does not compile: the immutable references `r1` and `r2` are still used after the mutable reference `r3` is created, and Rust forbids a mutable borrow while immutable borrows of the same value are still live.
### Dangling References
> A reference to a value that is no longer valid.
```
// Dangling References
fn main() {
    let reference_to_nothing = dangle();
}
fn dangle() -> &String {
    let s = String::from("hello");
    &s
}
```
> This code will not compile because the return value of dangle() is a reference to the String s that dangle() no longer owns.
The reference will never be valid. > Instead of returning a reference to the String, we could return the String itself. ``` fn main() { let reference_to_nothing = no_dangle(); println!("{}", reference_to_nothing); } fn no_dangle() -> String { let s = String::from("hello"); s } ```
import { insertIntoObject, getFromObject } from '../src/index'; describe('object-json-path-ts insertIntoObject', () => { describe('invalid call', () => { it('should throw an error when an empty path is provided', () => { expect( () => { insertIntoObject({}, { path: '', value: 'joe' }); }, ).toThrow('Path can not be empty.'); }); }); describe('building an object', () => { it('should add an item to the root of the object', () => { expect( insertIntoObject({}, { path: 'user', value: 'joe' }), ).toEqual({ user: 'joe' }); }); it('should add an item deep within the object the object', () => { expect( insertIntoObject({}, { path: 'user.name', value: 'joe' }), ).toEqual({ user: { name: 'joe' } }); }); it('should add multiple items at the same time', () => { expect( insertIntoObject({}, [ { path: 'user.name', value: 'joe' }, { path: 'user.age', value: 3 }, ]), ).toEqual({ user: { name: 'joe', age: 3 } }); }); }); describe('error', () => { it(`should throw an error when an item is added to a path that resolves to a primitive`, () => { const user = {}; insertIntoObject(user, { path: 'user', value: 'joe' }); expect( () => { insertIntoObject(user, { path: 'user.name', value: 'joe' }); }, ).toThrow('Unable to add a value to path \'user.name\' because the value at \'user\' is a primitive when an object is needed.'); }); }); }); describe('object-json-path-ts getFromObject', () => { describe('invalid call', () => { it('should throw an error when the path does not resolve to an object', () => { expect( () => { getFromObject({ user: 'joe' }, 'user.name'); }, ).toThrow('Unable to get a value from path \'user.name\' because the value at \'user\' is a primitive when an object is needed.'); }); it('should throw an error when the path at the last value was not found', () => { expect( () => { getFromObject({ user: { name: 'joe' } }, 'user.age'); }, ).toThrow('Unable to get a value from path \'user.age\' because the object at \'user\' did not have property age.'); }); it('should NOT throw 
an error when the path at the last value was not found but required is false', () => { expect(getFromObject({ user: { name: 'joe' } }, 'user.age', false)).toBeUndefined; }); it('should throw an error when the path was not found', () => { expect( () => { getFromObject({ admin: { user: { name: 'joe' } } }, 'admin.customer.age'); }, ).toThrow('Unable to resolve path. Object did not have a property \'customer\' at path \'admin\'.'); }); it('should throw an error path is totally different from object', () => { expect( () => { getFromObject({ admin: { user: { name: 'joe' } } }, 'nope.user.name'); }, ).toThrow('Unable to resolve path. Object did not have a property \'nope\' at path \'\'.'); }); }); describe('valid call', () => { it('should successfully get a value', () => { expect(getFromObject({ admin: 'joe' }, 'admin')).toEqual('joe'); expect(getFromObject({ admin: { age: 32 } }, 'admin.age')).toEqual(32); expect( getFromObject({ admin: { privileges: ['a', 'b'] } }, 'admin.privileges'), ).toEqual(['a', 'b']); expect( getFromObject({ admin: { privileges: ['a', 'b'] } }, 'admin'), ).toEqual({ privileges: ['a', 'b'] }); expect( getFromObject({ admin: { privileges: 'a privilege' } }, 'admin'), ).toEqual({ privileges: 'a privilege' }); }); }); });
import Contract from '@/services/contract'
import { CONTRACTS } from '@/const/contracts'
import web3 from '@/services/web3'
import { isAddressCorrect } from '@/utils/web3'

/**
 * Thin wrapper around the Souli token contract.
 * Exposes read-only helpers for balances, supply, inscriptions ("seeds")
 * and holder lists. Exported as a module-level singleton.
 */
class Souli {
  constructor() {
    this.contract = new Contract(CONTRACTS.token.address, CONTRACTS.token.abi)
  }

  /** Token balance of `address`. */
  async getBalance(address) {
    return this.contract.call('balanceOf', [address])
  }

  /** Total token supply. */
  async getSupply() {
    return this.contract.call('totalSupply')
  }

  /** Token decimals. */
  async getDecimals() {
    return this.contract.call('decimals')
  }

  /** Seed owned by `address` at position `index`. */
  async getSeedByIndex(address, index) {
    return this.contract.call('inscriptionOfOwnerByIndex', [address, index])
  }

  /** SVG rendering of a seed. */
  async getSeedSvg(seed) {
    return this.contract.call('getSvg', [seed])
  }

  /** JSON metadata string of a seed. */
  async getSeedMeta(seed) {
    return this.contract.call('getMeta', [seed])
  }

  /** Number of distinct holders. */
  async getHoldersAmount() {
    return this.contract.call('holdersCount')
  }

  /** Combined count of dynamic (spore) and regular (mushroom) inscriptions. */
  async getInscriptionSupply() {
    const [spores, mushrooms] = await Promise.all([
      this.contract.call('dynamicInscriptionTotalCount'),
      this.contract.call('inscriptionsTotalCount'),
    ])
    return Number(spores) + Number(mushrooms)
  }

  /** Fetch SVG + parsed metadata for one seed. */
  async getInscription(seed) {
    const [svg, meta] = await Promise.all([
      this.getSeedSvg(seed),
      this.getSeedMeta(seed),
    ])
    return {
      svg,
      seed,
      meta: JSON.parse(meta),
    }
  }

  /**
   * All seeds owned by `address`. A non-zero dynamic inscription is
   * prepended and flagged with `isDynamic`.
   */
  async getInscriptionsByAddress(address) {
    const [count, degree] = await Promise.all([
      this.contract.call('inscriptionCount', [address]),
      this.contract.call('dynamicInscription', [address]),
    ])
    const seedPromises = []
    for (let index = 0; index < count; index++) {
      seedPromises.push(this.getSeedByIndex(address, index))
    }
    let seeds = await Promise.all(seedPromises)
    seeds = seeds.map(seed => ({ seed: seed.seed, extra: seed.extra, owner: address }))
    if (Number(degree.seed)) {
      seeds.unshift({ isDynamic: true, seed: degree.seed, extra: degree.extra, owner: address })
    }
    return seeds
  }

  /** Page of holder addresses, with zero-address padding filtered out. */
  async getHolders(fromIndex = 0, holderAmount = 4) {
    const holders = await this.contract.call('getHoldersList', [fromIndex, holderAmount])
    return holders.filter(address => !address.startsWith('0x000000'))
  }

  /** Flattened list of seeds belonging to one page of holders. */
  async getHoldersSeeds(fromIndex = 0, amount = 4) {
    const holders = await this.getHolders(fromIndex, amount)
    const holdersSeeds = await Promise.all(
      holders.map(address => this.getInscriptionsByAddress(address))
    )
    let seeds = []
    for (let holderSeeds of holdersSeeds) {
      seeds = [...seeds, ...holderSeeds]
    }
    return seeds
  }

  /** Resolve a list of seeds into full inscription objects. */
  async fetchInscriptionsBySeeds(seeds) {
    const promises = []
    for (let seed of seeds) {
      // Fixed: use `this` instead of the module singleton `souli`.
      promises.push(this.getInscription(seed))
    }
    let list = await Promise.all(promises)
    return list || []
  }

  /** Seeds for `address`, or [] when the address is missing or malformed. */
  async fetchSeedsByAddress(address) {
    // Bug fix: isAddressCorrect must be *called* with the address; the bare
    // function reference is always truthy, so invalid addresses passed through.
    if (address && isAddressCorrect(address)) {
      return this.getInscriptionsByAddress(address)
    }
    return []
  }
}

const souli = new Souli()

export default souli
# Trajectory with air resistance.
#
# Many elementary mechanics problems deal with objects moving or flying
# through the air, but almost always ignore friction and air resistance to
# keep the equations solvable.  Using a computer, however, we do not need
# solvable equations.
#
# Consider a spherical cannonball shot from a cannon standing on level
# ground.  The air resistance on a moving sphere is a force opposite to the
# direction of motion with magnitude
#
#     F = (1/2) * pi * R^2 * rho * C * v^2
#
# where R is the radius of the sphere, rho is the density of air, v is the
# speed, and C is the drag coefficient (a property of the shape of the
# moving object, here a sphere).
#
# (a) Starting from Newton's second law, F = m*a, the equations of motion
# for the position (x, y) of the cannonball are
#
#     ddot{x} = -(pi*R^2*rho*C / (2*m)) * dot{x} * sqrt(dot{x}^2 + dot{y}^2)
#     ddot{y} = -g - (pi*R^2*rho*C / (2*m)) * dot{y} * sqrt(dot{x}^2 + dot{y}^2)
#
# where m is the mass of the cannonball, g is the acceleration due to
# gravity, and dot{x} / ddot{x} are the first and second time derivatives
# of x.  (Bug fix: these equation lines were previously plain uncommented
# text, which made the script a syntax error.)
#
# (b) Turn these two second-order equations into four first-order ones and
# solve them for a cannonball of mass 1 kg and radius 8 cm, fired at 30
# degrees above the horizontal with initial velocity 100 m/s.  Air density
# is rho = 1.22 kg/m^3 and the drag coefficient of a sphere is C = 0.47.
# Plot the trajectory (y as a function of x).
#
# (c) When air resistance is ignored, the range of a projectile does not
# depend on its mass; in real life the mass certainly matters.  Experiment
# with the program to determine whether the cannonball travels farther when
# it is heavier or lighter, e.g. by plotting trajectories for several masses.

# Bug fix: these names were imported from scipy, which no longer re-exports
# numpy's namespace; numpy is the correct source.
from numpy import array, arange, pi, sin, cos, sqrt
from pylab import plot, show, xlabel, ylabel

t0 = 0                   # start time (s)
tf = 7                   # end time (s)
N = 10000                # number of integration steps
h = (tf - t0) / N        # time step (s)
g = 9.81                 # gravitational acceleration (m/s^2)
m = 1                    # cannonball mass (kg)
R = 0.08                 # cannonball radius (m)
theta0 = 30 * pi / 180   # launch angle (rad)
v0 = 100                 # initial speed (m/s)
rho = 1.22               # air density (kg/m^3)
C = 0.47                 # drag coefficient for a sphere
c = pi * R ** 2 * rho * C / 2  # drag prefactor pi*R^2*rho*C/2


def constante(m):
    """Drag constant per unit mass: (pi*R^2*rho*C/2) / m."""
    return c / m


def f(r, t, m):
    """Right-hand side of the first-order system.

    r = [x, vx, y, vy]; returns [vx, ax, vy, ay] with quadratic drag
    opposing the velocity and gravity acting on y.
    """
    # x = r[0]
    vx = r[1]
    # y = r[2]
    vy = r[3]
    v = sqrt(vx ** 2 + vy ** 2)
    return array([vx, -constante(m) * vx * v,
                  vy, -g - constante(m) * vy * v], float)


tpoints = arange(t0, tf, h)


def trajectoria(m):
    """Integrate the trajectory with classic fourth-order Runge-Kutta.

    Returns (xpoints, ypoints) as float arrays for a cannonball of mass m
    launched with the module-level initial conditions.
    """
    xpoints = []
    ypoints = []
    r = array([0, v0 * cos(theta0), 0, v0 * sin(theta0)], float)
    for t in tpoints:
        xpoints.append(r[0])
        ypoints.append(r[2])
        k1 = h * f(r, t, m)
        k2 = h * f(r + 0.5 * k1, t + 0.5 * h, m)
        k3 = h * f(r + 0.5 * k2, t + 0.5 * h, m)
        k4 = h * f(r + k3, t + h, m)
        r += (k1 + 2 * k2 + 2 * k3 + k4) / 6
    return array(xpoints, float), array(ypoints, float)


# Trajectories for three masses: heavier cannonballs travel farther because
# the drag deceleration scales as 1/m.
trajectoria1x, trajectoria1y = trajectoria(1)
trajectoria2x, trajectoria2y = trajectoria(2)
trajectoria3x, trajectoria3y = trajectoria(4)
plot(trajectoria1x, trajectoria1y, 'b')
plot(trajectoria2x, trajectoria2y, 'g')
plot(trajectoria3x, trajectoria3y, 'k')
xlabel('x (m)')
ylabel('y (m)')
show()
import 'package:advanced_flutter_clean_architecture/data/network/failure.dart';
import 'package:advanced_flutter_clean_architecture/data/network/requests.dart';
import 'package:advanced_flutter_clean_architecture/domain/model/models.dart';
import 'package:advanced_flutter_clean_architecture/domain/repository/repository.dart';
import 'package:advanced_flutter_clean_architecture/domain/usecase/base_usecase.dart';
import 'package:dartz/dartz.dart';

/// Use case that authenticates a user with an email/password pair.
class LoginUseCase extends BaseUseCase<LoginUseCaseInput, Authentication> {
  final Repository _repository;

  LoginUseCase(this._repository);

  /// Builds a [LoginRequest] from [input] and delegates the call to the
  /// repository, returning either a [Failure] or an [Authentication].
  @override
  Future<Either<Failure, Authentication>> execute(
      LoginUseCaseInput input) async {
    final request = LoginRequest(input.email, input.password);
    return await _repository.login(request);
  }
}

/// Carrier for the credentials consumed by [LoginUseCase].
class LoginUseCaseInput {
  String email;
  String password;

  LoginUseCaseInput(this.email, this.password);
}
// Atribuição via desestruturação (Objetos) const pessoa = { // aqui criamos um objeto com informações de uma pessoa nome: 'Luiz', sobrenome: 'Miranda', idade: 30, endereco: { // é bem normal criar um objeto dentro de outro objeto no JS rua: 'Av Brasil', numero: 320 } } //const nome = pessoa.nome; // estamos passando uma informação do objeto para uma variavel //console.log(nome); // usando atribuição via desestruturação: //const { nome } = pessoa; //como estamos criando uma variavel que é exatamente o mesmo nome da informação dentro do objeto só usamos chaves // em resumo pedimos para criar um variavel com este nome e também pedimos para procurar este mesmo nome dentro do objeto que escolhermos. //console.log(nome); //podemos também usar mais de uma variavel // Caso o nome ou valor que você passar não exista no objeto ele vai ficar como "undefined" //const { nome, sobrenome, idade } = pessoa; // e para não aparecer undefined usamos '' //const {nome = '', sobrenome, idade } = pessoa; const {nome: teste = '', sobrenome, idade } = pessoa; // aqui substituimos o nome da variavel dentro do objeto, no objeto vai ficar o mesmo nome // mas na variavel vai ser 'teste' console.log(teste, sobrenome, idade); // Para selecionar um objeto dentro de outro objeto: //const {endereco: {rua, numero} } = pessoa; // selecionamos o objeto dentro do outro objeto e pegamos as informações desejadas const {endereco: {rua: r = 12345, numero}, endereco } = pessoa; // podemos manipular as informações desses objetos também // pegando o resto das informações do objeto: const {nome, ...resto} = pessoa; console.log(nome, resto);
<template>
  <div>
    <!-- Breadcrumb navigation -->
    <el-breadcrumb separator-class="el-icon-arrow-right">
      <el-breadcrumb-item :to="{ path: '/home' }">首页</el-breadcrumb-item>
      <el-breadcrumb-item>订单管理</el-breadcrumb-item>
      <el-breadcrumb-item>订单列表</el-breadcrumb-item>
    </el-breadcrumb>
    <!-- Card view -->
    <el-card>
      <!-- Search bar -->
      <el-row>
        <el-col :span="8">
          <el-input placeholder="请输入内容">
            <el-button slot="append" icon="el-icon-search"></el-button>
          </el-input>
        </el-col>
      </el-row>
      <!-- Order list table -->
      <el-table :data="orderList" border stripe>
        <el-table-column type="index" label="#"></el-table-column>
        <el-table-column label="订单编号" prop="order_number"></el-table-column>
        <el-table-column label="订单价格" prop="order_price"></el-table-column>
        <el-table-column label="是否付款" prop="pay_status">
          <template slot-scope="scope">
            <el-tag type="success" v-if="scope.row.pay_status === 1">已付款</el-tag>
            <!-- Bug fix: was a plain `else` attribute (ignored by Vue), so both
                 tags rendered at the same time; it must be the v-else directive. -->
            <el-tag type="danger" v-else>未付款</el-tag>
          </template>
        </el-table-column>
        <el-table-column label="是否发货" prop="is_send">
          <template slot-scope="scope">{{scope.row.is_send}}</template>
        </el-table-column>
        <el-table-column label="下单时间" prop="create_time">
          <template slot-scope="scope">{{scope.row.create_time | dateFormat}}</template>
        </el-table-column>
        <el-table-column label="操作">
          <template>
            <!-- Bug fix: valid Element UI button sizes are medium/small/mini;
                 "min" is not recognized. -->
            <el-button type="primary" size="mini" icon="el-icon-edit" @click="showBox"></el-button>
            <el-button type="success" size="mini" icon="el-icon-location" @click="showLocationBox"></el-button>
          </template>
        </el-table-column>
      </el-table>
      <!-- Pagination -->
      <el-pagination
        @size-change="handleSizeChange"
        @current-change="handleCurrentChange"
        :current-page="queryInfo.pagenum"
        :page-sizes="[5, 8,10, 15]"
        :page-size="queryInfo.pagesize"
        layout="total, sizes, prev, pager, next, jumper"
        :total="total"
      ></el-pagination>
    </el-card>
    <!-- Edit-address dialog -->
    <el-dialog title="修改地址" :visible.sync="dialogVisible" width="30%" @close="addressDialogClose">
      <!-- Edit-address form -->
      <el-form
        :model="addressForm"
        :rules="addressFormRules"
        ref="addressFormRef"
        label-width="100px"
      >
        <el-form-item label="省市区/县" prop="address1">
          <el-cascader
            :options="citydata"
            :props="{ expandTrigger: 'hover' }"
            clearable
            v-model="addressForm.address1"
          ></el-cascader>
        </el-form-item>
        <el-form-item label="详细地址" prop="address2">
          <el-input v-model="addressForm.address2"></el-input>
        </el-form-item>
      </el-form>
      <span slot="footer" class="dialog-footer">
        <el-button @click="dialogVisible = false">取 消</el-button>
        <el-button type="primary" @click="dialogVisible = false">确 定</el-button>
      </span>
    </el-dialog>
    <!-- Logistics-progress dialog -->
    <el-dialog
      title="物流进度"
      :visible.sync="loactionDialogVisible"
      width="50%"
      @close="loactionDialogClose"
    >
      <el-timeline>
        <el-timeline-item
          v-for="(activity, index) in loactionInfo"
          :key="index"
          :timestamp="activity.time"
        >{{activity.context}}</el-timeline-item>
      </el-timeline>
    </el-dialog>
  </div>
</template>

<script>
import citydata from './citydata.js'

export default {
  data() {
    return {
      // Query parameters sent with the order-list request
      queryInfo: {
        query: '',
        pagenum: 1,
        pagesize: 10
      },
      // Total number of records
      total: 0,
      orderList: [],
      // Visibility of the edit-address dialog
      dialogVisible: false,
      // Address form model
      addressForm: {
        address1: [],
        address2: ''
      },
      // Validation rules for the address form
      addressFormRules: {
        address1: [
          { required: true, message: '请选择省市区/县', trigger: 'blur' }
        ],
        address2: [
          { required: true, message: '请输入详细地址', trigger: 'blur' }
        ]
      },
      // Province/city/district cascade data
      citydata,
      // Visibility of the logistics dialog (note: "loaction" typo kept for
      // consistency with existing references)
      loactionDialogVisible: false,
      loactionInfo: []
    }
  },
  created() {
    this.getOrderList()
  },
  methods: {
    // Fetch one page of orders from the API.
    async getOrderList() {
      const { data: res } = await this.$http.get('orders', {
        params: this.queryInfo
      })
      if (res.meta.status !== 200) {
        return this.$message.error('获取订单列表失败')
      }
      this.total = res.data.total
      this.orderList = res.data.goods
    },
    // Page size selection changed.
    handleSizeChange(newSize) {
      this.queryInfo.pagesize = newSize
      this.getOrderList()
    },
    // Current page changed.
    handleCurrentChange(newNum) {
      this.queryInfo.pagenum = newNum
      this.getOrderList()
    },
    // Show the edit-address dialog.
    showBox() {
      this.dialogVisible = true
    },
    // Reset the form when the edit-address dialog closes.
    addressDialogClose() {
      this.$refs.addressFormRef.resetFields()
    },
    // Show the logistics-progress dialog (hard-coded demo tracking number).
    async showLocationBox() {
      const { data: res } = await this.$http.get('/kuaidi/1106975712662')
      if (res.meta.status !== 200) {
        return this.$message.error('获取物流进度失败')
      }
      this.loactionInfo = res.data
      this.loactionDialogVisible = true
    },
    loactionDialogClose() {}
  }
}
</script>

<style lang="less" scoped>
.el-cascader {
  width: 100%;
}
</style>
/** * Copyright (c) 2017-2019. The WRENCH Team. * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. */ #ifndef WRENCH_SLEEP_ACTION_H #define WRENCH_SLEEP_ACTION_H #include <string> #include "wrench/action/Action.h" namespace wrench { /***********************/ /** \cond DEVELOPER */ /***********************/ /** * @brief A class that implements a sleep action */ class SleepAction : public Action { public: double getSleepTime() const; protected: friend class CompoundJob; SleepAction(const std::string &name, double sleep_time); void execute(const std::shared_ptr<ActionExecutor> &action_executor) override; void terminate(const std::shared_ptr<ActionExecutor> &action_executor) override; private: double sleep_time; }; /***********************/ /** \endcond */ /***********************/ }// namespace wrench #endif//WRENCH_SLEEP_ACTION_H
/**************************************************************************** This file is part of the Webstella protocols exchange (Weprex) software. Copyright (C) 2018 Oleg Malyavkin. Contact: [email protected] This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. ****************************************************************************/ #ifndef WSDATACONVERTER_H #define WSDATACONVERTER_H #include <QtCore> #include <memory> #include <limits> #include "conf.h" enum class WSConversionState : quint8 { OK = 0, ERR_COUNT = 1, ERR_LIMITS = 2, ERR_CONVERT = 3, ERR_REPRESENT = 4, ERR_TYPE_SIZE = 5, ERR_TYPE = 6 }; enum class WSByteOrder : quint8 { FORWARD = 0, BACKWARD = 1, FORWARD_WORDS_REVERSE = 2, BACKWARD_WORDS_REVERSE = 3 }; enum class WSDataRepresent : quint8 { BIN = 0, OCT = 1, DEC = 2, HEX = 3, ASCII = 4 }; enum class WSDataType : quint8 { INTEGER = 0, FLOAT = 1, TEXT = 2 }; class WSDataConverterInterface { public: virtual ~WSDataConverterInterface(); virtual qint32 count() = 0; virtual qint32 size() = 0; virtual void reconvert() = 0; virtual QStringList toStringList(WSDataRepresent represent, quint8 precision, const char *divider) = 0; }; template <typename T> class WSDataConverter : public WSDataConverterInterface { public: WSDataConverter(char *bytes, qint32 size, WSByteOrder order) : m_bytes(bytes), m_size(size), m_order(order), m_need_delete(false) { m_converted_bytes = new char[m_size]; convert(); calcCount(); 
} WSDataConverter(QVector<T> values, WSByteOrder order) : m_size(sizeof(T) * values.size()), m_order(order), m_count(values.size()), m_need_delete(true) { m_bytes = new char[m_size]; m_converted_bytes = new char[m_size]; for (qint32 i = 0; i < m_count; i++) { memcpy(m_bytes + (i * sizeof(T)), &values[i], sizeof(T)); } convert(); } virtual ~WSDataConverter() { delete m_converted_bytes; if (m_need_delete) { delete m_bytes; } } WSDataConverter(const WSDataConverter &obj) = delete; WSDataConverter& operator=(WSDataConverter &obj) = delete; WSDataConverter(const WSDataConverter &&obj) = delete; WSDataConverter& operator=(WSDataConverter &&obj) = delete; const char* bytes() { return m_converted_bytes; } qint32 count() { return m_count; } qint32 size() { return m_size; } void reconvert() { convert(); } T value() { //return *((T*) m_converted_bytes); return *(reinterpret_cast<T*>(m_converted_bytes)); } QVector<T> values() { T val; QVector<T> result; for (qint32 i = 0; i < m_count; i++) { val = (reinterpret_cast<T*>(m_converted_bytes))[i]; result.append(val); } return result; } QStringList toStringList(WSDataRepresent represent, quint8 precision, const char *divider = " ") { QStringList result; if (m_count < 2) { QString res = valToString(value(), represent, precision); result.append(res); result.append(res); } else { QVector<T> vals = values(); QVectorIterator<T> i(vals); QString result1, result2, res; while (i.hasNext()) { res = valToString(i.next(), represent, precision); result1 = result1 % res; result2 = result2 % res; if (i.hasNext()) { result1 = result1 % divider; } } if (represent == WSDataRepresent::ASCII) { result.append(result2); } else { result.append(result1); } if (represent == WSDataRepresent::BIN || represent == WSDataRepresent::ASCII) { result.append(result2); } else { result.append(result1); } } return result; } private: char *m_bytes; qint32 m_size; WSByteOrder m_order; char *m_converted_bytes; qint32 m_count; bool m_need_delete; void convert() { qint32 
i; if (m_size == 1) { m_converted_bytes[0] = m_bytes[0]; } else { if (m_order == WSByteOrder::FORWARD) { memcpy(m_converted_bytes, m_bytes, m_size); } else if (m_order == WSByteOrder::BACKWARD) { for (i = 0; i < m_size; i++) { m_converted_bytes[i] = m_bytes[m_size - i - 1]; } } else if (m_order == WSByteOrder::FORWARD_WORDS_REVERSE) { for (i = 0; i < m_size; i++) { m_converted_bytes[i] = ((i % 2 == 0)?m_bytes[i + 1]:m_bytes[i - 1]); } } else if (m_order == WSByteOrder::BACKWARD_WORDS_REVERSE) { for (i = 0; i < m_size; i++) { m_converted_bytes[i] = ((i % 2 == 0)?m_bytes[m_size - i - 2]:m_bytes[m_size - i]); } } } } void calcCount() { if (m_size == 0 || m_size % sizeof(T) != 0) { m_count = 0; } else { m_count = m_size / sizeof(T); } } QString intToString(T val, WSDataRepresent represent) { QString res; if (represent == WSDataRepresent::BIN) { res = QString::number(val, 2); int n = res.length(); switch (n) { case 0: res = "00000000"; break; case 1: res = "0000000" + res; break; case 2: res = "000000" + res; break; case 3: res = "00000" + res; break; case 4: res = "0000" + res; break; case 5: res = "000" + res; break; case 6: res = "00" + res; break; case 7: res = "0" + res; break; } return res; } else if (represent == WSDataRepresent::OCT) { return QString::number(val, 8); } else if (represent == WSDataRepresent::DEC) { return QString::number(val, 10); } else if (represent == WSDataRepresent::HEX) { res = QString::number(val, 16).toUpper(); if (res.length() == 1) { res = "0" + res; } return res; } else if (represent == WSDataRepresent::ASCII) { QChar c = static_cast<char>(val); return QString(c); } return QString(); } QString floatToString(T val, WSDataRepresent represent, quint8 precision) { if (represent == WSDataRepresent::DEC) { return QString::number(val, 'g', precision); } return QString(); } QString valToString(T val, WSDataRepresent represent, quint8); }; std::unique_ptr<WSDataConverterInterface> make_data_converter(char *dataBytes, qint32 dataSize, 
WSByteOrder order, WSDataType type, quint8 typeSize, bool sign); class WSByteArrayConverter { public: static QString toString(QByteArray arr, WSDataRepresent represent); }; class WSStringConverter { public: static WSConversionState toArray(QByteArray &result, const QString &data, WSDataRepresent represent, WSByteOrder order, WSDataType type, quint8 typeSize, quint16 bytesSize, bool sign); }; #endif // WSDATACONVERTER_H
import { z } from "zod"; import { AuthContext } from "~/context/context"; import { useContext, useState } from "react"; import { useRegisterSchema } from "./schema/schemeRegister"; import { Card, CardContent, CardDescription, CardFooter, CardHeader, CardTitle, } from "./ui/card"; import { Button } from "./ui/button"; import { Input } from "./ui/input"; import { IoEyeOffOutline, IoEyeOutline } from "react-icons/io5"; export const FormRegister = () => { const { createUser } = useContext(AuthContext); const [invisiblePassword, setInvisiblePassword] = useState(true); const { handleSubmit, schema, errors, register } = useRegisterSchema(); type formDataProps = z.infer<typeof schema>; const registerUser = async (data: formDataProps) => { const { email, firstName, lastName, password, user } = data; await createUser({ email, firstName, lastName, password, user }); }; function handleInvisiblePassword() { setInvisiblePassword(!invisiblePassword); } return ( <Card className="w-[450px] h-full"> <form autoComplete="off" onSubmit={handleSubmit(registerUser)} className="flex flex-col gap-2" > <CardHeader> <CardTitle>Cadastre-se</CardTitle> <CardDescription> Formulario de registro, preencha os dados a baixo para que possa se cadastrar. 
</CardDescription> </CardHeader> <CardContent className="flex flex-col gap-3"> <label className="flex gap-2"> <div className="flex flex-col"> <span>Nome</span> <Input disabled={true} autoComplete="off" {...register("firstName")} type="text" /> {errors.firstName && ( <span className="text-red-700 text-xs"> {errors.firstName.message} </span> )} </div> <div className="flex flex-col"> <span>Sobrenome</span> <Input disabled={true} autoComplete="off" {...register("lastName")} type="text" /> {errors.lastName && ( <span className="text-red-700 text-xs"> {errors.lastName.message} </span> )} </div> </label> <label className="flex flex-col"> <div className="flex flex-col"> <span>Usuário</span> <Input disabled={true} autoComplete="off" {...register("user")} type="text" /> {errors.user && ( <span className="text-red-700 text-xs"> {errors.user.message} </span> )} </div> </label> <label className="flex flex-col"> <div className="flex flex-col"> <span>E-mail</span> <Input disabled={true} autoComplete="off" {...register("email")} type="email" /> {errors.email && ( <span className="text-red-700 text-xs"> {errors.email.message} </span> )} </div> </label> <label className="flex flex-col"> <div className="flex flex-col"> <span>Senha</span> <div className="flex"> <Input disabled={true} className="rounded-r-none" autoComplete="off" {...register("password")} type={`${invisiblePassword ? "password" : "text"}`} /> <Button type="button" className="rounded-l-none px-3 bg-zinc-700" onClick={handleInvisiblePassword} > {invisiblePassword === true ? ( <IoEyeOutline size={25} /> ) : ( <IoEyeOffOutline size={25} /> )} </Button> </div> {errors.password && ( <span className="text-red-700 text-xs"> {errors.password.message} </span> )} </div> </label> </CardContent> <CardFooter> <Button disabled={true} type="submit" className="w-full"> Cadastrar </Button> </CardFooter> </form> </Card> ); };
package com.goodforallcode.playlistgenerator.model.domain.spotify;

import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.fasterxml.jackson.annotation.JsonProperty;

import java.util.List;

/**
 * Jackson-mapped DTO for one album item as returned by the Spotify Web API.
 * Snake_case JSON keys ({@code album_type}, {@code total_tracks},
 * {@code tracks}) are bound explicitly via {@link JsonProperty}; unknown
 * fields in the payload are ignored.
 */
@JsonIgnoreProperties(ignoreUnknown = true)
public class SpotifyAlbumItem {

    @JsonProperty("album_type")
    String albumType;
    List<SpotifyArtist> artists;
    String id;
    String name;
    int popularity;
    @JsonProperty("total_tracks")
    int totalTracks;
    @JsonProperty("tracks")
    SpotifyTracksContainer tracksContainer;

    public SpotifyAlbumItem() {
    }

    // ---- accessors -----------------------------------------------------

    public String getAlbumType() { return albumType; }

    public List<SpotifyArtist> getArtists() { return artists; }

    public String getId() { return id; }

    public String getName() { return name; }

    public int getPopularity() { return popularity; }

    public int getTotalTracks() { return totalTracks; }

    public SpotifyTracksContainer getTracksContainer() { return tracksContainer; }

    @JsonProperty("album_type")
    public void setAlbumType(String albumType) { this.albumType = albumType; }

    public void setArtists(List<SpotifyArtist> artists) { this.artists = artists; }

    public void setId(String id) { this.id = id; }

    public void setName(String name) { this.name = name; }

    public void setPopularity(int popularity) { this.popularity = popularity; }

    @JsonProperty("total_tracks")
    public void setTotalTracks(int totalTracks) { this.totalTracks = totalTracks; }

    @JsonProperty("tracks")
    public void setTracksContainer(SpotifyTracksContainer tracksContainer) {
        this.tracksContainer = tracksContainer;
    }

    // ---- value semantics (Lombok-style canEqual pattern) ----------------

    /** Null-safe field comparison with the same semantics as the original ternaries. */
    private static boolean fieldEquals(Object a, Object b) {
        return a == null ? b == null : a.equals(b);
    }

    /** Hash contribution of a possibly-null field; 43 stands in for null. */
    private static int fieldHash(Object field) {
        return field == null ? 43 : field.hashCode();
    }

    @Override
    public boolean equals(final Object o) {
        if (o == this) return true;
        if (!(o instanceof SpotifyAlbumItem)) return false;
        final SpotifyAlbumItem other = (SpotifyAlbumItem) o;
        if (!other.canEqual(this)) return false;
        if (!fieldEquals(getAlbumType(), other.getAlbumType())) return false;
        if (!fieldEquals(getArtists(), other.getArtists())) return false;
        if (!fieldEquals(getId(), other.getId())) return false;
        if (!fieldEquals(getName(), other.getName())) return false;
        if (getPopularity() != other.getPopularity()) return false;
        if (getTotalTracks() != other.getTotalTracks()) return false;
        return fieldEquals(getTracksContainer(), other.getTracksContainer());
    }

    /** Allows subclasses to opt out of the symmetric equals contract. */
    protected boolean canEqual(final Object other) {
        return other instanceof SpotifyAlbumItem;
    }

    @Override
    public int hashCode() {
        // Same constants (prime 59, null sentinel 43) and field order as the
        // original, so hash values are unchanged.
        final int PRIME = 59;
        int result = 1;
        result = result * PRIME + fieldHash(getAlbumType());
        result = result * PRIME + fieldHash(getArtists());
        result = result * PRIME + fieldHash(getId());
        result = result * PRIME + fieldHash(getName());
        result = result * PRIME + getPopularity();
        result = result * PRIME + getTotalTracks();
        result = result * PRIME + fieldHash(getTracksContainer());
        return result;
    }

    @Override
    public String toString() {
        return "SpotifyAlbumItem(albumType=" + getAlbumType()
                + ", artists=" + getArtists()
                + ", id=" + getId()
                + ", name=" + getName()
                + ", popularity=" + getPopularity()
                + ", totalTracks=" + getTotalTracks()
                + ", tracksContainer=" + getTracksContainer() + ")";
    }
}
--- title: background-attachment slug: Web/CSS/background-attachment tags: - CSS - CSS Background - CSS Property - Reference translation_of: Web/CSS/background-attachment --- <div><section class="Quick_links" id="Quick_Links"><ol><li><strong><a href="/ru/docs/Web/CSS">CSS</a></strong></li><li><strong><a href="/ru/docs/Web/CSS/Reference">CSS документация</a></strong></li><li><strong><a href="/ru/docs/Web/CSS/CSS_Backgrounds_and_Borders">CSS Backgrounds and Borders</a></strong></li><li class="toggle"><details open><summary>Руководства</summary><ol><li><a href="/ru/docs/Web/CSS/CSS_Backgrounds_and_Borders/Resizing_background_images">Resizing background images with background-size</a> <a href="/ru/docs/Web/CSS/CSS_Backgrounds_and_Borders/Resizing_background_images$translate" style="opacity: 0.5;">[Перевести]</a></li><li><a href="/ru/docs/Web/CSS/CSS_Backgrounds_and_Borders/Using_multiple_backgrounds">Using multiple backgrounds</a> <a href="/ru/docs/Web/CSS/CSS_Backgrounds_and_Borders/Using_multiple_backgrounds$translate" style="opacity: 0.5;">[Перевести]</a></li></ol></details></li><li class="toggle"><details open><summary>Свойства</summary><ol><li><a href="/ru/docs/Web/CSS/background"><code>background</code></a></li><li><em><code>background-attachment</code></em></li><li><a href="/ru/docs/Web/CSS/background-clip"><code>background-clip</code></a></li><li><a href="/ru/docs/Web/CSS/background-color"><code>background-color</code></a></li><li><a href="/ru/docs/Web/CSS/background-image"><code>background-image</code></a></li><li><a href="/ru/docs/Web/CSS/background-origin"><code>background-origin</code></a></li><li><a href="/ru/docs/Web/CSS/background-position"><code>background-position</code></a></li><li><span class="sidebar-icon"><span class="icon-only-inline" title="Это экспериментальное API, которое не должно использоваться в рабочем коде."><i class="icon-beaker"> </i></span></span><a 
href="/ru/docs/Web/CSS/background-position-x"><code>background-position-x</code></a></li><li><span class="sidebar-icon"><span class="icon-only-inline" title="Это экспериментальное API, которое не должно использоваться в рабочем коде."><i class="icon-beaker"> </i></span></span><a href="/ru/docs/Web/CSS/background-position-y"><code>background-position-y</code></a> <a href="/ru/docs/Web/CSS/background-position-y$translate" style="opacity: 0.5;">[Перевести]</a></li><li><a href="/ru/docs/Web/CSS/background-repeat"><code>background-repeat</code></a></li><li><a href="/ru/docs/Web/CSS/background-size"><code>background-size</code></a></li><li><a href="/ru/docs/Web/CSS/border"><code>border</code></a></li><li><a href="/ru/docs/Web/CSS/border-bottom"><code>border-bottom</code></a></li><li><a href="/ru/docs/Web/CSS/border-bottom-color"><code>border-bottom-color</code></a> <a href="/ru/docs/Web/CSS/border-bottom-color$translate" style="opacity: 0.5;">[Перевести]</a></li><li><a href="/ru/docs/Web/CSS/border-bottom-left-radius"><code>border-bottom-left-radius</code></a> <a href="/ru/docs/Web/CSS/border-bottom-left-radius$translate" style="opacity: 0.5;">[Перевести]</a></li><li><a href="/ru/docs/Web/CSS/border-bottom-right-radius"><code>border-bottom-right-radius</code></a> <a href="/ru/docs/Web/CSS/border-bottom-right-radius$translate" style="opacity: 0.5;">[Перевести]</a></li><li><a href="/ru/docs/Web/CSS/border-bottom-style"><code>border-bottom-style</code></a> <a href="/ru/docs/Web/CSS/border-bottom-style$translate" style="opacity: 0.5;">[Перевести]</a></li><li><a href="/ru/docs/Web/CSS/border-bottom-width"><code>border-bottom-width</code></a> <a href="/ru/docs/Web/CSS/border-bottom-width$translate" style="opacity: 0.5;">[Перевести]</a></li><li><a href="/ru/docs/Web/CSS/border-color"><code>border-color</code></a> <a href="/ru/docs/Web/CSS/border-color$translate" style="opacity: 0.5;">[Перевести]</a></li><li><a href="/ru/docs/Web/CSS/border-image"><code>border-image</code></a> 
<a href="/ru/docs/Web/CSS/border-image$translate" style="opacity: 0.5;">[Перевести]</a></li><li><a href="/ru/docs/Web/CSS/border-image-outset"><code>border-image-outset</code></a></li><li><a href="/ru/docs/Web/CSS/border-image-repeat"><code>border-image-repeat</code></a></li><li><a href="/ru/docs/Web/CSS/border-image-slice"><code>border-image-slice</code></a></li><li><a href="/ru/docs/Web/CSS/border-image-source"><code>border-image-source</code></a></li><li><a href="/ru/docs/Web/CSS/border-image-width"><code>border-image-width</code></a></li><li><a href="/ru/docs/Web/CSS/border-left"><code>border-left</code></a> <a href="/ru/docs/Web/CSS/border-left$translate" style="opacity: 0.5;">[Перевести]</a></li><li><a href="/ru/docs/Web/CSS/border-left-color"><code>border-left-color</code></a> <a href="/ru/docs/Web/CSS/border-left-color$translate" style="opacity: 0.5;">[Перевести]</a></li><li><a href="/ru/docs/Web/CSS/border-left-style"><code>border-left-style</code></a> <a href="/ru/docs/Web/CSS/border-left-style$translate" style="opacity: 0.5;">[Перевести]</a></li><li><a href="/ru/docs/Web/CSS/border-left-width"><code>border-left-width</code></a> <a href="/ru/docs/Web/CSS/border-left-width$translate" style="opacity: 0.5;">[Перевести]</a></li><li><a href="/ru/docs/Web/CSS/border-radius"><code>border-radius</code></a></li><li><a href="/ru/docs/Web/CSS/border-right"><code>border-right</code></a> <a href="/ru/docs/Web/CSS/border-right$translate" style="opacity: 0.5;">[Перевести]</a></li><li><a href="/ru/docs/Web/CSS/border-right-color"><code>border-right-color</code></a> <a href="/ru/docs/Web/CSS/border-right-color$translate" style="opacity: 0.5;">[Перевести]</a></li><li><a href="/ru/docs/Web/CSS/border-right-style"><code>border-right-style</code></a> <a href="/ru/docs/Web/CSS/border-right-style$translate" style="opacity: 0.5;">[Перевести]</a></li><li><a href="/ru/docs/Web/CSS/border-right-width"><code>border-right-width</code></a> <a 
href="/ru/docs/Web/CSS/border-right-width$translate" style="opacity: 0.5;">[Перевести]</a></li><li><a href="/ru/docs/Web/CSS/border-style"><code>border-style</code></a> <a href="/ru/docs/Web/CSS/border-style$translate" style="opacity: 0.5;">[Перевести]</a></li><li><a href="/ru/docs/Web/CSS/border-top"><code>border-top</code></a> <a href="/ru/docs/Web/CSS/border-top$translate" style="opacity: 0.5;">[Перевести]</a></li><li><a href="/ru/docs/Web/CSS/border-top-color"><code>border-top-color</code></a> <a href="/ru/docs/Web/CSS/border-top-color$translate" style="opacity: 0.5;">[Перевести]</a></li><li><a href="/ru/docs/Web/CSS/border-top-left-radius"><code>border-top-left-radius</code></a> <a href="/ru/docs/Web/CSS/border-top-left-radius$translate" style="opacity: 0.5;">[Перевести]</a></li><li><a href="/ru/docs/Web/CSS/border-top-right-radius"><code>border-top-right-radius</code></a> <a href="/ru/docs/Web/CSS/border-top-right-radius$translate" style="opacity: 0.5;">[Перевести]</a></li><li><a href="/ru/docs/Web/CSS/border-top-style"><code>border-top-style</code></a> <a href="/ru/docs/Web/CSS/border-top-style$translate" style="opacity: 0.5;">[Перевести]</a></li><li><a href="/ru/docs/Web/CSS/border-top-width"><code>border-top-width</code></a> <a href="/ru/docs/Web/CSS/border-top-width$translate" style="opacity: 0.5;">[Перевести]</a></li><li><a href="/ru/docs/Web/CSS/border-width"><code>border-width</code></a></li><li><a href="/ru/docs/Web/CSS/box-shadow"><code>box-shadow</code></a></li></ol></details></li></ol></section></div> <h2 id="Summary" name="Summary">Краткое описание</h2> <p>Если указано <a href="/ru/docs/Web/CSS/background-image" title="Свойство CSS  background-image устанавливает одно или несколько фоновых изображений для элемента. Изображения рисуются в слоях контекстов наложения одно поверх другого. 
Первый слой выводится так, чтобы он был ближе всего к пользователю."><code>background-image</code></a>, <a href="/en-US/docs/CSS" title="CSS">CSS</a> свойство <code>background-attachment</code> определяет, является ли позиция этого изображения фиксированной в области просмотра, или прокручивается вместе с содержащим его блоком.</p> <table class="properties"><tbody><tr><th scope="row"><a href="/ru/docs/Web/CSS/initial_value">Начальное значение</a></th><td><code>scroll</code></td></tr><tr><th scope="row">Применяется к</th><td>все элементы. Это также применяется к <a href="/ru/docs/Web/CSS/::first-letter"><code>::first-letter</code></a> и <a href="/ru/docs/Web/CSS/::first-line"><code>::first-line</code></a>.</td></tr><tr><th scope="row"><a href="/ru/docs/Web/CSS/inheritance">Наследуется</a></th><td>нет</td></tr><tr><th scope="row"><a href="/ru/docs/Web/CSS/computed_value">Обработка значения</a></th><td>как указано</td></tr><tr><th scope="row">Animation type</th><td>discrete</td></tr></tbody></table> <h2 id="Syntax" name="Syntax">Синтаксис</h2> <pre class="brush: css notranslate">/* Ключевые значения */ background-attachment: scroll; background-attachment: fixed; background-attachment: local; /* Глобальные значения */ background-attachment: inherit; background-attachment: initial; background-attachment: unset;</pre> <p>Значение свойства <code>background-attachment</code> задается одним из ключевых значений из списка ниже.</p> <h3 id="Значения">Значения</h3> <dl> <dt><code>fixed</code></dt> <dd>Фон фиксируется относительно области просмотра. Даже если элемент имеет механизм прокрутки, фон не перемещается вместе с элементом. (Это значение несовместимо с <a href="/ru/docs/Web/CSS/background-clip#text"><code>background-clip: text</code></a>.)</dd> <dt><code>local</code></dt> <dd>Фон фиксируется относительно содержимого элемента. 
Если элемент имеет механизм прокрутки, фон прокручивается с содержимым элемента, и область рисования фона и область позиционирования фона относятся к прокручиваемой области элемента, а не к границе, обрамляющей их.</dd> <dt><code>scroll</code></dt> <dd>Фон фиксируется относительно самого элемента и не прокручивается с его содержимым. (Он фактически прикреплен к границе элемента.)</dd> </dl> <h3 id="Формальный_синтаксис">Формальный синтаксис</h3> <pre class="syntaxbox notranslate"><a href="#attachment">&lt;attachment&gt;</a><a href="/ru/docs/CSS/Value_definition_syntax#Hash_mark_()" title="Hash mark: the entity is repeated one or several times, each occurence separated by a comma">#</a><p style="font-family: Open Sans,Arial,sans-serif; margin: 10px 0 0 0;">где <br><code style="font-family: Consolas,Monaco,&quot;Andale Mono&quot;,monospace;"><span id="attachment">&lt;attachment&gt;</span> = scroll <a href="/ru/docs/CSS/Value_definition_syntax#Single_bar" title="Single bar: exactly one of the entities must be present">|</a> fixed <a href="/ru/docs/CSS/Value_definition_syntax#Single_bar" title="Single bar: exactly one of the entities must be present">|</a> local</code></p> </pre> <h2 id="Examples" name="Examples">Примеры</h2> <h3 id="Простой_пример">Простой пример</h3> <h4 id="HTML">HTML</h4> <pre class="brush: html notranslate">&lt;p&gt; There were doors all round the hall, but they were all locked; and when Alice had been all the way down one side and up the other, trying every door, she walked sadly down the middle, wondering how she was ever to get out again. 
&lt;/p&gt;</pre> <h4 id="CSS">CSS</h4> <pre class="brush:css; highlight:[3]; notranslate">p { background-image: url(&quot;https://mdn.mozillademos.org/files/12057/starsolid.gif&quot;); background-attachment: fixed; } </pre> <h4 id="Результат">Результат</h4> <p><iframe class="live-sample-frame sample-code-frame" frameborder="0" id="frame_Простой_пример" src="https://mdn.mozillademos.org/ru/docs/Web/CSS/background-attachment$samples/%D0%9F%D1%80%D0%BE%D1%81%D1%82%D0%BE%D0%B9_%D0%BF%D1%80%D0%B8%D0%BC%D0%B5%D1%80?revision=1648727"></iframe></p> <h3 id="Поддержка_нескольких_фоновых_изображений">Поддержка нескольких фоновых изображений</h3> <p>Это свойство поддерживает несколько фоновых изображений. Вы можете указать разные <code>&lt;attachment&gt;</code> для каждого фона, разделенных запятыми. Каждое изображение сопоставляется с соответствующим типом вложения, от первого указанного до последнего.</p> <h4 id="HTML_2">HTML</h4> <pre class="brush: html notranslate">&lt;p&gt; There were doors all round the hall, but they were all locked; and when Alice had been all the way down one side and up the other, trying every door, she walked sadly down the middle, wondering how she was ever to get out again. Suddenly she came upon a little three-legged table, all made of solid glass; there was nothing on it except a tiny golden key, and Alice&apos;s first thought was that it might belong to one of the doors of the hall; but, alas! either the locks were too large, or the key was too small, but at any rate it would not open any of them. However, on the second time round, she came upon a low curtain she had not noticed before, and behind it was a little door about fifteen inches high: she tried the little golden key in the lock, and to her great delight it fitted! 
&lt;/p&gt;</pre> <h4 id="CSS_2">CSS</h4> <pre class="brush:css; highlight:[3]; notranslate">p { background-image: url(&quot;https://mdn.mozillademos.org/files/12057/starsolid.gif&quot;), url(&quot;https://mdn.mozillademos.org/files/12059/startransparent.gif&quot;); background-attachment: fixed, scroll; background-repeat: no-repeat, repeat-y; }</pre> <h4 id="Результат_2">Результат</h4> <p><iframe class="live-sample-frame sample-code-frame" frameborder="0" id="frame_Поддержка_нескольких_фоновых_изображений" src="https://mdn.mozillademos.org/ru/docs/Web/CSS/background-attachment$samples/%D0%9F%D0%BE%D0%B4%D0%B4%D0%B5%D1%80%D0%B6%D0%BA%D0%B0_%D0%BD%D0%B5%D1%81%D0%BA%D0%BE%D0%BB%D1%8C%D0%BA%D0%B8%D1%85_%D1%84%D0%BE%D0%BD%D0%BE%D0%B2%D1%8B%D1%85_%D0%B8%D0%B7%D0%BE%D0%B1%D1%80%D0%B0%D0%B6%D0%B5%D0%BD%D0%B8%D0%B9?revision=1648727"></iframe></p> <h2 id="Specifications" name="Specifications">Спецификации</h2> <table class="standard-table"> <thead> <tr> <th scope="col">Спецификация</th> <th scope="col">Статус</th> <th scope="col">Комментарий</th> </tr> </thead> <tbody> <tr> <td><a class="external" href="https://drafts.csswg.org/css-backgrounds-3/#the-background-attachment" hreflang="en" lang="en">CSS Backgrounds and Borders Module Level 3<br><small lang="ru">Определение &apos;background-attachment&apos; в этой спецификации.</small></a></td> <td><span class="spec-CR">Кандидат в рекомендации</span></td> <td>Сокращенное свойство было расширено для поддержки нескольких фонов и значения <code>local</code>.</td> </tr> <tr> <td><a class="external" href="https://www.w3.org/TR/CSS2/colors.html#propdef-background-attachment" hreflang="en" lang="en">CSS Level 2 (Revision 1)<br><small lang="ru">Определение &apos;background-attachment&apos; в этой спецификации.</small></a></td> <td><span class="spec-REC">Рекомендация</span></td> <td>Никаких существенных изменений.</td> </tr> <tr> <td><a class="external" href="https://www.w3.org/TR/CSS1/#background-attachment" hreflang="en" lang="en">CSS 
Level 1<br><small lang="ru">Определение &apos;background-attachment&apos; в этой спецификации.</small></a></td> <td><span class="spec-REC">Рекомендация</span></td> <td></td> </tr> </tbody> </table> <h2 id="Browser_compatibility" name="Browser_compatibility">Совместимость браузеров</h2> <div class="warning notecard"><strong><a href="https://github.com/mdn/browser-compat-data">We&apos;re converting our compatibility data into a machine-readable JSON format</a></strong>. This compatibility table still uses the old format, because we haven&apos;t yet converted the data it contains. <strong><a href="/ru/docs/MDN/Contribute/Structures/Compatibility_tables">Find out how you can help!</a></strong></div> <div class="htab"> <a id="AutoCompatibilityTable" name="AutoCompatibilityTable"></a> <ul> <li class="selected"><a>Настольные</a></li> <li><a>Мобильные</a></li> </ul> </div> <div id="compat-desktop"> <table class="compat-table"> <tbody> <tr> <th>Feature</th> <th>Chrome</th> <th>Firefox (Gecko)</th> <th>Internet Explorer</th> <th>Opera</th> <th>Safari</th> </tr> <tr> <td>Basic support</td> <td>1.0</td> <td>1.0 (1.7 или ранее)</td> <td>4.0</td> <td>3.5</td> <td>1.0</td> </tr> <tr> <td>Multiple backgrounds</td> <td>1.0</td> <td><a href="/en-US/Firefox/Releases/3.6">3.6</a> (1.9.2)</td> <td>9.0</td> <td>10.5</td> <td>1.3</td> </tr> <tr> <td><code>local</code></td> <td>4.0</td> <td><span style="color: #f00;">Нет</span></td> <td>9.0</td> <td>10.5</td> <td>5.0</td> </tr> </tbody> </table> </div> <div id="compat-mobile"> <table class="compat-table"> <tbody> <tr> <th>Feature</th> <th>Android</th> <th>Firefox Mobile (Gecko)</th> <th>IE Phone</th> <th>Opera Mobile</th> <th>Safari Mobile</th> </tr> <tr> <td>Basic support</td> <td>2.1</td> <td>1.0 (1.9.2)</td> <td><span style="color: rgb(255, 153, 0);" title="Совместимость неизвестна; пожалуйста, обновите информацию.">?</span></td> <td>10.0</td> <td>3.2</td> </tr> <tr> <td>Multiple backgrounds</td> <td>2.1</td> <td>1.0 (1.9.2)</td> 
<td><span style="color: rgb(255, 153, 0);" title="Совместимость неизвестна; пожалуйста, обновите информацию.">?</span></td> <td>10.0</td> <td>3.2</td> </tr> <tr> <td><code>local</code></td> <td><span style="color: rgb(255, 153, 0);" title="Совместимость неизвестна; пожалуйста, обновите информацию.">?</span></td> <td><span style="color: #f00;">Нет</span></td> <td><span style="color: rgb(255, 153, 0);" title="Совместимость неизвестна; пожалуйста, обновите информацию.">?</span></td> <td><span style="color: rgb(255, 153, 0);" title="Совместимость неизвестна; пожалуйста, обновите информацию.">?</span></td> <td><span style="color: rgb(255, 153, 0);" title="Совместимость неизвестна; пожалуйста, обновите информацию.">?</span></td> </tr> </tbody> </table> </div> <h2 id="See_also" name="See_also">Смотрите также</h2> <ul> <li><a href="/en-US/docs/CSS/Multiple_backgrounds">Несколько фонов</a></li> </ul>
#! /usr/bin/env python
"""ROS 2 node that reports box heights in feet.

Subscribes to ``box_height_info`` (BoxHeightInformation, metres) and, for
every message, calls the ``metres_to_feet`` service asynchronously, logging
the converted height when the response arrives.
"""
import rclpy
from rclpy.node import Node

from hrwros_msgs.msg import BoxHeightInformation
from hrwros_msgs.srv import ConvertMetresToFeet


class BoxHeightFeetConverter(Node):
    """Bridges the box-height topic to the metres-to-feet service."""

    def __init__(self):
        super().__init__('box_height_in_feet')
        # BUG FIX: the original logged "Waiting for subscription..." here,
        # which was misleading -- nothing waits at this point; the node simply
        # creates its subscription and service client.
        self.get_logger().info("Waiting for box height messages...")
        # Subscriber to the box height topic - Part1.
        self.subscription = self.create_subscription(
            BoxHeightInformation,
            'box_height_info',
            self.box_height_info_callback,
            10)
        # Client for the service to convert metres to feet - Part2.
        self.metres_to_feet_client = self.create_client(
            ConvertMetresToFeet, 'metres_to_feet')

    def box_height_info_callback(self, data):
        """Forward one box height (metres) to the conversion service.

        The service call itself is asynchronous; the result is logged by
        :meth:`future_callback`.
        """
        try:
            # Block until the service is up, logging once per second.
            # NOTE(review): waiting inside the subscription callback stalls
            # the executor if the service never appears -- confirm this is
            # acceptable for this exercise.
            while not self.metres_to_feet_client.wait_for_service(timeout_sec=1.0):
                self.get_logger().info('service not available, waiting again...')

            request = ConvertMetresToFeet.Request()
            request.distance_metres = data.box_height

            # BUG FIX: keep the future local instead of storing it on `self`;
            # a second message would otherwise clobber the pending future.
            future = self.metres_to_feet_client.call_async(request)
            future.add_done_callback(self.future_callback)
        except Exception as e:
            # Failures are errors, not informational messages.
            self.get_logger().error("Service call failed: %s" % e)

    def future_callback(self, future):
        """Log the converted height once the service response arrives."""
        try:
            response = future.result()
            self.get_logger().info('Height of box %0.3f Feet' % response.distance_feet)
        except Exception as e:
            self.get_logger().error('Service call failed %r' % (e,))


def main(args=None):
    rclpy.init(args=args)
    node = BoxHeightFeetConverter()
    rclpy.spin(node)
    # Destroy the node explicitly
    # (optional - otherwise it will be done automatically
    # when the garbage collector destroys the node object)
    node.destroy_node()
    rclpy.shutdown()


if __name__ == '__main__':
    main()
// Read-up : In selection sort, we iterate through the array from idx-> 0 to n and each time we get the shortest element from the array particularly from idx+1 till the end. // Now instead of getting that smallest elt by iterating through array yet again , we're doing the same using quick sort and partition wherein the index of smlst elt is being returned and then back to Selection sort, we're swapping both of them #include<bits/stdc++.h> using namespace std; using namespace std :: chrono ; int Partition(vector<int>&arr , int low , int high) { int i = low ; int j = high ; int pivot = arr[low]; while (i < j) { while (arr[i] <= pivot && i <= high - 1) { i++; } while (arr[j] > pivot && j >= low + 1) { j--; } if (i < j) swap(arr[i], arr[j]); } swap(arr[low], arr[j]); return j; } int QS(vector<int> &arr , int low , int high , int k=0) { if(low<high) { int j= Partition(arr,low,high); if(j==k) return j; else if (j>k) QS(arr,low,j-1,k); else QS(arr,j+1,high,k); } } void SelectionSort(vector<int> &arr) { int n=arr.size(); for(int i=0 ; i<n ; i++) { int mini = i; int iThSmallest = QS(arr,i,n-1); swap(arr[mini],arr[iThSmallest]); } } void input_generator(vector<pair<int,int>> &store) { for(int i=1e3; i<=1e4 ; i+=500) { vector<int> arr(i); for(int n=0 ; n<i ; n++) arr[n] = rand()%100 ; int t=0; for(int m=1 ; m<=10;m++) { auto start = high_resolution_clock::now(); SelectionSort(arr); auto stop = high_resolution_clock::now(); auto duration = duration_cast<milliseconds>(stop - start); t+=duration.count() ; } t= t/10; store.push_back({i,t}); } } int main() { vector<pair<int,int>> store; input_generator(store); cout << "Number of inputs \tTime taken\n\n"; for(auto i :store) { cout << i.first << " \t" << i.second << endl; } return 0; }
import { Button } from "@/components/ui/button"; import { Clock, Mail, MapPin, Share, User } from "lucide-react"; import Image from "next/image"; import React from "react"; import BookingSection from "./BookingSection"; interface SingleBusinessList { id: string; name: string; contactPerson: string; images: { url: string }[]; email: string; about: string; address: string; category: { name: string }; } interface BusinessInfoProps { business: SingleBusinessList; email: string; name: string; } function BusinessInfo(props: BusinessInfoProps) { return ( props.business?.name && ( <div className="md:flex gap-4 items-center"> <Image src={props.business?.images[0]?.url} alt={props.business.name} width={150} height={200} className="rounded-full h-[150px] object-cover" /> <div className="flex flex-col gap-4 md:flex-row justify-between items-center w-full"> <div className="flex flex-col mt-4 md:mt-0 items-baseline gap-3"> <h2 className="text-primary p-1 px-3 text-lg bg-purple-100 rounded-full" > {props.business?.category?.name} </h2> <h2 className="text-[40px] font-bold">{props.business.name}</h2> <h2 className="flex gap-2 text-lg text-gray-500"> <MapPin /> {props.business.address} </h2> <h2 className="flex gap-2 text-lg text-gray-500"> <Mail /> {props.business?.email} </h2> </div> <div className="flex flex-col gap-5 items-end"> <Button> <Share /> </Button> <h2 className="flex gap-2 text-xl text-primary"> <User /> {props.business.contactPerson}{" "} </h2> <h2 className="flex gap-2 text-lg md:text-xl text-gray-500"> <Clock /> Available 10:00 AM to 6:30 PM </h2> <BookingSection id={props?.business?.id} userEmail={props.email} userName={props.name} > <Button>Book Now</Button> </BookingSection> </div> </div> </div> ) ); } export default BusinessInfo;
import React, { useRef, useState, useContext, Fragment } from 'react' import ProColor from './ProColor'; import { AuthContext } from '../../store/auth'; import styles from './ProductInfo.module.css'; import ProSize from './ProSize'; import { useHttpClient } from '../../hooks/http-hook'; import { useRouter } from 'next/router'; import StatusOverlay from '../Common/UX/StatusOverlay'; import LoadingSpinner from '../Common/UX/LoadingSpinner'; function ProductInfo(props) { const AuthCtx = useContext(AuthContext); const router = useRouter(); const { slang, id, name, colors, size, price, discountPrice, inStock } = props; const { isLoading, error, sendRequest, clearError } = useHttpClient(); const [success, setSuccess] = useState(false); const [activeColor, setActiceColor] = useState(0); const [activeSize, setActiceSize] = useState(0); const [quantity, setQuantity] = useState(1); const quantityRef = useRef(); const buyNowHandler = async () => { if (!AuthCtx.isLoggedIn) { return router.push("/login"); } const productId = id; const selColor = colors[activeColor].color; const selSize = size[activeSize].tag; const ProductQuan = quantity try { await sendRequest('/user/cart', 'POST', JSON.stringify({ proId: productId, proColor: selColor, proSize: selSize, proQuan: ProductQuan, action: "PLUS" }), { 'Content-Type': 'application/json', 'authorization': `Bearer ${AuthCtx.token}` } ) router.push('/user/cart') } catch (err) { } } const addToCartHandler = async () => { if (!AuthCtx.isLoggedIn) { return router.push("/login"); } const productId = id; const selColor = colors[activeColor].color; const selSize = size[activeSize].tag; const ProductQuan = quantity try { await sendRequest('/user/cart', 'POST', JSON.stringify({ proId: productId, proColor: selColor, proSize: selSize, proQuan: ProductQuan, action: "PLUS" }), { 'Content-Type': 'application/json', 'authorization': `Bearer ${AuthCtx.token}` } ) setSuccess(true); } catch (err) { } } const setActiveColorHandler = (id) => { 
setActiceColor(id) } const clickSizeHandler = (id) => { setActiceSize(id) } const productIncrementHandler = () => { setQuantity(prev => { return prev + 1 }) } const productDecrementHandler = () => { if (quantity === 1) { return 0 } setQuantity(prev => prev - 1) } const quantityInputHandler = () => { setQuantity(Number(quantityRef.current.value)) } return ( <Fragment> {isLoading && <LoadingSpinner />} {error && <StatusOverlay success={false} onClear={clearError} message={error} />} {success && <StatusOverlay success={true} onClear={() => setSuccess(false)} message="Added to Cart" />} <div className={styles.container}> <h1>{name}</h1> <div className={styles.colors}> <div className={styles.subHead}>Colors</div> <div className={styles.colorBoxes}> {colors.map((data, i) => { return ( <ProColor key={data.id} id={i} color={data.color} active={activeColor === i} onClick={setActiveColorHandler} />) })} </div> </div> <div className={styles.size}> <div className={styles.subHead}>Size</div> <div className={styles.sizes}> {size.map((data, i) => { return <ProSize key={data.id} id={i} tag={data.tag} active={activeSize === i} onClick={clickSizeHandler} /> })} </div> </div> <div className={styles.price}> <div>{`$${discountPrice}`}</div> <div>{`$${price}`}</div> </div> <div className={styles.quantity}> <div className={styles.subHead}> Quantity </div> {inStock && <div className={styles.quantityControls}> <button onClick={productDecrementHandler} ><i className="fa-solid fa-minus"></i></button> <input onChange={quantityInputHandler} ref={quantityRef} type="number" value={quantity} min="1" ></input> <button onClick={productIncrementHandler}><i className="fa-solid fa-plus"></i></button> </div> } {!inStock && <p className={styles.unavailableStock}>Stock Unavailable</p> } </div> <div className={styles.socialIcons}> <a href=''><i className="fa-brands fa-facebook"></i></a> <a href=''><i className="fa-brands fa-instagram"></i></a> <a href=''><i className="fa-brands fa-twitter"></i></a> <a 
href=''><i className="fa-brands fa-whatsapp"></i></a> </div> {inStock && <div className={styles.controls}> <button onClick={buyNowHandler}>Buy Now</button> <button onClick={addToCartHandler}>Add To Cart</button> </div> } </div> </Fragment> ) } export default ProductInfo
--- search: false --- # Converting from Clover to OpenCore So you see the new fancy OpenCore bootloader and just dying to try it out, well you've come to the right place! Many things in Clover have feature parity with OpenCore but many do not, here we'll be going over what you can bring over and what you cannot. To get started, we have some resources that will aid you: * [Config.plist conversion](../clover-conversion/Clover-config.md) * [Kexts and Firmware driver conversion(.kext, .efi)](../clover-conversion/clover-efi.md) * [Boot Argument conversion](../clover-conversion/Clover-boot-arg.md) * [Common Kernel and Kext patch conversions](../clover-conversion/clover-patch.md) ## Cleaning the Clover Junk in macOS So to start, Clover would like to give a big F*** You if you're using emulated NVRAM. Why? Well it likely installed some trash that's a pain in the arse to get rid of. You will need to have SIP disabled to clean it up. Things to check for: * `/Volumes/EFI/EFI/CLOVER/drivers64UEFI/EmuVariableUefi-64.efi` * `/Volumes/EFI/nvram.plist` * `/etc/rc.clover.lib` * `/etc/rc.boot.d/10.save_and_rotate_boot_log.local` * `/etc/rc.boot.d/20.mount_ESP.local` * `/etc/rc.boot.d/70.disable_sleep_proxy_client.local.disabled` * `/etc/rc.shutdown.d/80.save_nvram_plist.local​` If folders are empty then delete them as well: * `/etc/rc.boot.d` * `/etc/rc.shutdown.d​` Users of Clover's Preference Pane will also need to remove these: * `/Library/PreferencePanes/Clover.prefPane` * `/Library/Application\ Support/clover` ## Removing kexts from macOS(S/L/E and L/E) A common tradition with Clover was to install kexts into macOS, specifically System/Library/Extensions and Library/Extensions. Reasoning being that Clover's kext injection system was known to fail either with OS updates or just spontaneously. Thankfully with OpenCore, a much more robust and stable injection mechanism's been made that is far harder to break. So time to do a bit of spring cleaning. 
**Note**: OpenCore will fail to inject kexts already in your kernelcache, so cleaning this out will also resolve those issues. Now open up terminal and run the following: ``` sudo kextcache -i / ``` This command will yell at you about any kexts that shouldn't be in either S/L/E or L/E. **Remove all hack kexts**: ``` sudo -s touch /Library/Extensions /System/Library/Extensions kextcache -i / ``` * **Note**, macOS Catalina will need the `mount -uw /` command to mount the system drive as Read/Write ## Cleaning the Clover Junk in your hardware The other thing that Clover may have hidden from you is NVRAM variables, this is bad as OpenCore won't overwrite variables unless explicitly told via the `Delete` feature found under `NVRAM -> Delete`. To fix this, we'll need to clear them via OpenCore's `ClearNvram` feature. In your config.plist: * `Misc -> Security -> AllowNvramReset -> True` And on your initial boot of OpenCore, select `Reset NVRAM` boot option. This will wipe everything and reboot the system when finished. * Note: Thinkpad laptops are known to be semi-bricked after an NVRAM reset in OpenCore, we recommend resetting NVRAM by updating the BIOS on these machines. ## Optional: Avoiding SMBIOS injection into other OSes By default OpenCore will inject SMBIOS data into all OSes, the reason for this is 2 parts: * This allows for proper multiboot support like with [BootCamp](https://dortania.github.io/OpenCore-Post-Install/multiboot/bootcamp.html) * Avoids edge cases where info is injected several times, commonly seen with Clover However, there are quirks in OpenCore that allow for SMBIOS injection to be macOS limited by patching where macOS reads SMBIOS info from. These quirks can break in the future and so we only recommend this option in the event of certain software breaking in other OSes. For best stability, please avoid. To enable macOS-only SMBIOS injection: * Kernel -> Quirks -> CustomSMBIOSGuid -> True * PlatformInfo -> UpdateSMBIOSMode -> Custom
import React, { Component } from 'react' // 从react路由组件库按需引入组件标签 import {Route,Switch,NavLink,Redirect} from 'react-router-dom' // 引入二级子路由 import Message from './Message/Message' import News from './News/News' export default class Home extends Component { render() { return ( <div className="col-xs-6"> <div className="panel"> <div className="panel-body"> <div><h2>Home组件内容</h2> <div> <ul className="nav nav-tabs"> <li> <NavLink className="list-group-item" to="/home/news">News</NavLink> </li> <li> <NavLink className="list-group-item" to="/home/message">Message</NavLink> </li> </ul> {/* 注册二级路由 */} <Switch> <Route path='/home/news' component={News} /> <Route path='/home/message' component={Message} /> <Redirect to='/home/news' /> </Switch> </div> </div> </div> </div> </div> ) } }
package app.trian.rust.persentation.home

import android.util.Log
import androidx.compose.runtime.snapshots.SnapshotStateList
import androidx.lifecycle.viewModelScope
import app.trian.rust.common.BaseViewModel
import app.trian.rust.common.executeAsFlow
import app.trian.rust.data.Response
import app.trian.rust.data.dataSource.local.entity.RoomWithMember
import app.trian.rust.data.domain.GetListRoomWithMemberUseCase
import app.trian.rust.data.domain.GetRoomWithMemberUseCase
import dagger.hilt.android.lifecycle.HiltViewModel
import kotlinx.coroutines.flow.collect
import kotlinx.coroutines.flow.onEach
import kotlinx.coroutines.flow.onStart
import kotlinx.coroutines.launch
import javax.inject.Inject

// ViewModel for the Home screen: keeps an observable list of rooms and
// reacts to room add/remove/change events by updating that list in place.
// NOTE(review): android.util.Log is imported but never used here.
@HiltViewModel
class HomeViewModel @Inject constructor(
    private val getListRoomWithMemberUseCase: GetListRoomWithMemberUseCase,
    private val getRoomWithMemberUseCase: GetRoomWithMemberUseCase,
) : BaseViewModel<HomeState, HomeStateImpl, HomeAction, HomeEffect>(
    HomeStateImpl()
) {
    // Backing list for the room feed; SnapshotStateList so Compose observes
    // item-level mutations without republishing the whole list.
    val listRoom: SnapshotStateList<RoomWithMember> = SnapshotStateList()

    override fun onAction() {
        // Full refresh: replace the entire list with the latest rooms.
        on(HomeAction.GetListRoom::class.java) {
            viewModelScope.launch {
                val data = getListRoomWithMemberUseCase()
                listRoom.clear()
                listRoom.addAll(data)
            }
        }
        // A room was added: fetch its details and upsert — replace in place
        // if already present, otherwise insert at the top of the list.
        on(HomeAction.OnRoomAdded::class.java) {
            viewModelScope.launch {
                executeAsFlow { getRoomWithMemberUseCase(room.roomId) }
                    .onStart { }
                    .onEach {
                        if (it is Response.Result) {
                            // NOTE(review): the token below looks garbled
                            // ("[email protected]"); presumably it should read
                            // this@on.room.roomId — confirm against VCS.
                            val findIndex = listRoom.withIndex()
                                .find { room -> room.value.room.roomId == [email protected] }
                            if (findIndex != null) {
                                listRoom[findIndex.index] = it.data
                            } else {
                                listRoom.add(0, it.data)
                            }
                        }
                    }
                    .collect()
            }
        }
        // A room was removed: drop it from the list if present.
        on(HomeAction.OnRoomRemove::class.java) {
            val findIndex = listRoom.withIndex().find { room ->
                room.value.room.roomId == this.room.roomId
            }
            if (findIndex != null) {
                listRoom.removeAt(findIndex.index)
            }
        }
        // A room changed: same upsert logic as OnRoomAdded.
        // NOTE(review): duplicates the OnRoomAdded handler body — a shared
        // private helper would remove the duplication.
        on(HomeAction.OnRoomChanged::class.java) {
            viewModelScope.launch {
                executeAsFlow { getRoomWithMemberUseCase(room.roomId) }
                    .onStart { }
                    .onEach {
                        if (it is Response.Result) {
                            // NOTE(review): same garbled token as above — confirm.
                            val findIndex = listRoom.withIndex()
                                .find { room -> room.value.room.roomId == [email protected] }
                            if (findIndex != null) {
                                listRoom[findIndex.index] = it.data
                            } else {
                                listRoom.add(0, it.data)
                            }
                        }
                    }
                    .collect()
            }
        }
    }
}
/*
 * Copyright (c) 2006-2009 Nokia Corporation and/or its subsidiary(-ies).
 * All rights reserved.
 * This component and the accompanying materials are made available
 * under the terms of "Eclipse Public License v1.0"
 * which accompanies this distribution, and is available
 * at the URL "http://www.eclipse.org/legal/epl-v10.html".
 *
 * Initial Contributors:
 * Nokia Corporation - initial contribution.
 *
 * Contributors:
 *
 * Description:
 * Name          : sipprofilequeuehandling.h
 * Part of       : sip profile fsm
 * Interface     : Internal IF
 * Version       : 1.0
 *
 */

/**
 @internalComponent
 */

// NOTE(review): the guard name misspells "PROFILE" as "PRROFILE"; harmless
// but worth fixing together with the matching #endif.
#ifndef CSIPPRROFILEQUEUEHANDLING_H
#define CSIPPRROFILEQUEUEHANDLING_H

//INCLUDES
#include "sipprofilequeueitem.h"
#include <e32base.h>

// FORWARD DECLARATIONS
class MSIPProfileFSMUser;
class CSIPConcreteProfile;

// CLASS DECLARATION
/**
 * A class for maintaining the queue handling for profiles which
 * are pending for registration or deregistration.
 * Class stores the arrays of profiles.
 *
 * @lib sipprofilefsm.lib
 */
class CSIPProfileQueueHandling : public CBase
    {
    public:

        /**
         * Two-phased constructor.
         * @param aUser user for the state machine
         */
        IMPORT_C static CSIPProfileQueueHandling* NewL(
            MSIPProfileFSMUser& aUser );

        /**
         * Two-phased constructor.
         * @param aUser user for the state machine
         */
        IMPORT_C static CSIPProfileQueueHandling* NewLC(
            MSIPProfileFSMUser& aUser );

        /**
         * Destructor.
         */
        IMPORT_C ~CSIPProfileQueueHandling();

    public: // New functions

        /**
         * Adds a profile to the registration queue if a registration or
         * deregistration towards the same registrar is already pending.
         * @param aSIPConcreteProfile profile to be registered
         * @param aRetryRegistration ETrue if this is a registration retry
         * @return ETrue if the profile was queued
         */
        IMPORT_C TBool AddRegisterToQueueL(
            CSIPConcreteProfile& aSIPConcreteProfile,
            TBool aRetryRegistration );

        /**
         * Adds a profile to the deregistration queue if a registration or
         * deregistration towards the same registrar is already pending.
         * @param aSIPConcreteProfile profile to be deregistered
         * @return ETrue if the profile was queued
         */
        IMPORT_C TBool AddDeregisterToQueueL(
            CSIPConcreteProfile& aSIPConcreteProfile );

        /**
         * After final response is received, queue of profiles
         * which are waiting deregistration is checked.
         * If there is found profile with the same registrar as
         * input profile aSIPConcreteprofile has, that profile is removed
         * from queue and registration is continued with that profile.
         */
        IMPORT_C void RegisterFromQueueL(
            CSIPConcreteProfile& aSIPConcreteProfile );

        /**
         * Check if queue has profile which has
         * SecurityNegotiationEnabled and does not have HttpDigestSettings
         * configured.
         * @return ETrue in that case.
         */
        IMPORT_C TBool FoundIPSecProfileCandidate();

        /**
         * Check if queue has profile which has
         * SecurityNegotiationEnabled and does not have HttpDigestSettings
         * configured. If profile found, it will be removed from queue
         * and will be returned. Output value aRegistering gets value EFalse
         * if profile was found from the deregistering queue, otherwise the
         * value is ETrue.
         * Returns NULL (and aRegistering ETrue) if no profile found.
         */
        IMPORT_C CSIPConcreteProfile* IPSecProfileCandidate(
            TBool& aRegistering );

        /**
         * Check if queue has profile which is waiting for allowed network.
         * A profile may be configured so that it can be used only in home
         * network or used in roaming state only if user has explicitly
         * enabled it.
         * @param aOnHomeNetwork ETrue if phone is currently on home nw,
         *        EFalse if phone is roaming
         * @return profile to be registered or NULL
         */
        IMPORT_C CSIPConcreteProfile* WaitingForAllowedNetworkProfile(
            TBool aOnHomeNetwork );

        /**
         * Removes the given profile from the registration queue.
         * @param aSIPConcreteProfile profile to remove
         * @return the removed profile, or NULL if it was not queued
         *         (presumed from the signature — confirm in the .cpp)
         */
        IMPORT_C CSIPConcreteProfile* RemoveProfileFromRegQueueL(
            CSIPConcreteProfile& aSIPConcreteProfile );

        /**
         * Check if both (registration and deregistration) queues are empty.
         * @return ETrue if both queues are empty
         */
        IMPORT_C TBool IsEmpty() const;

        /**
         * Cleanup of queue; profiles which have been changed from auto
         * registration mode to when-needed, or which don't have any users,
         * are removed.
         */
        IMPORT_C void Cleanup(
            CSIPConcreteProfile& aOldProfile,
            CSIPConcreteProfile* aNewProfile );

        /**
         * Check if profile is in queue.
         * @return ETrue if profile found.
         */
        IMPORT_C TBool IsInQueue(
            CSIPConcreteProfile& aSIPConcreteProfile );

        /**
         * Checks whether registration is allowed in current network
         * for the given profile.
         * @param aSIPConcreteProfile the profile to be checked
         * @param aOnHomeNetwork ETrue if the phone is currently on home
         *        network, EFalse if the phone is roaming
         * @return ETrue if registration is allowed
         */
        IMPORT_C TBool IsRegistrationAllowed(
            CSIPConcreteProfile& aSIPConcreteProfile,
            TBool aOnHomeNetwork );

    public: // New functions

        /**
         * Checks if registration/deregistration should be put
         * in queue to wait, because there is registration/deregistration
         * with same registrar address pending.
         * @return ETrue if should be put into queue to wait.
         */
        TBool AddIntoQueue(
            CSIPConcreteProfile& aSIPConcreteProfile );

        /**
         * aSIPConcreteProfile is added into queue,
         * until there is no more registration/deregistration
         * with same registration address pending final response.
         * @return ETrue if profile was added
         */
        TBool AddIntoDeregisterQueueL(
            CSIPConcreteProfile& aSIPConcreteProfile );

        /**
         * aSIPConcreteProfile is added into queue,
         * until there is no more registration/deregistration
         * with same registration address pending final response.
         * @return ETrue if profile was added
         */
        TBool AddIntoRegisterQueueL(
            CSIPConcreteProfile& aSIPConcreteProfile,
            TBool aRetryRegistration );

        /**
         * After final response is received, queue of profiles
         * which are waiting deregistration is checked.
         * If there is found profile with the same registrar as
         * input profile aSIPConcreteprofile has, that profile is removed
         * from queue and deregistration is continued with that profile.
         */
        TBool CheckDeregisterQueueL(
            CSIPConcreteProfile& aSIPConcreteProfile );

        /**
         * After final response is received, queue of profiles
         * which are waiting registration is checked.
         * If there is found profile with the same registrar as
         * input profile aSIPConcreteprofile has, that profile is removed
         * from queue and registration is continued with that profile.
         */
        TBool CheckRegisterQueueL(
            CSIPConcreteProfile& aSIPConcreteProfile );

        /**
         * Searches aProfileArray for a profile with the same
         * registrar as input profile aSIPConcreteProfile has.
         * @return index of the matching item (presumably KErrNotFound when
         *         there is no match — confirm in the .cpp)
         */
        TInt FindIndex(
            CSIPConcreteProfile& aSIPConcreteProfile,
            RArray<TSIPProfileQueueItem>& aProfileArray );

    private: // New functions

        // Appends aProfileItem to aProfileArray unless an equal item is
        // already present; returns ETrue if the item was added.
        TBool AddToQueueNoDuplicatesL(
            RArray<TSIPProfileQueueItem>& aProfileArray,
            TSIPProfileQueueItem& aProfileItem );

        // Removes from aProfileArray the entries made stale by the profile
        // change from aOldProfile to aNewProfile (see Cleanup()).
        void QueueCleanup(
            RArray<TSIPProfileQueueItem>& aProfileArray,
            CSIPConcreteProfile& aOldProfile,
            CSIPConcreteProfile* aNewProfile );

        /**
         * Constructor
         */
        CSIPProfileQueueHandling( MSIPProfileFSMUser& aUser );

        void ConstructL();

    private: //data

        // State-machine user that receives queue events; not owned.
        MSIPProfileFSMUser& iUser;

        // Profiles waiting to start registration.
        RArray<TSIPProfileQueueItem> iRegisteringQueue;

        // Profiles waiting to start deregistration.
        RArray<TSIPProfileQueueItem> iDeregisteringQueue;

    private: // For testing purposes

#ifdef CPPUNIT_TEST
        friend class CSIPProfileQueueHandlingTest;
#endif
    };

#endif // CSIPPRROFILEQUEUEHANDLING_H
# INSERTIONS INSERT INTO company(company_name, location) VALUES ('Hexaware', 'Chennai'), ('Google', 'Pune'), ('Microsoft', 'Mumbai'), ('Amazon', 'Banglore'), ('Adobe', 'Mumbai'), ('Uber', 'Mumbai'), ('TCS', 'Banglore'), ('Wipro', 'Banglore'), ('JP Morgan', 'Hyderabad'), ('IBM', 'Banglore'); INSERT INTO applicants(first_name, last_name, email, phone, resume) VALUES ('Harry', 'Potter', '[email protected]', '90909090', 'harry_resume.pdf'), ('Ronald', 'Weasley', '[email protected]', '90909080', 'ronald_resume.pdf'), ('Hermione', 'Granger', '[email protected]', '90909070', 'hermione_resume.pdf'), ('Draco', 'Malfoy', '[email protected]', '90909060', 'draco_resume.pdf'), ('Neville', 'Longbottom', '[email protected]', '90909050', 'neville_resume.pdf'), ('Ginny', 'Weasley', '[email protected]', '90909040', 'ginny_resume.pdf'); INSERT INTO jobs(job_title, job_description, job_location, salary, job_type, posted_date, company_id) VALUES ('Software Engineer', 'Develop software applications', 'Banglore', 900000, 'Full-time', '2024-02-01', 3), ('Data Analyst', 'Analyze huge data', 'Mumbai', 700000, 'Part-time', '2024-02-09', 4), ('Frontend Developer', 'Design and develop UI/UX', 'Pune', 800000, 'Full-time', '2024-01-09', 1), ('DevOps Engineer', 'Develop CI/CD solutions', 'Hyderabad', 1500000, 'Full-time', '2024-02-19', 2), ('ML Engineer', 'Develop ML applications', 'Chennai', 1000000, 'Full-time', '2023-12-12', 9), ('Business Analyst', 'Plans business solutions', 'Banglore', 800000, 'Full-time', '2023-11-29', 10), ('Backend Developer', 'Develop backend queries', 'Pune', 950000, 'Full-time', '2024-03-01', 5), ('Blockchain Engineer', 'Develop CI/CD solutions', 'Banglore', 1300000, 'Full-time', '2024-02-28', 6); INSERT INTO jobs(job_title, job_description, job_location, salary, job_type, posted_date, company_id) VALUES ('Software Developer', 'Develop Software Products', 'Chennai', 0, 'Internship', '2024-01-02', 7); INSERT INTO applications (jobs_id, applicants_id, application_date, 
cover_letter) VALUES (4, 3, '2024-02-01', 'cover_letter.pdf'), (1, 1, '2024-03-21', 'my_cover_letter.pdf'), (5, 4, '2024-01-17', 'cover_letter1.pdf'), (8, 5, '2024-12-18', 'company_cover_letter.pdf'), (6, 2, '2024-01-27', 'cover_letter.pdf'), (4, 6, '2024-02-10', '1cover_letter.pdf'); 5. Write an SQL query to count the number of applications received for each job listing in the "Jobs" table. Display the job title and the corresponding application count. Ensure that it lists all jobs, even if they have no applications. => select j.job_title, COUNT(ap.applicants_id) as num_of_applications from applications ap JOIN applicants a ON ap.applicants_id=a.id RIGHT JOIN jobs j ON j.id=ap.jobs_id group by j.job_title; +---------------------+---------------------+ | job_title | num_of_applications | +---------------------+---------------------+ | AI Engineer | 0 | | Backend Developer | 0 | | Blockchain Engineer | 1 | | Business Analyst | 1 | | Data Analyst | 0 | | DevOps Engineer | 2 | | Frontend Developer | 0 | | Gen. AI Engineer | 0 | | ML Engineer | 1 | | Software Developer | 0 | | Software Engineer | 1 | +---------------------+---------------------+ 6. Develop an SQL query that retrieves job listings from the "Jobs" table within a specified salary range. Allow parameters for the minimum and maximum salary values. Display the job title, company name, location, and salary for each matching job. => select c.company_name, j.job_title, j.job_location, j.salary from jobs j JOIN company c ON j.company_id=c.id WHERE salary between 800000 AND 1200000; +--------------+--------------------+--------------+---------+ | company_name | job_title | job_location | salary | +--------------+--------------------+--------------+---------+ | Hexaware | Frontend Developer | Pune | 800000 | | Google | Gen. 
AI Engineer | Chennai | 900000 | | Microsoft | Software Engineer | Banglore | 900000 | | Adobe | Backend Developer | Pune | 950000 | | JP Morgan | ML Engineer | Chennai | 1000000 | | IBM | Business Analyst | Banglore | 800000 | +--------------+--------------------+--------------+---------+ 7. Write an SQL query that retrieves the job application history for a specific applicant. Allow a parameter for the ApplicantID, and return a result set with the job titles, company names, and application dates for all the jobs the applicant has applied to. => select c.company_name, j.job_title, ap.application_date from applications ap JOIN applicants a ON a.id=ap.applicants_id JOIN jobs j ON ap.jobs_id=j.id JOIN company c ON j.company_id=c.id where a.first_name='Harry' AND a.last_name='Potter'; +--------------+-------------------+------------------+ | company_name | job_title | application_date | +--------------+-------------------+------------------+ | Microsoft | Software Engineer | 2024-03-21 | +--------------+-------------------+------------------+ 8. Create an SQL query that calculates and displays the average salary offered by all companies for job listings in the "Jobs" table. Ensure that the query filters out jobs with a salary of zero. => select c.company_name, AVG(j.salary) from jobs j JOIN company c ON c.id=j.company_id group by c.company_name having AVG(j.salary>0); +--------------+---------------+ | company_name | AVG(j.salary) | +--------------+---------------+ | Adobe | 950000 | | Amazon | 700000 | | Google | 1300000 | | Hexaware | 800000 | | IBM | 800000 | | JP Morgan | 1000000 | | Microsoft | 900000 | | Uber | 1300000 | +--------------+---------------+ 9. Write an SQL query to identify the company that has posted the most job listings. Display the company name along with the count of job listings they have posted. Handle ties if multiple companies have the same maximum count. 
=> select c.company_name, COUNT(c.company_name) from jobs j JOIN company c ON j.company_id=c.id group by c.company_name order by COUNT(c.company_name) DESC limit 1; => select c.company_name, COUNT(c.company_name) as job_posts from jobs j JOIN company c ON j.company_id=c.id group by c.company_name HAVING COUNT(c.company_name) = (select COUNT(c.company_name) from jobs j JOIN company c ON j.company_id=c.id group by c.company_name ORDER BY COUNT(c.company_name) DESC LIMIT 1); +--------------+-----------------------+ | company_name | COUNT(c.company_name) | +--------------+-----------------------+ | Google | 3 | +--------------+-----------------------+ 10. Find the applicants who have applied for positions in companies located in 'CityX' and have at least 3 years of experience. => select a.first_name, a.last_name, c.company_name from applications ap JOIN applicants a ON a.id=ap.applicants_id JOIN jobs j ON j.id=ap.jobs_id JOIN company c ON c.id=j.company_id where c.location='Mumbai'; +------------+------------+--------------+ | first_name | last_name | company_name | +------------+------------+--------------+ | Harry | Potter | Microsoft | | Neville | Longbottom | Uber | +------------+------------+--------------+ 11. Retrieve a list of distinct job titles with salaries between $60,000 and $80,000. (values according to my database)... => select distinct job_title, salary from jobs where salary between 900000 and 1300000; +---------------------+---------+ | job_title | salary | +---------------------+---------+ | Software Engineer | 900000 | | ML Engineer | 1000000 | | Backend Developer | 950000 | | Blockchain Engineer | 1300000 | | Gen. AI Engineer | 900000 | +---------------------+---------+ 12. Find the jobs that have not received any applications. 
=> select * from jobs where id not in (select jobs_id from applications); +----+--------------------+---------------------------+--------------+---------+------------+-------------+------------+ | id | job_title | job_description | job_location | salary | job_type | posted_date | company_id | +----+--------------------+---------------------------+--------------+---------+------------+-------------+------------+ | 2 | Data Analyst | Analyze huge data | Mumbai | 700000 | Part-time | 2024-02-09 | 4 | | 3 | Frontend Developer | Design and develop UI/UX | Pune | 800000 | Full-time | 2024-01-09 | 1 | | 7 | Backend Developer | Develop backend queries | Pune | 950000 | Full-time | 2024-03-01 | 5 | | 9 | AI Engineer | Develop AI applications | Banglore | 1500000 | Full-time | 2024-02-02 | 2 | | 10 | Gen. AI Engineer | Develop Gen AI solutions | Chennai | 900000 | Full-time | 2024-12-02 | 2 | | 11 | Software Developer | Develop Software Products | Chennai | 0 | Internship | 2024-01-02 | 7 | +----+--------------------+---------------------------+--------------+---------+------------+-------------+------------+ 13. Retrieve a list of job applicants along with the companies they have applied to and the positions they have applied for. => select a.first_name, a.last_name, c.company_name, j.job_title from applications ap JOIN applicants a ON ap.applicants_id=a.id JOIN jobs j ON j.id=ap.jobs_id JOIN company c ON c.id=j.company_id; +------------+------------+--------------+---------------------+ | first_name | last_name | company_name | job_title | +------------+------------+--------------+---------------------+ | Hermione | Granger | Google | DevOps Engineer | | Harry | Potter | Microsoft | Software Engineer | | Draco | Malfoy | JP Morgan | ML Engineer | | Neville | Longbottom | Uber | Blockchain Engineer | | Ronald | Weasley | IBM | Business Analyst | | Ginny | Weasley | Google | DevOps Engineer | +------------+------------+--------------+---------------------+ 14. 
Retrieve a list of companies along with the count of jobs they have posted, even if they have not received any applications. => select c.company_name, COUNT(j.company_id) from jobs j RIGHT JOIN company c ON j.company_id=c.id group by c.company_name; +--------------+---------------------+ | company_name | COUNT(j.company_id) | +--------------+---------------------+ | Adobe | 1 | | Amazon | 1 | | Google | 3 | | Hexaware | 1 | | IBM | 1 | | JP Morgan | 1 | | Microsoft | 1 | | TCS | 1 | | Uber | 1 | | Wipro | 0 | +--------------+---------------------+ 15. List all applicants along with the companies and positions they have applied for, including those who have not applied. => select a.first_name, a.last_name, c.company_name, j.job_title from applications ap RIGHT JOIN applicants a ON ap.applicants_id=a.id JOIN jobs j ON j.id=ap.jobs_id JOIN company c ON c.id=j.company_id; +------------+------------+--------------+---------------------+ | first_name | last_name | company_name | job_title | +------------+------------+--------------+---------------------+ | Hermione | Granger | Google | DevOps Engineer | | Harry | Potter | Microsoft | Software Engineer | | Draco | Malfoy | JP Morgan | ML Engineer | | Neville | Longbottom | Uber | Blockchain Engineer | | Ronald | Weasley | IBM | Business Analyst | | Ginny | Weasley | Google | DevOps Engineer | +------------+------------+--------------+---------------------+ 16. Find companies that have posted jobs with a salary higher than the average salary of all jobs. => select c.company_name from jobs j JOIN company c ON j.company_id=c.id where j.salary>(select avg(salary) from jobs); +--------------+ | company_name | +--------------+ | Google | | Google | | Adobe | | Uber | | JP Morgan | +--------------+ 17. Display a list of applicants with their names and a concatenated string of their city and state. (values according to my database)... 
=> select CONCAT(first_name,' ',last_name) as applicant_name, email, phone from applicants; +--------------------+-----------------------+----------+ | applicant_name | email | phone | +--------------------+-----------------------+----------+ | Harry Potter | [email protected] | 90909090 | | Ronald Weasley | [email protected] | 90909080 | | Hermione Granger | [email protected] | 90909070 | | Draco Malfoy | [email protected] | 90909060 | | Neville Longbottom | [email protected] | 90909050 | | Ginny Weasley | [email protected] | 90909040 | +--------------------+-----------------------+----------+ 18. Retrieve a list of jobs with titles containing either 'Developer' or 'Engineer'. => select * from jobs where job_title like '%developer%' OR job_title like '%engineer%'; +----+---------------------+-------------------------------+--------------+---------+------------+-------------+------------+ | id | job_title | job_description | job_location | salary | job_type | posted_date | company_id | +----+---------------------+-------------------------------+--------------+---------+------------+-------------+------------+ | 1 | Software Engineer | Develop software applications | Banglore | 900000 | Full-time | 2024-02-01 | 3 | | 3 | Frontend Developer | Design and develop UI/UX | Pune | 800000 | Full-time | 2024-01-09 | 1 | | 4 | DevOps Engineer | Develop CI/CD solutions | Hyderabad | 1500000 | Full-time | 2024-02-19 | 2 | | 5 | ML Engineer | Develop ML applications | Chennai | 1000000 | Full-time | 2023-12-12 | 9 | | 7 | Backend Developer | Develop backend queries | Pune | 950000 | Full-time | 2024-03-01 | 5 | | 8 | Blockchain Engineer | Develop CI/CD solutions | Banglore | 1300000 | Full-time | 2024-02-28 | 6 | | 9 | AI Engineer | Develop AI applications | Banglore | 1500000 | Full-time | 2024-02-02 | 2 | | 10 | Gen. 
AI Engineer | Develop Gen AI solutions | Chennai | 900000 | Full-time | 2024-12-02 | 2 | | 11 | Software Developer | Develop Software Products | Chennai | 0 | Internship | 2024-01-02 | 7 | +----+---------------------+-------------------------------+--------------+---------+------------+-------------+------------+ 19. Retrieve a list of applicants and the jobs they have applied for, including those who have not applied and jobs without applicants. => select a.first_name, a.last_name, j.job_title from applications ap RIGHT JOIN applicants a ON ap.applicants_id=a.id RIGHT JOIN jobs j ON ap.jobs_id=j.id; +------------+------------+---------------------+ | first_name | last_name | job_title | +------------+------------+---------------------+ | Harry | Potter | Software Engineer | | NULL | NULL | Data Analyst | | NULL | NULL | Frontend Developer | | Hermione | Granger | DevOps Engineer | | Ginny | Weasley | DevOps Engineer | | Draco | Malfoy | ML Engineer | | Ronald | Weasley | Business Analyst | | NULL | NULL | Backend Developer | | Neville | Longbottom | Blockchain Engineer | | NULL | NULL | AI Engineer | | NULL | NULL | Gen. AI Engineer | | NULL | NULL | Software Developer | +------------+------------+---------------------+ 20. List all combinations of applicants and companies where the company is in a specific city and the applicant has more than 2 years of experience. For example: city=Chennai => select a.first_name, a.last_name, c.company_name from applications ap RIGHT JOIN applicants a ON ap.applicants_id=a.id RIGHT JOIN jobs j ON ap.jobs_id=j.id RIGHT JOIN company c ON c.id=j.company_id WHERE c.location='mumbai'; +------------+------------+--------------+ | first_name | last_name | company_name | +------------+------------+--------------+ | Harry | Potter | Microsoft | | NULL | NULL | Adobe | | Neville | Longbottom | Uber | +------------+------------+--------------+
--- title: "In 2024, Strategic Use of Hashtags in Gaming Vlog Production" date: 2024-05-31T12:39:39.073Z updated: 2024-06-01T12:39:39.073Z tags: - ai video - ai youtube categories: - ai - youtube description: "This Article Describes In 2024, Strategic Use of Hashtags in Gaming Vlog Production" excerpt: "This Article Describes In 2024, Strategic Use of Hashtags in Gaming Vlog Production" keywords: "GamingHashtagStrategy,VlogHashtagTips,ProductionHashtags,StrategicHashtags,GameDevHashtagUse,VlogHashtagArt,ContentHashtags" thumbnail: https://www.lifewire.com/thmb/KDQnKAs2Hu7gMnkVlLoGjvCxkow=/400x300/filters:no_upscale():max_bytes(150000):strip_icc()/close-up-computer-desk-3061-594f0a7ea74040cfbc46ca39c6d806c4.jpg --- ## Strategic Use of Hashtags in Gaming Vlog Production Hashtags are a great way to reach new viewers and subscribers interested in your content. But with so many hashtags out there, finding which ones are perfect for your YouTube gaming channel can be challenging. That's why we've extensively researched you and compiled a list of the 30 **best tags/hashtags for YouTube gaming** videos. We've also included tools to help you generate or plan your hashtags effectively to grow your channel and boost your subscription. So, if you want to take your YouTube gaming channel to the next level, read on! ## 30 Best YouTube Gaming Hashtags In recent years, YouTube has become the most popular platform for gamers worldwide. However, with millions of users uploading new videos daily, making your content stand out from the rest can take time and effort. One way to alleviate the reach of your YouTube gaming videos is to use popular hashtags that can help you for many purposes like: 1. Driving traffic toward your videos 2. Increasing your brand awareness 3. Expanding the search visibility of your content You can also increase your video's organic volume by using appropriate hashtags. 
Here is the list of the 30 **most popular hashtags with high search volume for YouTube gaming:** 1. #gamer 2. #nintendo 3. #games 4. #livestream 5. #videogames 6. #gaming 7. #youtubegamer 8. #fortnite 9. #PleaseForgiveMe 10. #pcgaming 11. #playfortnite 12. #xbox 13. #gamingcommunity 14. #gamerguy 15. #gamergirl 16. #gamereview 17. #livegaming 18. #play 19. #winning 20. #videogameaddict 21. #twitchstreamer 22. #youtubegaming 23. #reddead 24. #playinggames 25. #diablo 26. #callofduty 27. #xboxone 28. #retrogaming 29. #smallyoutuberarmy 30. #nintendoswitch ## Useful Tools For YouTube Gaming Hashtags Well goes an old saying: "_B**etter to teach someone to fish than to give him a fish**_**."** As a vlogger, you should use hashtags every time you upload a video to help your video spread and guide viewers to find more of your content. But it would help if you can ensure these hashtags are natural and relevant. Hence, apart from providing the list of popular gaming hashtags, we have created this list of **YouTube hashtag tools** to help you generate such tags. ### Keyword Tool The Keyword Tool is a popular YouTube hashtags generator that can help you find popular gaming hashtags to use on YouTube. You can understand what keywords your audience is searching for with this tool. New and popular YouTube gaming hashtags are just one click away with Keyword Tool. This tool makes developing variations, ideas, and other stuff relatively easy. It's the best tool on the market now and gets updated regularly to provide the most relevant result each time. The Keyword tool offers categories for each social platform and search engine where you can find the most popular tag for your videos. Hence, you can create more relevant content and reach more people on YouTube, Twitch streams, and Instagram posts. Moreover, it provides suggestions for combinations of titles. 
For example, it produces the fastest results and uses tools to add the names of the games, providing you with more personalized results.
![seobook for youtube gaming hashtags](https://images.wondershare.com/filmora/article-images/2023/03/seobook-for-youtube-gaming-hashtags.png) ![](https://images.wondershare.com/assets/images-common/icon-note.png) **Note:** _If you want to optimize your YouTube gaming videos' descriptions, tags and titles,_ [_click here to learn how!_](https://tools.techidaily.com/wondershare/filmora/download/) ## Bonus: Using Filmora To Screen Record Or Edit The Gaming Video Although we have talked about the most popular gaming hashtags and the useful tools for you videos, meanwhile, the quality and the content of the video are worth of high attention. With the popularity of YouTube Gaming videos, how to record or edit content to make it more engaging and eye-capturing is important. Therefore, we recommend using [Filmora](https://tools.techidaily.com/wondershare/filmora/download/) \- a user-friendly video editing software for both Mac and Windows. It has all the features you need to edit, screen record, and [export the edited videos to YouTube.](https://tools.techidaily.com/wondershare/filmora/download/) [https://www.youtube.com/embed/](https://www.youtube.com/embed/ktyRHjL5bYM) [Free Download](https://tools.techidaily.com/wondershare/filmora/download/) For Win 7 or later(64-bit) [Free Download](https://tools.techidaily.com/wondershare/filmora/download/) For macOS 10.14 or later There are many reasons to use Filmora, but here are the most important ones: * Quick and easy to use, even for novice users * It helps you [screen record](https://tools.techidaily.com/wondershare/filmora/download/) the footage of your game and webcam simultaneously while you are playing. * Offers AI portrait, green screen, mask & blend, New BlueFX, and Boris FX effects * Excellent and clear UI * It lets you edit the video and add effects, transitions, and other elements to make it more engaging. 
* It includes instant mode, PIP, preset templates for different occasions, WordArt, title editing, and motion tracking features. * You can export the edited video to YouTube with just a few clicks. ## Conclusion This comprehensive article provides the 30 **best tags/hashtags for YouTube gaming** to help you improve your content visibility. We've also discussed the three most popular YouTube hashtags generator. Moreover, we've recommended using Filmora to quickly screen record or edit your YouTube gaming videos before searching for popular hashtags. We hope you've enjoyed reading this article and can now use the right hashtags for your YouTube gaming content. ## Bonus: Using Filmora To Screen Record Or Edit The Gaming Video Although we have talked about the most popular gaming hashtags and the useful tools for you videos, meanwhile, the quality and the content of the video are worth of high attention. With the popularity of YouTube Gaming videos, how to record or edit content to make it more engaging and eye-capturing is important. Therefore, we recommend using [Filmora](https://tools.techidaily.com/wondershare/filmora/download/) \- a user-friendly video editing software for both Mac and Windows. It has all the features you need to edit, screen record, and [export the edited videos to YouTube.](https://tools.techidaily.com/wondershare/filmora/download/) [https://www.youtube.com/embed/](https://www.youtube.com/embed/ktyRHjL5bYM) [Free Download](https://tools.techidaily.com/wondershare/filmora/download/) For Win 7 or later(64-bit) [Free Download](https://tools.techidaily.com/wondershare/filmora/download/) For macOS 10.14 or later There are many reasons to use Filmora, but here are the most important ones: * Quick and easy to use, even for novice users * It helps you [screen record](https://tools.techidaily.com/wondershare/filmora/download/) the footage of your game and webcam simultaneously while you are playing. 
* Offers AI portrait, green screen, mask & blend, New BlueFX, and Boris FX effects * Excellent and clear UI * It lets you edit the video and add effects, transitions, and other elements to make it more engaging. * It includes instant mode, PIP, preset templates for different occasions, WordArt, title editing, and motion tracking features. * You can export the edited video to YouTube with just a few clicks. ## Conclusion This comprehensive article provides the 30 **best tags/hashtags for YouTube gaming** to help you improve your content visibility. We've also discussed the three most popular YouTube hashtags generator. Moreover, we've recommended using Filmora to quickly screen record or edit your YouTube gaming videos before searching for popular hashtags. We hope you've enjoyed reading this article and can now use the right hashtags for your YouTube gaming content. <ins class="adsbygoogle" style="display:block" data-ad-format="autorelaxed" data-ad-client="ca-pub-7571918770474297" data-ad-slot="1223367746"></ins> <ins class="adsbygoogle" style="display:block" data-ad-format="autorelaxed" data-ad-client="ca-pub-7571918770474297" data-ad-slot="1223367746"></ins> ## Navigating Through Your Preferred YouTube Viewing Pace YouTube is one of the top online learning platforms that ever existed. You could learn almost anything on YouTube just by ardently following through the videos with rapt attention, intent, and understanding. However, the degree to which you know effectively can hamper if the video you watch is too fast. More so, you could want to save time while playing your choice YouTube video and yet wish to get all the information. You can eat your muffin and have it by speeding up or slowing down the **YouTube playback speed**. This article will show you how to **speed up YouTube videos** or slow them down. 
#### In this article 01 [How to Change YouTube's Playback Speed on Computer](#part1) 02 [How to Change Video Speed with Keyboard Shortcuts on YouTube](#part2) 03 [How to Change YouTube's Video Playback Speed on Mobile](#part3) 04 [How YouTube's Playback Speed Controls Work?](#part4) 05 [How to Change a YouTube Video's Speed Online](#part5) ## Part 1: How to Change YouTube's Playback Speed on Computer To **fast-forward YouTube** playback videos or slow it down on the computer, you must follow the proper steps. Changing **YouTube's playback speed** on a computer is not a complex thing to do. You can achieve it if you follow the steps we will show you. On the website, you can manually alter the **YouTube video's playback speed** with your computer while watching it. To start, at the bottom right corner of your YouTube video, click the **gear wheel icon** there. You'll see a list of other settings like quality, subtitles, and playback speed. Select **playback speed,** and you will see a list of speeds from 0.25x, 0.5x, 0.75x, normal, 1.25x, 1.5x, 1.75 x to 2.0x. ![change youtube video playback speed option](https://images.wondershare.com/filmora/article-images/change-youtube-video-playback-speed-option.jpg) These are preset speeds. If they are not suitable for you, you will have to select a custom speed, but they must be in the same range of 0.25x to 2.0x. To do this, go back to playback speed. Select **custom** in the top-right side of the pop-up menu**,** then adjust the speed using a slider that you will see there. **Related**: [How to Make a YouTube Video](https://tools.techidaily.com/wondershare/filmora/download/). ## Part 2: How to Change Video Speed with Keyboard Shortcuts on YouTube If you are watching a YouTube Video on your computer and want to adjust the playback speed, you can change the speed with keyboard shortcuts. Here's another way to adjust the **playback speed of YouTube** videos more quickly. 
To **fast-forward the YouTube** video, press your shift button and hold down, then tap your greater than(>) symbol on your keyboard. Your **YouTube playback speed** will increase by 0.25 as you keep tapping. To reduce the **playback speed YouTube** videos, the reverse is the case. Press and hold down the shift tab, then tap less than the (<) symbol. Changing the playback speed using these keyboard shortcuts increases by 0.25 increments. You have to use custom if you want other values, as earlier mentioned. The best thing about this method of changing the speed is that you can do it while watching the movie or while paused. ## Part 3: How to Change YouTube's Video Playback Speed on Mobile If you'd like to know **how to speed up YouTube videos** on your Android, iPhone, or iPad, here's it. First, you must open your YouTube App on your device and play the video you want to watch. Then tap the video once, and it will bring up the toolbar. At the top right corner, select the vertically aligned three dots. This will bring out a menu for you—select playback speed. ![youtube playback speed option mobile](https://images.wondershare.com/filmora/article-images/youtube-playback-speed-option-mobile.jpg) Then you will see a list containing the preset speed provided by YouTube. ![change youtube video speed mobile](https://images.wondershare.com/filmora/article-images/change-youtube-video-speed-mobile.jpg) 1 is the average speed. Any value below than 1 will slow down the video, while any value above it will increase it. Once done, close the menu, and the video will resume from where it stopped. ## Part 4: How YouTube's Playback Speed Controls Work? YouTube has an added feature known as 'playback speed.' This feature allows you to select a speed from 0.25x to 2x the normal speed. With this **YouTube's speed control**, 1 is the normal speed, with 0.25 being one-quarter (1/4th) of the actual speed and slowing the video. 
In the same vein, 2x is twice the average speed, increasing the video speed. However, in as much as **YouTube speed control** slows or speeds up the video, it does not change the pitch of the video. All it does is either expand or compress the video samples while still maintaining the video or audio pitch. It would still sound like the podcaster or video blogger is just talking faster or slower. Music would still maintain the same musical key and pitch while playing faster or slower. If you are watching a presentation or interview that seems like everyone is talking like they have all the time in the world, you can **fast forward YouTube** video by using the feature. Also, you can slow down a tutorial or the video you are watching if you feel they are too fast using this **YouTube speed control**. **Conclusion** **YouTube speed control** has made it easier to change **YouTube playback speed** to either speed up or slow down the video. This enables you to capture important moments or information you may miss if the video was fast. Also, you can save time and get to the information you are searching for as soon as possible by speeding the YouTube video. 02 [How to Change Video Speed with Keyboard Shortcuts on YouTube](#part2) 03 [How to Change YouTube's Video Playback Speed on Mobile](#part3) 04 [How YouTube's Playback Speed Controls Work?](#part4) 05 [How to Change a YouTube Video's Speed Online](#part5) ## Part 1: How to Change YouTube's Playback Speed on Computer To **fast-forward YouTube** playback videos or slow it down on the computer, you must follow the proper steps. Changing **YouTube's playback speed** on a computer is not a complex thing to do. You can achieve it if you follow the steps we will show you. On the website, you can manually alter the **YouTube video's playback speed** with your computer while watching it. To start, at the bottom right corner of your YouTube video, click the **gear wheel icon** there. 
You'll see a list of other settings like quality, subtitles, and playback speed. Select **playback speed,** and you will see a list of speeds from 0.25x, 0.5x, 0.75x, normal, 1.25x, 1.5x, 1.75 x to 2.0x. ![change youtube video playback speed option](https://images.wondershare.com/filmora/article-images/change-youtube-video-playback-speed-option.jpg) These are preset speeds. If they are not suitable for you, you will have to select a custom speed, but they must be in the same range of 0.25x to 2.0x. To do this, go back to playback speed. Select **custom** in the top-right side of the pop-up menu**,** then adjust the speed using a slider that you will see there. **Related**: [How to Make a YouTube Video](https://tools.techidaily.com/wondershare/filmora/download/). ## Part 2: How to Change Video Speed with Keyboard Shortcuts on YouTube If you are watching a YouTube Video on your computer and want to adjust the playback speed, you can change the speed with keyboard shortcuts. Here's another way to adjust the **playback speed of YouTube** videos more quickly. To **fast-forward the YouTube** video, press your shift button and hold down, then tap your greater than(>) symbol on your keyboard. Your **YouTube playback speed** will increase by 0.25 as you keep tapping. To reduce the **playback speed YouTube** videos, the reverse is the case. Press and hold down the shift tab, then tap less than the (<) symbol. Changing the playback speed using these keyboard shortcuts increases by 0.25 increments. You have to use custom if you want other values, as earlier mentioned. The best thing about this method of changing the speed is that you can do it while watching the movie or while paused. ## Part 3: How to Change YouTube's Video Playback Speed on Mobile If you'd like to know **how to speed up YouTube videos** on your Android, iPhone, or iPad, here's it. First, you must open your YouTube App on your device and play the video you want to watch. 
Then tap the video once, and it will bring up the toolbar. At the top right corner, select the vertically aligned three dots. This will bring out a menu for you—select playback speed. ![youtube playback speed option mobile](https://images.wondershare.com/filmora/article-images/youtube-playback-speed-option-mobile.jpg) Then you will see a list containing the preset speed provided by YouTube. ![change youtube video speed mobile](https://images.wondershare.com/filmora/article-images/change-youtube-video-speed-mobile.jpg) 1 is the average speed. Any value below than 1 will slow down the video, while any value above it will increase it. Once done, close the menu, and the video will resume from where it stopped. ## Part 4: How YouTube's Playback Speed Controls Work? YouTube has an added feature known as 'playback speed.' This feature allows you to select a speed from 0.25x to 2x the normal speed. With this **YouTube's speed control**, 1 is the normal speed, with 0.25 being one-quarter (1/4th) of the actual speed and slowing the video. In the same vein, 2x is twice the average speed, increasing the video speed. However, in as much as **YouTube speed control** slows or speeds up the video, it does not change the pitch of the video. All it does is either expand or compress the video samples while still maintaining the video or audio pitch. It would still sound like the podcaster or video blogger is just talking faster or slower. Music would still maintain the same musical key and pitch while playing faster or slower. If you are watching a presentation or interview that seems like everyone is talking like they have all the time in the world, you can **fast forward YouTube** video by using the feature. Also, you can slow down a tutorial or the video you are watching if you feel they are too fast using this **YouTube speed control**. **Conclusion** **YouTube speed control** has made it easier to change **YouTube playback speed** to either speed up or slow down the video. 
This enables you to capture important moments or information you may miss if the video was fast. Also, you can save time and get to the information you are searching for as soon as possible by speeding the YouTube video. 02 [How to Change Video Speed with Keyboard Shortcuts on YouTube](#part2) 03 [How to Change YouTube's Video Playback Speed on Mobile](#part3) 04 [How YouTube's Playback Speed Controls Work?](#part4) 05 [How to Change a YouTube Video's Speed Online](#part5) ## Part 1: How to Change YouTube's Playback Speed on Computer To **fast-forward YouTube** playback videos or slow it down on the computer, you must follow the proper steps. Changing **YouTube's playback speed** on a computer is not a complex thing to do. You can achieve it if you follow the steps we will show you. On the website, you can manually alter the **YouTube video's playback speed** with your computer while watching it. To start, at the bottom right corner of your YouTube video, click the **gear wheel icon** there. You'll see a list of other settings like quality, subtitles, and playback speed. Select **playback speed,** and you will see a list of speeds from 0.25x, 0.5x, 0.75x, normal, 1.25x, 1.5x, 1.75 x to 2.0x. ![change youtube video playback speed option](https://images.wondershare.com/filmora/article-images/change-youtube-video-playback-speed-option.jpg) These are preset speeds. If they are not suitable for you, you will have to select a custom speed, but they must be in the same range of 0.25x to 2.0x. To do this, go back to playback speed. Select **custom** in the top-right side of the pop-up menu**,** then adjust the speed using a slider that you will see there. **Related**: [How to Make a YouTube Video](https://tools.techidaily.com/wondershare/filmora/download/). ## Part 2: How to Change Video Speed with Keyboard Shortcuts on YouTube If you are watching a YouTube Video on your computer and want to adjust the playback speed, you can change the speed with keyboard shortcuts. 
Here's another way to adjust the **playback speed of YouTube** videos more quickly. To **fast-forward the YouTube** video, press your shift button and hold down, then tap your greater than(>) symbol on your keyboard. Your **YouTube playback speed** will increase by 0.25 as you keep tapping. To reduce the **playback speed YouTube** videos, the reverse is the case. Press and hold down the shift tab, then tap less than the (<) symbol. Changing the playback speed using these keyboard shortcuts increases by 0.25 increments. You have to use custom if you want other values, as earlier mentioned. The best thing about this method of changing the speed is that you can do it while watching the movie or while paused. ## Part 3: How to Change YouTube's Video Playback Speed on Mobile If you'd like to know **how to speed up YouTube videos** on your Android, iPhone, or iPad, here's it. First, you must open your YouTube App on your device and play the video you want to watch. Then tap the video once, and it will bring up the toolbar. At the top right corner, select the vertically aligned three dots. This will bring out a menu for you—select playback speed. ![youtube playback speed option mobile](https://images.wondershare.com/filmora/article-images/youtube-playback-speed-option-mobile.jpg) Then you will see a list containing the preset speed provided by YouTube. ![change youtube video speed mobile](https://images.wondershare.com/filmora/article-images/change-youtube-video-speed-mobile.jpg) 1 is the average speed. Any value below than 1 will slow down the video, while any value above it will increase it. Once done, close the menu, and the video will resume from where it stopped. ## Part 4: How YouTube's Playback Speed Controls Work? YouTube has an added feature known as 'playback speed.' This feature allows you to select a speed from 0.25x to 2x the normal speed. 
With this **YouTube's speed control**, 1 is the normal speed, with 0.25 being one-quarter (1/4th) of the actual speed and slowing the video. In the same vein, 2x is twice the average speed, increasing the video speed. However, in as much as **YouTube speed control** slows or speeds up the video, it does not change the pitch of the video. All it does is either expand or compress the video samples while still maintaining the video or audio pitch. It would still sound like the podcaster or video blogger is just talking faster or slower. Music would still maintain the same musical key and pitch while playing faster or slower. If you are watching a presentation or interview that seems like everyone is talking like they have all the time in the world, you can **fast forward YouTube** video by using the feature. Also, you can slow down a tutorial or the video you are watching if you feel they are too fast using this **YouTube speed control**. **Conclusion** **YouTube speed control** has made it easier to change **YouTube playback speed** to either speed up or slow down the video. This enables you to capture important moments or information you may miss if the video was fast. Also, you can save time and get to the information you are searching for as soon as possible by speeding the YouTube video. 02 [How to Change Video Speed with Keyboard Shortcuts on YouTube](#part2) 03 [How to Change YouTube's Video Playback Speed on Mobile](#part3) 04 [How YouTube's Playback Speed Controls Work?](#part4) 05 [How to Change a YouTube Video's Speed Online](#part5) ## Part 1: How to Change YouTube's Playback Speed on Computer To **fast-forward YouTube** playback videos or slow it down on the computer, you must follow the proper steps. Changing **YouTube's playback speed** on a computer is not a complex thing to do. You can achieve it if you follow the steps we will show you. On the website, you can manually alter the **YouTube video's playback speed** with your computer while watching it. 
To start, at the bottom right corner of your YouTube video, click the **gear wheel icon** there. You'll see a list of other settings like quality, subtitles, and playback speed. Select **playback speed,** and you will see a list of speeds from 0.25x, 0.5x, 0.75x, normal, 1.25x, 1.5x, 1.75 x to 2.0x. ![change youtube video playback speed option](https://images.wondershare.com/filmora/article-images/change-youtube-video-playback-speed-option.jpg) These are preset speeds. If they are not suitable for you, you will have to select a custom speed, but they must be in the same range of 0.25x to 2.0x. To do this, go back to playback speed. Select **custom** in the top-right side of the pop-up menu**,** then adjust the speed using a slider that you will see there. **Related**: [How to Make a YouTube Video](https://tools.techidaily.com/wondershare/filmora/download/). ## Part 2: How to Change Video Speed with Keyboard Shortcuts on YouTube If you are watching a YouTube Video on your computer and want to adjust the playback speed, you can change the speed with keyboard shortcuts. Here's another way to adjust the **playback speed of YouTube** videos more quickly. To **fast-forward the YouTube** video, press your shift button and hold down, then tap your greater than(>) symbol on your keyboard. Your **YouTube playback speed** will increase by 0.25 as you keep tapping. To reduce the **playback speed YouTube** videos, the reverse is the case. Press and hold down the shift tab, then tap less than the (<) symbol. Changing the playback speed using these keyboard shortcuts increases by 0.25 increments. You have to use custom if you want other values, as earlier mentioned. The best thing about this method of changing the speed is that you can do it while watching the movie or while paused. ## Part 3: How to Change YouTube's Video Playback Speed on Mobile If you'd like to know **how to speed up YouTube videos** on your Android, iPhone, or iPad, here's it. 
First, you must open your YouTube App on your device and play the video you want to watch. Then tap the video once, and it will bring up the toolbar. At the top right corner, select the vertically aligned three dots. This will bring out a menu for you—select playback speed. ![youtube playback speed option mobile](https://images.wondershare.com/filmora/article-images/youtube-playback-speed-option-mobile.jpg) Then you will see a list containing the preset speed provided by YouTube. ![change youtube video speed mobile](https://images.wondershare.com/filmora/article-images/change-youtube-video-speed-mobile.jpg) 1 is the average speed. Any value below than 1 will slow down the video, while any value above it will increase it. Once done, close the menu, and the video will resume from where it stopped. ## Part 4: How YouTube's Playback Speed Controls Work? YouTube has an added feature known as 'playback speed.' This feature allows you to select a speed from 0.25x to 2x the normal speed. With this **YouTube's speed control**, 1 is the normal speed, with 0.25 being one-quarter (1/4th) of the actual speed and slowing the video. In the same vein, 2x is twice the average speed, increasing the video speed. However, in as much as **YouTube speed control** slows or speeds up the video, it does not change the pitch of the video. All it does is either expand or compress the video samples while still maintaining the video or audio pitch. It would still sound like the podcaster or video blogger is just talking faster or slower. Music would still maintain the same musical key and pitch while playing faster or slower. If you are watching a presentation or interview that seems like everyone is talking like they have all the time in the world, you can **fast forward YouTube** video by using the feature. Also, you can slow down a tutorial or the video you are watching if you feel they are too fast using this **YouTube speed control**. 
**Conclusion** **YouTube speed control** has made it easier to change **YouTube playback speed** to either speed up or slow down the video. This enables you to capture important moments or information you may miss if the video was fast. Also, you can save time and get to the information you are searching for as soon as possible by speeding the YouTube video. <ins class="adsbygoogle" style="display:block" data-ad-format="autorelaxed" data-ad-client="ca-pub-7571918770474297" data-ad-slot="1223367746"></ins> <ins class="adsbygoogle" style="display:block" data-ad-client="ca-pub-7571918770474297" data-ad-slot="8358498916" data-ad-format="auto" data-full-width-responsive="true"></ins>
import React from "react"; import { ButtonBase } from "@mui/material"; import Stack from '@mui/material/Stack'; import ChevronLeftIcon from '@mui/icons-material/ChevronLeft'; import ChevronRightIcon from '@mui/icons-material/ChevronRight'; export default function Pagination({currentPage, totalPages, paginate}) { return ( <Stack direction="row" spacing={2} justifyContent="center" marginTop={2} marginBottom={3} > <ButtonBase onClick={() => paginate(currentPage - 1)} disabled={currentPage === 1} variant="contained" > <ChevronLeftIcon fontSize="small" color={currentPage > 1 ? "action" : "disabled"} /> </ButtonBase> <ButtonBase onClick={() => paginate(currentPage + 1)} disabled={currentPage === totalPages} variant="contained" > <ChevronRightIcon fontSize="small" color={currentPage < totalPages ? "action" : "disabled"} /> </ButtonBase> </Stack> ); }
import { EyeOutlined, PlusCircleOutlined, SearchOutlined, } from "@ant-design/icons"; import { Button, Card, Input, Menu, message, Table } from "antd"; import EllipsisDropdown from "components/shared-components/EllipsisDropdown"; import Flex from "components/shared-components/Flex"; import React, { useEffect, useState } from "react"; import { useHistory } from "react-router-dom"; import PushNotifications from "services/PushNotification"; import RetrieveDate from "services/RetrieveDate"; import Restricted from "views/app-views/permissions/Restricted"; const AwardList = () => { let history = useHistory(); const [isLoading, setIsLoading] = useState(false); const [list, setList] = useState([]); const [filter, setFilter] = useState({ page: 1, perPage: 10, }); const [totalData, setTotalData] = useState(0); const _onFilterChange = (idx, value) => setFilter((prev) => ({ ...prev, [idx]: value })); const _pageSizeChange = (_, pageSize) => _onFilterChange("perPage", pageSize); const _pageChange = (page) => _onFilterChange("page", page); const paginationOptions = { showSizeChanger: true, showQuickJumper: true, onShowSizeChange: _pageSizeChange, onChange: _pageChange, pageSizeOptions: [10, 20, 30, 40, 50], total: totalData, showTotal: (total, range) => `${range[0]} to ${range[1]} of ${total}`, }; useEffect(() => { fetchAll(filter); }, [filter]); const fetchAll = async (params) => { setIsLoading(true); try { const res = await PushNotifications.list(params); setList(res.data); setTotalData(res.total); } catch (error) { message.error(error.message); } finally { setIsLoading(false); } }; const dropdownMenu = (row) => ( <Menu> <Restricted to="award_edit"> <Menu.Item onClick={() => viewDetails(row)}> <Flex alignItems="center"> <EyeOutlined /> <span className="ml-2">View Details</span> </Flex> </Menu.Item> </Restricted> </Menu> ); const addProduct = () => { history.push(`/app/push-notifications/add-push-notification`); }; const viewDetails = (row) => { 
history.push(`/app/push-notifications/edit-push-notification/${row.id}`); }; const tableColumns = [ { title: "ID", dataIndex: "id", }, { title: "Created at", dataIndex: "created_at", render: (date) => <RetrieveDate date={date} />, }, { title: "Updated at", dataIndex: "updated_at", render: (date) => <RetrieveDate date={date} />, }, { title: "Title", dataIndex: "title", }, { title: "Role", dataIndex: "role", }, { title: "Send", dataIndex: "sent", render: (sent) => (sent === Number(1) ? "Yes" : "No"), }, { title: "", dataIndex: "actions", render: (_, elm) => ( <div className="text-right"> <EllipsisDropdown menu={dropdownMenu(elm)} /> </div> ), }, ]; const onSearch = (e) => { const value = e.currentTarget.value; _onFilterChange("q", value); }; return ( <> {/* {isLoading ? ( <Loading /> ) : ( */} <Card> <Flex alignItems="center" justifyContent="between" mobileFlex={false}> <Flex className="mb-1" mobileFlex={false} alignItems="center"> <div className="mr-md-3 mb-3"> <Input placeholder="Search" prefix={<SearchOutlined />} onChange={(e) => onSearch(e)} allowClear /> </div> </Flex> <Flex> <Restricted to={"award_create"}> <Button onClick={addProduct} type="primary" icon={<PlusCircleOutlined />} block > Create Notification </Button> </Restricted> </Flex> </Flex> <div className="table-responsive"> <Table loading={isLoading} columns={tableColumns} dataSource={list} rowKey="id" pagination={{ ...paginationOptions, total: totalData, current: filter.page, pageSize: filter.perPage, }} /> </div> </Card> {/* )} */} </> ); }; export default AwardList;
#!/usr/bin/env python # -*- coding: utf-8 -*- # RHN/Spacewalk API Module abstracting the 'packages' namespace # and its children / sub-namespaces # # Copyright (c) 2009-2014 Stuart Sears # # This file is part of python-rhnapi # # python-rhnapi is free software: you can redistribute it and/or modify it under # the terms of the GNU General Public License as published by the Free # Software Foundation, either version 3 of the License, or (at your option) # any later version. # # python-rhnapi is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or # FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License # for more details. # # You should have received a copy of the GNU General Public License along # with python-rhnapi. If not, see http://www.gnu.org/licenses/. __doc__ = """ rhnapi.packages A python interface to the 'packages' namespace in RHN Satellite 5.4+ That is to say, some of these may work on earlier versions, but haven't been tested on them. This file includes the methods from the following namespaces: packages packages.provider packages.search Some methods have been renamed to follow a sensible, understandable scheme and to facilitate the flattening into a single file. E.g. 
all packages.search.* methods now being with 'search': packages.search namespace: search -> packages.search.advanced searchActivationKey -> packages.search.advancedWithActKey searchChannel -> packages.search.advancedWithChannel searchName -> packages.search.name searchNameAndDescription -> packages.search.nameAndDescription searchNameAndSummary -> packages.search.nameAndSummary """ __author__ = "Stuart Sears" # ---------------------------------------------------------------------------- # def findByNvrea(rhn, pkgname, pkgver, pkgrel, pkgarch, pkgepoch=''): """ API: packages.findByNvrea usage: findByNvrea(rhn, name, version, release, arch, epoch=None) description: Find details about a package by Name Version Release (and possibly epoch) returns: list of dict, one per matched package { 'name' : str , 'version' : str , 'release' : str, 'epoch' : str, 'id' : int, 'arch_label' : str, 'path' : str (where the package is on the satellite, under /var/satellite), 'provider' : str (determined by GPG signing key), 'last_modified' : xmlrpclib.DateTime, } parameters: rhn - an authenticated RHN session pkgname(str) - the package name pkgver(str) - package version pkgrel(str) - package release pkgarch(str) - package architecture *pkgepoch(str) - package epoch. 
optional """ try: return rhn.session.packages.findByNvrea(rhn.key,pkgname, pkgver, pkgrel, pkgepoch, pkgarch) except Exception, E: return rhn.fail(E, 'find package %s-%s-%s.%s' % (pkgname, pkgver, pkgrel, pkgarch,)) # ---------------------------------------------------------------------------- # def getDetails(rhn, pkgid): """ API: packages.getDetails usage: getDetailsByID(RHN, pkgid) description: Retrieves package information based on a package ID returns: dict {'arch_label': str 'build_date': str 'build_host': str 'checksum': str 'checksum_type': str 'cookie': 'None', 'description': str 'epoch': str, 'file': str (actual package filename) 'id': int, 'last_modified_date': str 'license': str 'name': 'bash', 'path': str 'payload_size': str, 'providing_channels': [list of channels containing this package] 'release': '3.el6', 'size': '928344', 'summary': 'The GNU Bourne Again shell\n', 'vendor': 'Red Hat, Inc.', 'version': '4.1.2' } parameters: rhn - an authenticated RHN session pkgid(int) - Package ID number """ try: return rhn.session.packages.getDetails(rhn.key, pkgid) except Exception, E: return rhn.fail(E, 'find package with ID %d' % pkgid) # ---------------------------------------------------------------------------- # def getPackage(rhn, pkgid): """ API: packages.getPackage usage: getPackage(rhn, pkgid) description: Retrieve the package file associated with a package. returns: the binary package file, base64 encoded. parameters: rhn - authenticated rhnapi.rhnSession() object pkgid(int) - Package ID number """ try: return rhn.session.packages.getPackage(rhn.key, pkgid) except Exception, E: return rhn.fail(E, 'fetch package with ID %d' % pkgid) # ---------------------------------------------------------------------------- # def getPackageUrl(rhn, pkgid): """ API: packages.getPackageUrl usage: getPackageUrl(rhn, pkgid) description: Retrieve the url that can be used to download a package.i This will expire after a certain time period. 
returns: string parameters: rhn - authenticated rhnapi.rhnSession() object pkgid(int) - Package ID number """ try: return rhn.session.packages.getPackageUrl(rhn.key, pkgid) except Exception, E: return rhn.fail(E, 'find download URL for package ID %d' % pkgid) # ---------------------------------------------------------------------------- # def listChangelog(rhn, pkgid): """ API: packages.listChangelog usage: listChangelog(rhn, pkgid) description: Retrieves package changelog for package ID returns: list of dicts, one per entry {'author': str 'date': str 'text': str } parameters: rhn - an authenticated RHN session pkgid(int) - Package ID number """ try: return rhn.session.packages.listChangelog(rhn.key, pkgid) except Exception, E: return rhn.fail(E, 'get changelog for package ID %d' % pkgid) # ---------------------------------------------------------------------------- # def listDependencies(rhn, pkgid): """ API: packages.listDependencies usage: listDependencies(rhn, pkgid) description: Retrieves dependency info for a package ID returns: list of dicts, one per dependency { 'dependency': (str) usually file path or package name, 'dependency_modifier': (str), 'dependency_type': (str) one of {'requires', 'conflicts', 'obsoletes', 'provides'], }, parameters: rhn - an authenticated RHN session pkgid(int) - Package ID number """ try: return rhn.session.packages.listDependencies(rhn.key, pkgid) except Exception, E: return rhn.fail(E, 'get changelog for package ID %d' % pkgid) # ---------------------------------------------------------------------------- # def listFiles(rhn, pkgid): """ API: packages.listFiles usage: listFiles(rhn, pkgid) description: Retrieves file listing for package ID returns: list of dict, one per file { 'checksum': (str) '55e10cb00b262abf4a13e91e0bbb6040e1da2e428c9fb844430f4d0650c21ec0', 'checksum_type': (str) 'sha256', 'last_modified_date': (str) '2010-06-22 20:49:51', 'linkto': (str) '', 'path': (str) '/usr/share/man/man1/wait.1.gz', 'size': (int) 40, 
'type': (str) 'file' }, parameters: rhn - an authenticated RHN session pkgid(int) - Package ID number """ try: return rhn.session.packages.listFiles(rhn.key, pkgid) except Exception, E: return rhn.fail(E, 'get file list for package ID %d' % pkgid) # ---------------------------------------------------------------------------- # def listProvidingChannels(rhn, pkgid): """ API: packages.listProvidingChannels usage: listProvidingChannels(rhn, pkgid) description: Lists channels providing a package ID returns: list of dicts, one per channel { 'label': 'rhel-x86_64-server-6', 'name': 'Red Hat Enterprise Linux Server (v. 6 for 64-bit x86_64)', 'parent_label': ' ' } parameters: rhn - an authenticated RHN session pkgid(int) - Package ID number """ try: return rhn.session.packages.listProvidingChannels(rhn.key, pkgid) except Exception, E: return rhn.fail(E, 'get channel list for package ID %d' % pkgid) # ---------------------------------------------------------------------------- # def listProvidingErrata(rhn, pkgid): """ API: packages.listProvidingErrata usage: listProvidingErrata(rhn, pkgid) description: Displays which Errata provide a given package. returns: list of dicts, one per entry parameters: rhn - an authenticated RHN session pkgid(int) - Package ID number """ try: return rhn.session.packages.listProvidingErrata(rhn.key, pkgid) except Exception, E: return rhn.fail(E, 'find errata providing package ID %d' % pkgid) # ---------------------------------------------------------------------------- # def removePackage(rhn, pkgid): """ API: packages.removePackage usage: removePackage(rhn, pkgid) description: Removes a package from the satellite completely, from ALL channels. 
returns: bool, or throws exception parameters: rhn - an authenticated RHN session pkgid(int) - Package ID number """ try: return rhn.session.packages.removePackage(rhn.key, pkgid) == 1 except Exception, E: return rhn.fail(E, 'remove Package ID %d' % pkgid) # ----------------------------- package.provider ----------------------------- # def associateKey(rhn, provider, key='', keytype='gpg'): """ API: packages.provider.associateKey usage: associateKey(rhn, provider, key, keytype='gpg') Associate a package security key and with the package provider. * If the provider or key doesn't exist, it is created. * User executing the request must be a Satellite administrator. returns: True, or throws exception parameters: rhn - authenticated rhnapi.rhnSession() object provider(str) - provider name key(str) - the key content keytype(str) - key type. Currently only understands 'gpg' """ try: return rhn.session.packages.provider.associateKey(rhn.key, provider, key, keytype) == 1 except Exception, E: return rhn.fail(E, 'associate new key with package provider %s' % provider) # ---------------------------------------------------------------------------- # def listProviders(rhn): """ API: packages.provider.list usage: listProviders(rhn) description: lists all package providers returns: list/dict { 'name' : (str), 'keys' : [ { 'key' : (str) 'type' : (str) } ] } parameters: rhn - authenticated rhnapi.rhnSession() object """ try: return rhn.session.packages.provider.list(rhn.key) except Exception, E: return rhn.fail(E, 'list all package providers') # ---------------------------------------------------------------------------- # def listKeys(rhn, provider): """ API: packages.provider.listKeys usage: listKeys(rhn, provider) description: List all security keys associated with a package provider. User executing the request must be a Satellite administrator. 
returns: list/dict { 'key' : (str) 'type' : (str) } parameters: rhn - authenticated rhnapi.rhnSession() object provider(str) - provider name """ try: return rhn.session.packages.provider.listKeys(rhn.key, provider) except Exception, E: return rhn.fail(E, 'get list of package security keys for provider %s' % provider) # ----------------------------- packages.search ------------------------------ # def search(rhn, query): """ API: packages.search.advanced usage: advancedSearch(rhn, query) description: Advanced method to search lucene indexes with a passed in query written in Lucene Query Parser syntax. Fields searchable for Packages: name, epoch, version, release, arch, description, summary Lucene Query Example: "name:kernel AND version:2.6.18 AND -description:devel" returns: list of dict, one per matching package parameters: rhn - authenticated rhnapi.rhnSession() object query(str) - query string """ try: return rhn.session.packages.search.advanced(rhn.key, query) except Exception, E: return rhn.fail(E, 'search for packages using query "%s"' % query) def searchAdvanced(rhn, query): """ wrapper around search to look more like the API :) """ return search(rhn, query) # ---------------------------------------------------------------------------- # def searchActivationKey(rhn, query, actkey): """ API: packages.search.advancedWithActKey usage: searchActivationKey(rhn, query, actkey) description: Advanced method to search lucene indexes with a passed in query written in Lucene Query Parser syntax, additionally this method will limit results to those which are associated with a given activation key. 
returns: list of dict, one per matched package parameters: rhn - authenticated rhnapi.rhnSession() object query(str) - query string actkey(Str) - activation key (hex id) """ try: return rhn.session.packages.search.advancedWithActKey(rhn.key, query, actkey) except Exception, E: return rhn.fail(E, 'find packages in activation key "%s" using query "%s"' %(actkey, query)) # ---------------------------------------------------------------------------- # def searchAdvancedWithActivationKey(rhn, query, actkey): """ wrapper around searchActivationKey """ return searchActivationKey(rhn, query, actkey) # ---------------------------------------------------------------------------- # def searchChannel(rhn, query, chanlabel): """ API: packages.search.advancedWithChannel usage: searchChannel(rhn, query, chanlabel) description: Advanced method to search lucene indexes with a passed in query written in Lucene Query Parser syntax, additionally this method will limit results to those which are in the passed in channel label returns: list of dict, one per matched package parameters: rhn - authenticated rhnapi.rhnSession() object query(str) - query string chanlabel(str) - software channel label """ try: return rhn.session.packages.search.advancedWithChannel(rhn.key, query, chanlabel) except Exception, E: return rhn.fail(E, 'search for packages in channel "%s" using query "%s"' %(chanlabel, query)) # ---------------------------------------------------------------------------- # def searchAdvancedWithChannel(rhn, query, chanlabel): """ wrapper around searchChannel """ return searchChannel(rhn, query, chanlabel) # ---------------------------------------------------------------------------- # def searchName(rhn, pkgname): """ API: packages.search.name usage: searchName(rhn, pkgname) description: Search the lucene package indexes for all packages which match the given name returns: list of dict, one per matched package (or throws exception) parameters: rhn - authenticated 
rhnapi.rhnSession() object query(str) - query string pkgname(str) - name to search for """ try: return rhn.session.packages.search.name(rhn.key, pkgname) except Exception, E: return rhn.fail(E, 'search for packages matching name "%s"' % pkgname) # ---------------------------------------------------------------------------- # def searchNameAndDescription(rhn, query): """ API: packages.search.nameAndDescription usage: searchNameAndDescription(rhn, query) description: Search the lucene package indexes for all packages which match the given query in name or description returns: list of dict, one per matched package (or throws exception) parameters: rhn - authenticated rhnapi.rhnSession() object query(str) - string to search for """ try: return rhn.session.packages.search.nameAndDescription(rhn.key, query) except Exception, E: return rhn.fail(E, 'search packages names and descriptions using query "%s"' % query) # ---------------------------------------------------------------------------- # def searchNameAndSummary(rhn, query): """ API: packages.search.nameAndSummary usage: searchNameAndSummary(rhn, query) description: Search the lucene package indexes for all packages which match the given query in name or summary returns: list of dict, one per matched package (or throws exception) parameters: rhn - authenticated rhnapi.rhnSession() object query(str) - string to search for """ try: return rhn.session.packages.search.nameAndSummary(rhn.key, query) except Exception, E: return rhn.fail(E, 'search packages names and summaries using query "%s"' % query) # footer - do not edit below here # vim: set et ai smartindent ts=4 sts=4 sw=4 ft=python:
from fastapi import FastAPI
from pydantic import BaseModel
from fastapi.middleware.cors import CORSMiddleware

app = FastAPI()

# Enable the CORS middleware so requests from a different port are allowed
# (the React development server runs on its own origin).
app.add_middleware(
    CORSMiddleware,
    allow_origins=["http://localhost:3000"],  # adjust to match the React app URL
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)


class CalculationRequest(BaseModel):
    """Request body: two operands and an operator symbol (+, -, *, /)."""
    x: float
    y: float
    operator: str


# Dispatch table mapping each supported operator symbol to its computation.
_OPERATIONS = {
    "+": lambda a, b: a + b,
    "-": lambda a, b: a - b,
    "*": lambda a, b: a * b,
    "/": lambda a, b: a / b,
}


@app.post("/calculate/")
async def calculate(request: CalculationRequest):
    """Apply the requested arithmetic operator to x and y.

    Returns {"result": ...} on success, or {"error": ...} for an unknown
    operator or division by zero (messages are user-facing Japanese text).
    """
    operation = _OPERATIONS.get(request.operator)
    if operation is None:
        return {"error": "無効な演算子"}
    if request.operator == "/" and request.y == 0:
        return {"error": "ゼロで割ることはできません"}
    return {"result": operation(request.x, request.y)}
<template>
  <!-- Fixed bottom bar of the cart page: select-all toggle, total price,
       and the checkout button with the count of selected items. -->
  <div class="bottom-bar">
    <div class="check-content">
      <check-bottom
        class="check-button"
        :is-checked="isSelectAll"
        @click="selectAll"
      ></check-bottom>
      <span>全选</span>
    </div>
    <div class="price">合计:{{ totalPrice }}</div>
    <div class="calc">去支付({{ totalCount }})</div>
  </div>
</template>

<script>
import CheckBottom from "components/content/tickButton/TickButton.vue";
import { mapGetters } from "vuex";
export default {
  name: "CartBottomBar",
  components: {
    CheckBottom,
  },
  computed: {
    // cartList comes from the Vuex store.
    ...mapGetters(["cartList"]),
    // Sum of price * count over the CHECKED items only, formatted with a
    // currency sign and two decimals.
    totalPrice() {
      return (
        "¥" +
        this.cartList
          .filter((item) => {
            return item.checked;
          })
          .reduce((preValue, item) => {
            return preValue + item.price * item.count;
          }, 0)
          .toFixed(2)
      ); // 0 is the initial value passed to reduce
    },
    // Total quantity of the checked items (shown on the checkout button).
    totalCount() {
      return this.cartList
        .filter((item) => {
          return item.checked;
        })
        .reduce((preValue, item) => {
          return preValue + item.count;
        }, 0);
    },
    // True only when the cart is non-empty and every item is checked.
    isSelectAll() {
      return this.cartList.length
        ? this.cartList.every((item) => item.checked)
        : false;
    },
  },
  methods: {
    // Toggle all items: if everything is checked, uncheck all; otherwise
    // check all.
    selectAll() {
      // this.isSelectAll = !this.isSelectAll; -- not possible: isSelectAll
      // is a computed getter and therefore read-only.
      if (this.isSelectAll) {
        this.cartList.forEach((element) => {
          element.checked = false; // invert after the click
        });
      } else {
        this.cartList.forEach((item) => (item.checked = true));
      }
    },
  },
};
</script>

<style scoped>
.bottom-bar {
  position: relative;
  display: flex;
  height: 40px;
  line-height: 40px;
  background-color: #eee;
}
.check-content {
  display: flex;
  align-items: center;
  margin-left: 10px;
}
.check-button {
  width: 20px;
  height: 20px;
  line-height: 20px;
  margin-right: 5px;
}
.calc {
  margin-left: auto;
  text-align: center;
  background-color: darkorange;
  color: #eee;
  width: 80px;
}
</style>
import {useEffect, useRef} from "react"; import {editBlockPositionHandler, editBlockSizeHandler} from "../stateManager/stateManagerFunctions"; export function useDragAndDrop(slideIndex: number, blockIndex: number, id: string, coordX: number, coordY: number, type: string) { const isClicked = useRef<boolean>(false); const coords = useRef<{ startX: number, startY: number, lastX: number, lastY: number }>({ startX: coordX, startY: coordY, lastX: coordX, lastY: coordY }) useEffect(() => { const target = document.getElementById(id); if (!target) throw new Error("Элемента с заданным ID не существует!"); const container = target.parentElement; if (!container) throw new Error("У элемента отсутствует родительский элемент!"); const onMouseDown = (e: MouseEvent) => { isClicked.current = true; coords.current.startX = e.clientX; coords.current.startY = e.clientY; let height; let width; if (type === 'text') { height = target.offsetHeight - 4; width = target.offsetWidth - 4; } else { height = target.offsetHeight - 12; width = target.offsetWidth - 12; } editBlockSizeHandler(slideIndex, blockIndex, width, height); } const onMouseUp = (e: MouseEvent) => { isClicked.current = false; coords.current.lastX = target.offsetLeft; coords.current.lastY = target.offsetTop; editBlockPositionHandler(slideIndex, blockIndex, coords.current.lastX, coords.current.lastY); } const onMouseMove = (e: MouseEvent) => { if (!isClicked.current) return; const nextX = e.clientX - coords.current.startX + coords.current.lastX; const nextY = e.clientY - coords.current.startY + coords.current.lastY; target.style.top = `${nextY}px`; target.style.left = `${nextX}px`; } target.addEventListener('mousedown', onMouseDown); target.addEventListener('mouseup', onMouseUp); container.addEventListener('mousemove', onMouseMove); container.addEventListener('mouseleave', onMouseUp); return () => { target.removeEventListener('mousedown', onMouseDown); target.removeEventListener('mouseup', onMouseUp); 
container.removeEventListener('mousemove', onMouseMove); container.removeEventListener('mouseleave', onMouseUp); }; }, [id, slideIndex, blockIndex, type]) } export default useDragAndDrop;
// Exercise: channel // Use print wherever necessary /* 1. In continuation of the search files example from goroutines. You need to search files on a disk. The function fileSearch() is already given to search for the files. It sleeps for 1 sec if the search is successful. You should remove all the sleep() and instead introduce a channel to read/write the search result from main(). The search result must include "FOUND"/"NOT FOUND" + path of the file. Also adjust the print statements accordingly by moving it to main(). */ package main import ( "fmt" "io/ioutil" "path/filepath" "time" ) func fileSearch(dir string, fileName string, ch chan<- string ) { fmt.Println("[SEARCHING] ", dir) files, err := ioutil.ReadDir(dir) if err != nil { fmt.Println("error:", err) return } for _, file := range files { if file.Name() == fileName { ch <- "[FOUND]" + filepath.Join(dir, file.Name()) return } } ch <- "[NOT FOUND]" + dir } func main() { start := time.Now() defer func () { fmt.Println(time.Since(start)) }() ch := make(chan string) go fileSearch("../9_channels", "demo1.go", ch) // found fmt.Println(<-ch) go fileSearch("../8_goroutines", "demo1.go", ch) // not found fmt.Println(<-ch) }
import * as styled from "styled-components";

// Design-token palette used across the app (HSL keeps tints adjustable).
export const colors = {
  darkBlue: "hsl(233, 26%, 24%)",
  transparentDarkBlue: "hsl(233, 26%, 24%, 0.7)",
  limeGreen: "hsl(136, 65%, 51%)",
  brightCyan: "hsl(192, 70%, 51%)",
  grayishBlue: "hsl(233, 8%, 62%)",
  lightGrayishBlue: "hsl(220, 16%, 96%)",
  veryLightGray: "hsl(0, 0%, 98%)",
  transparentVeryLightGray: "hsl(0, 0%, 98%, 0.3)",
  white: "hsl(0, 0%, 100%)",
};

// Font-weight tokens matching the loaded Public Sans weights.
export const weight = {
  large: "700",
  medium: "400",
  small: "300",
};

// Media-query breakpoints, widest first.
export const breakpoints = {
  hg: "1440px",
  xlg: "1300px",
  lg: "1170px",
  md: "969px",
  sm: "768px",
  xsm: "450px",
};

// Global reset + base typography.
// FIX: the font-smoothing property was misspelled "-webkit-font-smooothing"
// (triple o), so the declaration was silently ignored by browsers.
// Also removed an empty `p { }` rule (dead CSS).
export const GlobalStyles = styled.createGlobalStyle`
  * {
    margin: 0;
    padding: 0;
    box-sizing: border-box;
    -webkit-font-smoothing: antialiased;
    -moz-osx-font-smoothing: grayscale;
  }
  body,
  html {
    font-family: "Public Sans", sans-serif;
    font-size: 62.5%;
    background-color: ${colors.white};
    scroll-behavior: smooth;
  }
  body {
    overflow-x: hidden;
    max-width: 1440px;
    margin: auto;
  }
  a {
    color: inherit;
    text-decoration: none;
  }
  p,
  span {
    color: ${colors.grayishBlue};
    font-size: 1.5rem;
    line-height: 28px;
  }
  h1,
  h2,
  h3,
  h4,
  h5,
  h6 {
    color: ${colors.darkBlue};
  }
  h2 {
    font-size: 4rem;
    font-weight: ${weight.medium};
  }
  h3 {
    font-size: 3rem;
    font-weight: ${weight.medium};
  }
  h4 {
    font-size: 1.8rem;
    font-weight: ${weight.medium};
  }
`;
// Copyright (c) GREE, Inc.
//
// This source code is licensed under the MIT license found in the
// LICENSE file in the root directory of this source tree.

// Tests for the patapata L10n class, the I18nPlugin initialization, and the
// l()/L10n.containsKey widget-level helpers.

import 'package:flutter/material.dart';
import 'package:flutter/widgets.dart';
import 'package:flutter_test/flutter_test.dart';
import 'package:patapata_core/patapata_core.dart';
import 'package:patapata_core/patapata_core_libs.dart';
import 'package:patapata_core/patapata_widgets.dart';
import 'package:patapata_core/src/exception.dart';

import 'pages/home_page.dart';
import 'utils/patapata_core_test_utils.dart';

// Minimal environment with no I18n configuration (exercises the defaults).
class _Environment {
  const _Environment();
}

// Environment exposing configurable l10n paths and supported locales.
class _I18nEnvironment with I18nEnvironment {
  const _I18nEnvironment(this._l10nPaths, this._supportedL10ns);

  final List<String>? _l10nPaths;
  @override
  List<String>? get l10nPaths => _l10nPaths;

  final List<Locale>? _supportedL10ns;
  @override
  List<Locale>? get supportedL10ns => _supportedL10ns;
}

void main() {
  group('class L10n', () {
    setUp(() {
      testInitialize();
    });

    test(
      'Create L10n class.',
      () async {
        const tL10n = L10n(
          Locale('en'),
          {
            'aaa': 'Test',
          },
        );

        // Known keys resolve to their message; unknown keys echo the key.
        expect(tL10n.locale, equals(const Locale('en')));
        expect(tL10n.containsMessageKey('aaa'), isTrue);
        expect(tL10n.lookup('aaa'), equals('Test'));
        expect(tL10n.containsMessageKey('bbb'), isFalse);
        expect(tL10n.lookup('bbb'), equals('bbb'));
      },
    );

    test(
      'The resource is loaded from assets and the lookup works correctly.',
      () async {
        final tL10nEn = await L10n.fromAssets(
          locale: const Locale('en'),
          paths: ['l10n'],
          assetBundle: mockL10nAssetBundle,
        );
        final tL10nJa = await L10n.fromAssets(
          locale: const Locale('ja'),
          paths: ['l10n'],
          assetBundle: mockL10nAssetBundle,
        );

        // en
        expect(tL10nEn.locale, equals(const Locale('en')));
        expect(tL10nEn.lookup('home.title'), equals('HomePage'));
        expect(tL10nEn.containsMessageKey('test.title'), isTrue);
        expect(
          tL10nEn.lookup(
            'test.title',
            namedParameters: {'param': 'en'},
          ),
          equals('TestMessage:en'),
        );
        expect(tL10nEn.containsMessageKey('test2.title'), isFalse);
        expect(
          tL10nEn.lookup(
            'test2.title',
            namedParameters: {'param': 'en'},
          ),
          equals('test2.title'),
        );

        // ja
        expect(tL10nJa.locale, equals(const Locale('ja')));
        expect(tL10nJa.lookup('home.title'), equals('ホーム'));
        expect(tL10nJa.containsMessageKey('test.title'), isTrue);
        expect(
          tL10nJa.lookup(
            'test.title',
            namedParameters: {'param': 'ja'},
          ),
          equals('テストメッセージ:ja'),
        );
        expect(tL10nJa.containsMessageKey('test2.title'), isFalse);
        expect(
          tL10nJa.lookup(
            'test2.title',
            namedParameters: {'param': 'ja'},
          ),
          equals('test2.title'),
        );
      },
    );

    test(
      'If multiple paths are specified, the same key will be overwritten by subsequent files.',
      () async {
        // 'l10n2' is listed after 'l10n', so its values win for shared keys.
        final tL10nEn = await L10n.fromAssets(
          locale: const Locale('en'),
          paths: ['l10n', 'l10n2'],
          assetBundle: mockL10nAssetBundle,
        );
        final tL10nJa = await L10n.fromAssets(
          locale: const Locale('ja'),
          paths: ['l10n', 'l10n2'],
          assetBundle: mockL10nAssetBundle,
        );

        // en
        expect(tL10nEn.lookup('home.title'), equals('HomePage2'));
        expect(tL10nEn.containsMessageKey('test.title'), isTrue);
        expect(
          tL10nEn.lookup(
            'test.title',
            namedParameters: {'param': 'en'},
          ),
          equals('TestMessage:en'),
        );
        expect(tL10nEn.containsMessageKey('test2.title'), isTrue);
        expect(
          tL10nEn.lookup(
            'test2.title',
            namedParameters: {'param': 'en'},
          ),
          equals('TestMessage2:en'),
        );

        // ja
        expect(tL10nJa.lookup('home.title'), equals('ホーム2'));
        expect(tL10nJa.containsMessageKey('test.title'), isTrue);
        expect(
          tL10nJa.lookup(
            'test.title',
            namedParameters: {'param': 'ja'},
          ),
          equals('テストメッセージ:ja'),
        );
        expect(tL10nJa.containsMessageKey('test2.title'), isTrue);
        expect(
          tL10nJa.lookup(
            'test2.title',
            namedParameters: {'param': 'ja'},
          ),
          equals('テストメッセージ2:ja'),
        );
      },
    );

    test(
      'Load error in assets. The `patapata_dummy_never` is set as dummy.',
      () async {
        // The path is invalid and will not be loaded.
        final tL10nNotFound = await L10n.fromAssets(
          locale: const Locale('en'),
          paths: ['aaa'],
          assetBundle: mockL10nAssetBundle,
        );
        // Languages not supported by the app are not loaded.
        final tL10nAr = await L10n.fromAssets(
          locale: const Locale('ar'),
          paths: ['l10n'],
          assetBundle: mockL10nAssetBundle,
        );
        // Since [assetBundle] is not specified, the assets is loaded from [rootBundle],
        // but in the test environment, the actual assets does not exist.
        final tL10nEn = await L10n.fromAssets(
          locale: const Locale('en'),
          paths: ['l10n'],
        );

        expect(tL10nNotFound.containsMessageKey('home.title'), isFalse);
        expect(tL10nNotFound.lookup('patapata_dummy_never'), 'dummy');
        expect(tL10nAr.containsMessageKey('home.title'), isFalse);
        expect(tL10nAr.lookup('patapata_dummy_never'), 'dummy');
        expect(tL10nEn.containsMessageKey('home.title'), isFalse);
        expect(tL10nEn.lookup('patapata_dummy_never'), 'dummy');
      },
    );

    test(
      'Parse error in yaml.',
      () async {
        // Capture the error reported through the root logger.
        Object? tException;
        final tLogSubscription = Logger.root.onRecord.listen((LogRecord record) {
          tException = record.error;
        });

        final tL10nError = await L10n.fromAssets(
          locale: const Locale('en'),
          paths: ['parse_error'],
          assetBundle: mockL10nAssetBundle,
        );

        expect(tL10nError.containsMessageKey('home.title'), isFalse);
        expect(tL10nError.containsMessageKey('patapata_dummy_never'), isFalse);
        expect(tException, isA<L10nLoadAssetsException>());
        expect((tException as L10nLoadAssetsException).code,
            equals(PatapataCoreExceptionCode.PPE401.name));

        // Empty yaml file
        tException = null;
        final tL10nEmpty = await L10n.fromAssets(
          locale: const Locale('en'),
          paths: ['empty'],
          assetBundle: mockL10nAssetBundle,
        );

        expect(tL10nEmpty.containsMessageKey('home.title'), isFalse);
        expect(tL10nEmpty.containsMessageKey('patapata_dummy_never'), isFalse);
        expect(tException, isA<L10nLoadAssetsException>());
        expect((tException as L10nLoadAssetsException).code,
            equals(PatapataCoreExceptionCode.PPE401.name));

        tLogSubscription.cancel();
      },
    );
  });

  group('Initialize I18nPlugin and load assets.', () {
    test(
      "If I18nEnvironment is not specified, Locale('en') is read from l10n as default.",
      () async {
        final tApp = createApp(environment: const _Environment());
        final tI18nPlugin = I18nPlugin();
        const tLocale = Locale('en');

        final bool tResult = await tI18nPlugin.init(tApp);
        expect(tResult, isTrue);
        expect(tI18nPlugin.i18n.supportedL10ns, [tLocale]);
        expect(tI18nPlugin.i18n.delegate.isSupported(tLocale), isTrue);
        expect(
            tI18nPlugin.i18n.delegate.isSupported(const Locale('ja')), isFalse);
        expect(tI18nPlugin.i18n.delegate.l10n, isNull);

        await tI18nPlugin.i18n.delegate.load(tLocale);
        expect(tI18nPlugin.i18n.delegate.l10n, isNotNull);
        expect(tI18nPlugin.i18n.delegate.l10n!.containsMessageKey('test.title'),
            isTrue);
      },
    );

    test(
      'If I18nEnvironment.l10nPaths is not set, `l10n` will be the default.',
      () async {
        final tApp = createApp(
            environment: const _I18nEnvironment(null, [Locale('en')]));
        final tI18nPlugin = I18nPlugin();
        const tLocale = Locale('en');

        final bool tResult = await tI18nPlugin.init(tApp);
        expect(tResult, isTrue);

        await tI18nPlugin.i18n.delegate.load(tLocale);
        expect(tI18nPlugin.i18n.delegate.l10n!.containsMessageKey('test.title'),
            isTrue);
      },
    );

    test(
      'If I18nEnvironment.supportedL10ns is required.',
      () async {
        // Without supported locales, plugin initialization must fail.
        final tApp = createApp(environment: const _I18nEnvironment(null, null));
        final tI18nPlugin = I18nPlugin();

        final bool tResult = await tI18nPlugin.init(tApp);
        expect(tResult, isFalse);
      },
    );
  });

  group(
    'Widget tests',
    () {
      late App tApp;

      setUp(() async {
        tApp = createApp(
          appWidget: StandardMaterialApp(
            onGenerateTitle: (context) => 'Test Title',
            pages: [
              StandardPageFactory<HomePage, void>(
                create: (data) => HomePage(),
              ),
            ],
          ),
        );
      });

      testWidgets(
        'l function process correctly.',
        (WidgetTester tester) async {
          tApp.run();

          await tApp.runProcess(
            () async {
              await tester.pumpAndSettle();

              expect(
                l(StandardMaterialApp.globalNavigatorContext!, 'home.title'),
                equals('HomePage'),
              );
              expect(
                l(
                  StandardMaterialApp.globalNavigatorContext!,
                  'test.title',
                  {
                    'param': 'test',
                  },
                ),
                equals('TestMessage:test'),
              );
            },
          );

          tApp.dispose();
        },
      );

      testWidgets(
        'L10n.containsKey process correctly.',
        (WidgetTester tester) async {
          tApp.run();

          await tApp.runProcess(
            () async {
              await tester.pumpAndSettle();

              expect(
                L10n.containsKey(
                  context: StandardMaterialApp.globalNavigatorContext!,
                  key: 'home.title',
                ),
                isTrue,
              );
              expect(
                L10n.containsKey(
                  context: StandardMaterialApp.globalNavigatorContext!,
                  key: 'test2.title',
                ),
                isFalse,
              );
            },
          );

          tApp.dispose();
        },
      );
    },
  );
}
import React, { useEffect, useState } from "react"; import { Col, Container, Row } from "react-bootstrap"; import { api } from "../../services/api"; import { ToastContainer, toast } from "react-toastify"; import "react-toastify/dist/ReactToastify.css"; const CadProduto = () => { const urlParams = new URLSearchParams(window.location.search); const id_produto = urlParams.get("id_produto"); const paramNome = urlParams.get("nome"); const paramPreco = +urlParams.get("preco"); const paramMedida = urlParams.get("medida"); const paramTipo = urlParams.get("tipo"); const [nome, setNome] = useState(id_produto ? paramNome : ""); const [preco, setPreco] = useState(id_produto ? paramPreco : 0); const [medida, setMedida] = useState(id_produto ? paramMedida : "Unidade"); const [tipo, setTipo] = useState(id_produto ? paramTipo : "Comida"); // Manipulador de evento para atualizar o estado da descrição quando o usuário alterar o valor do input const handleNomeChange = (event) => { setNome(event.target.value); }; const handlePrecoChange = (event) => { setPreco(event.target.value); }; const handleMedidaChange = (event) => { setMedida(event.target.value); }; const handleTipoChange = (event) => { setTipo(event.target.value) }; // Função que cria um novo produto const novoProduto = async (nome, preco, medida, tipo) => { console.log(nome, preco, medida, tipo) const produtoEncontrado = produtos.find( produto => produto.nome === nome ) if (produtoEncontrado) { toast.error("Já tem um item com este nome!", { position: toast.POSITION.TOP_CENTER, }); return; } if (!nome || !preco || !medida || !tipo ) { toast.error("Todos os campos devem estar preenchidos!", { position: toast.POSITION.TOP_CENTER, }); return; } if (isNaN(preco) && (preco > 0)) { toast.error("O preço deve ser um número maior que 0!", { position: toast.POSITION.TOP_CENTER, }); return; } try { const res = await api.post("/produtos", { nome, preco, medida, tipo, ativo: true, }); toast.success(`${res.data} salvo com sucesso`, { 
position: toast.POSITION.TOP_CENTER, }); return res.data (window.location.href = "/cadastros/produtos"); } catch (error) { toast.error(error); } }; // Função que altera um produto existente const alteraProduto = async ( id_produto, nome, preco, medida, tipo, ) => { const produtoEncontrado = produtos.find( produto => produto.nome.toLowerCase() === nome.toLowerCase() && produto.id_produto === id_produto ); if (produtoEncontrado) { toast.error("Já tem um item com este nome!", { position: toast.POSITION.TOP_CENTER, }); return; } if (!nome || !preco || !medida || !tipo ) { toast.error("Todos os campos devem estar preenchidos!", { position: toast.POSITION.TOP_CENTER, }); return; } if (isNaN(preco)) { toast.error("O preço deve ser um número!", { position: toast.POSITION.TOP_CENTER, }); return; } try { const ativo = 1 console.log( id_produto, nome, preco, medida, tipo, ativo) const res = await api.put(`/produtos/${id_produto}`, { id_produto, nome, preco, medida, tipo, ativo, }); toast.success(`${nome} alterado com sucesso`, { position: toast.POSITION.TOP_CENTER, }); return res.data (window.location.href = "/cadastros/produtos"); } catch (error) { toast.error(error); } }; // Este trecho busca os produtos no BD e seta os valores na const produtos const [produtos, setProdutos] = useState([]); const getProdutos = async () => { try { const res = await api.get("/produtos"); setProdutos( res.data.sort((a, b) => (a.id_produto > b.id_produto ? 
1 : -1)) ); // console.log(produtos) } catch (error) { toast.error(error); } }; useEffect(() => { getProdutos(); }, [setProdutos]); // fim do trecho return ( <Container fluid="true"> <ToastContainer /> <Row> <div className="title">Cadastro de Produto</div> </Row> <br /> <Row> <Col> <div>Digite o nome do produto:</div> <input className="nomeProduto" type="text" placeholder="Nome do produto" onChange={handleNomeChange} value={nome} /> </Col> </Row> <br /> <Row> <Col> <div>Digite o valor do produto:</div> <input className="valorProduto" type="number" placeholder="Valor do produto" onChange={handlePrecoChange} value={preco} /> </Col> </Row> <br /> <Row> <Col> <div>Selecione a unidade de medida:</div> <select onChange={handleMedidaChange} value={medida}> <option defaultValue="Unidade" value="Unidade"> Unidade </option> <option value="Quilograma">Quilograma</option> </select> </Col> </Row> <br /> <Row> <Col> <div>Selecione o tipo:</div> <select onChange={handleTipoChange} value={tipo}> <option defaultValue="Comida" value="Comida"> Comida </option> <option value="Bebida">Bebida</option> </select> </Col> </Row> <br /> <Row> <Col> {!id_produto && ( <button className="botao" onClick={() => { novoProduto(nome, preco, medida, tipo); // console.log('novo') }} > Salvar </button> )} {id_produto && ( <button className="botao" onClick={() => { alteraProduto(id_produto, nome, preco, medida, tipo); console.log('editado',id_produto, nome, preco, medida, tipo) }} > Salvar </button> )} <button className="botao" onClick={() => (window.location.href = "/cadastros/produtos")} > Voltar </button> </Col> </Row> </Container> ); }; export default CadProduto;
#!/usr/bin/env dart

// Generates README.md from README.template.md, filling in a markdown table
// of the melos workspace packages and their pub.dev links.

import 'dart:io';

import 'package:melos/melos.dart';
import 'package:path/path.dart' as path;

// Lightweight view of a workspace package: name, version and location on disk.
class Package {
  final String name;
  final String version;
  final String location;

  Package({
    required this.name,
    required this.version,
    required this.location,
  });

  // Builds a Package from a melos-style map (keys: name, version, location).
  factory Package.fromMelo(Map map) {
    return Package(
      name: map['name'],
      version: map['version'],
      location: map['location'],
    );
  }

  static List<Package> fromMeloList(List list) {
    return list.map((e) => Package.fromMelo(e)).toList();
  }

  // Collects all (filtered) packages from a resolved melos workspace.
  static List<Package> fromMeloworkspace(MelosWorkspace space) {
    final result = <Package>[];
    for (final pkg in space.filteredPackages.values) {
      result.add(Package(
        name: pkg.name,
        version: pkg.version.toString(),
        location: pkg.path,
      ));
    }
    return result;
  }

  // Package path relative to the current working directory.
  String get relativePath => path.relative(location, from: Directory.current.path);

  // One markdown table row: name | version | source link | pub.dev link.
  String toMarkdown() {
    final link = '[$relativePath]($relativePath)';
    return '| $name | $version | $link | [$name][$pubLinkName] |';
  }

  // Reference-style link label used for the pub.dev URL.
  String get pubLinkName {
    return '${name}_pub';
  }
}

// Emits the reference-style link definitions, one per package.
String links(Iterable<Package> pkgList) {
  final sb = StringBuffer();
  for (final pkg in pkgList) {
    sb.writeln('[${pkg.pubLinkName}]: https://pub.dev/packages/${pkg.name}');
  }
  return sb.toString();
}

Future<void> main(List<String> args) async {
  // print('pwd: $pwd');
  // Get the version list
  final config = await MelosWorkspaceConfig.fromWorkspaceRoot(Directory.current);
  final melos = Melos(config: config);
  final space = await melos.createWorkspace();
  final pkgList = Package.fromMeloworkspace(space)
      // .where((element) => element.name != 'photo_manager')
      ;

  // Assemble the package table (header + one row per package).
  final pkgListMd = '''
| Package | Version | Location | pub.dev |
| ------- | ------- | -------- | ------- |
${pkgList.map((e) => e.toMarkdown()).join('\n')}
'''
      .trim();

  // print(pkgListMd);

  // Splice the table and the link definitions into the template markers.
  final template = File('README.template.md').readAsStringSync();
  final readme = template
      .replaceFirst('<!-- PKG_LIST -->', pkgListMd)
      .replaceFirst('<!-- Links -->', links(pkgList));
  File('README.md').writeAsStringSync(readme);
}
import React, { useState } from 'react'; import TextField from '@mui/material/TextField'; import Button from '@mui/material/Button'; import styles from '../styles/CompanyBox.module.css'; const TransferFrom = ({ web3, account, contract }) => { let [sender, setSender] = useState(""); let [receiver, setReceiver] = useState(""); let [amount, setAmount] = useState(""); const handleSetSender = (e) => { setSender(e.target.value); }; const handleSetReceiver = (e) => { setReceiver(e.target.value); }; const handleSetAmount = (e) => { setAmount(e.target.value); }; const handleSubmit = async () => { if (amount <= 0 || sender == "" || receiver == "") return; try { amount = web3.utils.toWei(amount, 'ether'); await contract.methods.transferFrom(sender, receiver, amount).send({ from: account }); } catch (err) { console.error(err); } }; return ( <div className={styles.container}> <h1>Transfer From</h1> <TextField id="outlined-multiline-static" label="From account" variant="filled" value={sender} onChange={handleSetSender} className={styles.customTextField} /> <TextField id="outlined-multiline-static" label="To account" variant="filled" value={receiver} onChange={handleSetReceiver} className={styles.customTextField} sx = {{ mt: 1, }} /> <TextField id="filled-number" label="Amount" type="number" InputLabelProps={{ shrink: true, }} variant="filled" value={amount} onChange={handleSetAmount} className={styles.customTextField} sx = {{ mt: 1, }} /> <div className={styles.flexButton}> <Button variant="contained" onClick={handleSubmit} className={styles.button} sx={{ width: '100%', mt: 1, }}> transfer </Button> </div> </div> ); }; export default TransferFrom;
<?php

namespace App\Http\Requests;

use Illuminate\Foundation\Http\FormRequest;
use Illuminate\Contracts\Validation\Validator;
use Illuminate\Http\Exceptions\HttpResponseException;

/**
 * Validates partial updates to a service order.
 * Both fields are optional; they are only validated when present.
 */
class UpdateServiceOrderRequest extends FormRequest
{
    /**
     * Authorization is handled elsewhere; every caller may attempt the update.
     */
    public function authorize()
    {
        return true;
    }

    /**
     * Validation rules: a 7-character plate string and a non-negative price.
     */
    public function rules()
    {
        return [
            'vehiclePlate' => 'string|size:7',
            'price' => 'numeric|min:0'
        ];
    }

    /**
     * Return validation errors as JSON instead of redirecting.
     *
     * Fix: the response must carry HTTP 422 (Unprocessable Entity) —
     * response()->json() defaults to 200, so failures previously
     * looked like successes to API clients.
     */
    public function failedValidation(Validator $validator)
    {
        throw new HttpResponseException(response()->json([
            'errors' => $validator->errors()
        ], 422));
    }
}
<!DOCTYPE html> <html lang="ru"> <head> <meta charset="utf-8"> <meta name="viewport" content="width=device-width, initial-scale=1"> <title>HTML Academy: Седона. Фото и видео</title> <link rel="preload" href="fonts/ptsans.woff2" as="font" type="font/woff2" crossorigin> <link rel="stylesheet" href="css/style.min.css"> <link rel="icon" href="favicon.ico"> <link rel="icon" href="img/favicons/icon.svg" type="image/svg+xml"> <link rel="apple-touch-icon" href="img/favicons/apple.png"> <link rel="manifest" href="manifest.webmanifest"> </head> <body> <div class="page-container"> <header class="main-header"> <a class="main-header__logo logo logo--none" href="index.html"> <img class="main-header__logo-image" src="img/logotype-phone.svg" width="185" height="96" alt="Логотип сайта Седона"> </a> <nav class="main-nav main-nav--opened main-nav--nojs"> <button class="main-nav__toggle" type="button"><span class="visually-hidden">Открыть меню</span></button> <div class="main-nav__wrapper"> <ul class="main-nav__list site-list"> <li class="site-list__item"> <a href="index.html">Главная</a> </li> <li class="site-list__item site-list__item--active"> <a>Фото и видео</a> </li> <li class="site-list__item"> <a href="form.html"> Форма отзыва</a> </li> <li class="site-list__item"> <a href="#">HTML Academy</a> </li> </ul> </div> </nav> </header> <main class="main-container"> <h1 class="visually-hidden">Sedona</h1> <section class="gallery-intro"> <div class="gallery-intro__image-wrapper"> <picture> <source media="(min-width: 1200px)" srcset="img/gallery/intro-sedona-desktop.svg" width="460" height="80"> <source media="(min-width: 768px)" srcset="img/gallery/intro-sedona-tablet.svg" width="457" height="78"> <img class="gallery-intro__image" src="img/gallery/intro-sedona-mobile.svg" width="229" height="39" alt="Седона"> </picture> </div> <div class="gallery-intro__text-wrapper"> <h2 class="gallery-intro__title">Фото и видео</h2> <p class="gallery-intro__description">Не можете решиться на 
путешествие из-за курса?<br> Фотографии помогут вам забыть о политике и экономике.</p> </div> </section> <section class="photo-gallery"> <h2 class="visually-hidden">Фото</h2> <ul class="photo-gallery__list"> <li class="photo-gallery__item"> <a class="photo-gallery__link" href="#"> <picture> <source media="(min-width: 1200px)" srcset="img/gallery/scope-photo-desktop.jpg 1x, img/gallery/[email protected] 2x" width="1200" height="400" > <source media="(min-width: 768px)" srcset="img/gallery/scope-photo-tablet.jpg 1x, img/gallery/[email protected] 2x" width="648" height="400" > <img class="photo-gallery__image" src="img/gallery/scope-photo-mobile.jpg" width="280" height="280" srcset="img/gallery/[email protected]" alt="Трасса, горы, закат"> </picture> </a> <div class="photo-gallery__content photo-gallery__content--first"> <p class="photo-gallery__image-title">Неродные просторы</p> <p class="photo-gallery__image-author">Автор фото: Борис</p> </div> <button class="photo-gallery__button-thumbs-up" type="button">1350</button> </li> <li class="photo-gallery__item"> <a class="photo-gallery__link" href="#"> <picture> <source media="(min-width: 1200px)" srcset="img/gallery/flora-photo-desktop.jpg 1x, img/gallery/[email protected] 2x" width="400" height="267" > <source media="(min-width: 768px)" srcset="img/gallery/flora-photo-tablet.jpg 1x, img/gallery/[email protected] 2x" width="324" height="267" > <img class="photo-gallery__image" src="img/gallery/flora-photo-mobile.jpg" width="280" height="280" srcset="img/gallery/[email protected]" alt="Кактус на фоне гор"> </picture> </a> <div class="photo-gallery__content"> <p class="photo-gallery__image-title">Местная растительность</p> <p class="photo-gallery__image-author">Автор фото: Сергей</p> </div> <button class="photo-gallery__button-thumbs-up" type="button">143</button> </li> <li class="photo-gallery__item"> <a class="photo-gallery__link" href="#"> <picture> <source media="(min-width: 1200px)" 
srcset="img/gallery/road-photo-desktop.jpg 1x, img/gallery/[email protected] 2x" width="400" height="267" > <source media="(min-width: 768px)" srcset="img/gallery/road-photo-tablet.jpg 1x, img/gallery/[email protected] 2x" width="324" height="267" > <img class="photo-gallery__image" src="img/gallery/road-photo-mobile.jpg" width="280" height="280" srcset="img/gallery/[email protected]" alt="Дорога в горах"> </picture> </a> <div class="photo-gallery__content"> <p class="photo-gallery__image-title">Дорога на север</p> <p class="photo-gallery__image-author">Автор фото: Петр</p> </div> <button class="photo-gallery__button-thumbs-up" type="button">96</button> </li> <li class="photo-gallery__item"> <a class="photo-gallery__link" href="#"> <picture> <source media="(min-width: 1200px)" srcset="img/gallery/bridge-photo-desktop.jpg 1x, img/gallery/[email protected] 2x" width="400" height="267"> <source media="(min-width: 768px)" srcset="img/gallery/bridge-photo-tablet.jpg 1x, img/gallery/[email protected] 2x" width="324" height="267" > <img class="photo-gallery__image" src="img/gallery/bridge-photo-mobile.jpg" width="280" height="280" srcset="img/gallery/[email protected]" alt="Мост из скалы"> </picture> </a> <div class="photo-gallery__content"> <p class="photo-gallery__image-title">Мост дьявола</p> <p class="photo-gallery__image-author">Автор фото: Антон</p> </div> <button class="photo-gallery__button-thumbs-up" type="button">254</button> </li> </ul> </section> <section class="video-gallery"> <h2 class="visually-hidden">Видео</h2> <div class="video-gallery__text-wrap"> <p class="video-gallery__title">Все еще сомневаетесь?</p> <p class="video-gallery__description">Смотрите видеопрезентацию и скорее за билетами, пока они не подорожали в очередной раз!</p> </div> <div class="video-gallery__content"> <picture> <source media="(min-width: 1200px)" srcset="img/gallery/video-desktop.jpg 1x, img/gallery/[email protected] 2x" width="800" height="500"> <source media="(min-width: 
768px)" srcset="img/gallery/video-tablet.jpg 1x, img/gallery/[email protected] 2x" width="648" height="500" >
            <img class="video-gallery__video" src="img/gallery/video-mobile.jpg" width="280" height="177" srcset="img/gallery/[email protected]" alt="Видео">
          </picture>
          <button class="video-gallery__button" type="button"></button>
        </div>
      </section>
    </main>
    <footer class="main-footer">
      <div class="main-footer__wrapper">
        <div class="main-footer__hashtag hashtag">
          <p class="hashtag__text">#visitSEDONA</p>
        </div>
      </div>
      <div class="main-footer__social social">
        <ul class="social__list">
          <li class="social__item">
            <a class="social__link" href="https://vk.com/htmlacademy">
              <svg class="social__vkontakte-icon" width="24" height="14" aria-hidden="true" focusable="false">
                <use href="img/sprite.svg#icon-vk"></use>
              </svg>
              <span class="visually-hidden">Изображение логотипа Вконтакте</span>
            </a>
          </li>
          <li class="social__item">
            <a class="social__link" href="https://t.me/htmlacademy">
              <svg class="social__telegram-icon" aria-hidden="true" focusable="false" width="22" height="18" viewBox="0 0 22 18" fill="none" xmlns="http://www.w3.org/2000/svg">
                <use href="img/sprite.svg#icon-telegram"></use>
              </svg>
              <span class="visually-hidden">Изображение логотипа Телеграмм</span>
            </a>
          </li>
          <li class="social__item">
            <a class="social__link" href="https://www.youtube.com/user/htmlacademyru">
              <svg class="social__youtube-icon" aria-hidden="true" focusable="false" width="20" height="14">
                <use href="img/sprite.svg#icon-youtube"></use>
              </svg>
              <span class="visually-hidden">Изображение логотипа Youtube</span>
            </a>
          </li>
        </ul>
      </div>
      <div class="main-footer__copyright copyright">
        <p class="copyright__text">Разработано</p>
        <a class="copyright__link" href="https://htmlacademy.ru/intensive/adaptive">
          <svg class="copyright__icon" aria-hidden="true" focusable="false" width="27" height="35">
            <use href="img/sprite.svg#icon-academy"></use>
          </svg>
          <span class="visually-hidden">Изображение логотипа
HTML-Academy</span> </a> </div> </footer> </div> <script src="js/script.js"></script> </body> </html>
use std::cell::RefCell; use std::sync::RwLock; use std::time::Duration; use leaky_bucket::RateLimiter; use rand::prelude::*; use tokio::time; use crate::clock::Clock; pub struct TestClient<'a> { clock: &'a Clock, limiter: RateLimiter, rng: RwLock<ThreadRng>, } impl<'a> TestClient<'a> { pub fn new(clock: &'a Clock) -> Self { let interval = Duration::from_micros(8000); let limiter = RateLimiter::builder() // .max(120) .interval(interval) // .initial(60) .refill(1) .build() ; let rng = RwLock::new(thread_rng()); Self { clock, limiter, rng, } } pub async fn get(&self) -> u64 { self.limiter.acquire_one().await; self.clock.req(); let micros = self.rng.write().unwrap().gen_range(2000..3000); time::sleep(Duration::from_micros(micros)).await; micros } }
//! # Nest CFGs
//!
//! Identify Single-Entry-Single-Exit (SESE) regions in the CFG.
//! These are pairs of edges (a,b) where
//! * a dominates b
//! * b postdominates a
//! * there are no other edges in/out of the nodes inbetween
//! (this last condition is necessary because loop backedges do not affect (post)dominance).
//!
//! # Algorithm
//! See paper: <https://doi.org/10.1145/178243.178258>, approximately:
//! 1. those three conditions are equivalent to:
//! *a and b are cycle-equivalent in the CFG with an extra edge from the exit node to the entry*
//! where cycle-equivalent means every cycle has either both a and b, or neither
//! 2. cycle equivalence is unaffected if all edges are considered *un*directed
//! (not obvious, see paper for proof)
//! 3. take undirected CFG, perform depth-first traversal
//! => all edges are either *tree edges*, or *backedges* where one endpoint is an ancestor of the other
//! 4. identify the "bracketlist" of each tree edge - the set of backedges going from a descendant of that edge to an ancestor
//! -- post-order traversal, merging bracketlists of children,
//! then delete backedges from below to here, add backedges from here to above
//! => tree edges with the same bracketlist are cycle-equivalent;
//! + a tree edge with a single-element bracketlist is cycle-equivalent with that single element
//! 5. this would be expensive (comparing large sets of backedges) - so to optimize,
//! - the backedge most recently added (at the top) of the bracketlist, plus the size of the bracketlist,
//! is sufficient to identify the set *when the UDFS tree is linear*;
//! - when UDFS is treelike, any ancestor with brackets from >1 subtree cannot be cycle-equivalent with any descendant
//! (as the brackets of said descendant come from beneath it to its ancestors, not from any sibling/etc. in the other subtree).
//! So, add (onto top of bracketlist) a fake "capping" backedge from here to the highest ancestor reached by >1 subtree.
//! (Thus, edges from here up to that ancestor, cannot be cycle-equivalent with any edges elsewhere.)
//!
//! # Restrictions
//! * The paper assumes that all CFG nodes are on paths from entry to exit, i.e. no loops without exits.
//! HUGR assumes only that they are all reachable from entry, so we do a backward traversal from exit node
//! first and restrict to the CFG nodes in the reachable set. (This means we will not discover SESE regions
//! in exit-free loops, but that doesn't seem a major concern.)
//! * Multiple edges in the same direction between the same BBs will "confuse" the algorithm in the paper.
//! However it is straightforward for us to treat successors and predecessors as sets. (Two edges between
//! the same BBs but in opposite directions must be distinct!)

use std::collections::{HashMap, HashSet, LinkedList, VecDeque};
use std::hash::Hash;

use itertools::Itertools;
use thiserror::Error;

use hugr_core::hugr::rewrite::outline_cfg::OutlineCfg;
use hugr_core::hugr::views::sibling::SiblingMut;
use hugr_core::hugr::views::{HierarchyView, HugrView, SiblingGraph};
use hugr_core::hugr::{hugrmut::HugrMut, Rewrite, RootTagged};
use hugr_core::ops::handle::{BasicBlockID, CfgID};
use hugr_core::ops::OpTag;
use hugr_core::ops::OpTrait;
use hugr_core::{Direction, Hugr, Node};

/// A "view" of a CFG in a Hugr which allows basic blocks in the underlying CFG to be split into
/// multiple blocks in the view (or merged together).
/// `T` is the type of basic block; this can just be a BasicBlock (e.g. [`Node`]) in the Hugr,
/// or an [IdentityCfgMap] if the extra level of indirection is not required. However, since
/// SESE regions are bounded by edges between pairs of such `T`, such splitting may allow the
/// algorithm to identify more regions than existed in the underlying CFG, without mutating the
/// underlying CFG just for the analysis - the splitting (and/or merging) can then be performed by
/// [CfgNester::nest_sese_region] only as necessary for regions actually nested.
pub trait CfgNodeMap<T> {
    /// The unique entry node of the CFG. It may have any number (n>=0) of incoming edges;
    /// we assume control arrives here from "outside".
    fn entry_node(&self) -> T;
    /// The unique exit node of the CFG. The only node to have no successors.
    fn exit_node(&self) -> T;
    /// Allows the trait implementor to define a type of iterator it will return from
    /// `successors` and `predecessors`.
    type Iterator<'c>: Iterator<Item = T>
    where
        Self: 'c;
    /// Returns an iterator over the successors of the specified basic block.
    fn successors(&self, node: T) -> Self::Iterator<'_>;
    /// Returns an iterator over the predecessors of the specified basic block.
    fn predecessors(&self, node: T) -> Self::Iterator<'_>;
}

/// Extension of [CfgNodeMap] that can perform (mutable/destructive)
/// nesting of regions detected.
pub trait CfgNester<T>: CfgNodeMap<T> {
    /// Given an entry edge and exit edge defining a SESE region, mutates the
    /// Hugr such that all nodes between these edges are placed in a nested CFG.
    /// Returns the newly-constructed block (containing a nested CFG).
    ///
    /// # Panics
    /// May panic if the two edges do not constitute a SESE region.
    fn nest_sese_region(&mut self, entry_edge: (T, T), exit_edge: (T, T)) -> T;
}

/// Transforms a CFG into as much-nested a form as possible.
pub fn transform_cfg_to_nested<T: Copy + Eq + Hash + std::fmt::Debug>(
    view: &mut impl CfgNester<T>,
) {
    let edge_classes = EdgeClassifier::get_edge_classes(view);
    // For each cycle-equivalence class: the edges of that class not yet traversed.
    let mut rem_edges: HashMap<usize, HashSet<(T, T)>> = HashMap::new();
    for (e, cls) in edge_classes.iter() {
        rem_edges.entry(*cls).or_default().insert(*e);
    }

    // Traverse. Any traversal will encounter edges in SESE-respecting order.
    // With `stop_at == Some(cls)`, traversal does not cross edges of class `cls`;
    // the at-most-one such edge encountered is returned instead of followed.
    fn traverse<T: Copy + Eq + Hash + std::fmt::Debug>(
        view: &mut impl CfgNester<T>,
        n: T,
        edge_classes: &HashMap<(T, T), usize>,
        rem_edges: &mut HashMap<usize, HashSet<(T, T)>>,
        stop_at: Option<usize>,
    ) -> Option<(T, T)> {
        let mut seen = HashSet::new();
        let mut stack = Vec::new();
        // Edges of class `stop_at` found during this traversal (should be <=1 distinct).
        let mut exit_edges = Vec::new();
        stack.push(n);
        while let Some(n) = stack.pop() {
            if !seen.insert(n) {
                continue;
            }
            // Split this node's out-edges into those that end the region (class == stop_at)
            // and those to traverse further.
            let (exit, rest): (Vec<_>, Vec<_>) = view
                .successors(n)
                .map(|s| (n, s))
                .partition(|e| stop_at.is_some() && edge_classes.get(e).copied() == stop_at);
            exit_edges.extend(exit.into_iter().at_most_one().unwrap());
            for mut e in rest {
                if let Some(cls) = edge_classes.get(&e) {
                    assert!(rem_edges.get_mut(cls).unwrap().remove(&e));
                    // While there are more edges in that same class, we can traverse the entire
                    // subregion between pairs of edges in that class in a single step
                    // (as these are strictly nested in any outer region)
                    while !rem_edges.get_mut(cls).unwrap().is_empty() {
                        let prev_e = e;
                        // Traverse to the next edge in the same class - we know it exists in the set
                        e = traverse(view, e.1, edge_classes, rem_edges, Some(*cls)).unwrap();
                        assert!(rem_edges.get_mut(cls).unwrap().remove(&e));
                        // Skip trivial regions of a single node, unless the node has other edges
                        // (non-exiting, but e.g. a backedge to a loop header, ending that loop)
                        if prev_e.1 != e.0 || view.successors(e.0).count() > 1 {
                            // Traversal and nesting of the subregion's *contents* were completed in the
                            // recursive call above, so only processed nodes are moved into descendant CFGs
                            e = (view.nest_sese_region(prev_e, e), e.1)
                        };
                    }
                }
                stack.push(e.1);
            }
        }
        exit_edges.into_iter().unique().at_most_one().unwrap()
    }
    traverse(view, view.entry_node(), &edge_classes, &mut rem_edges, None);
    // TODO we should probably now try to merge consecutive basic blocks
    // (i.e. where a BB has a single successor, that has a single predecessor)
    // and thus convert CF dependencies into (parallelizable) dataflow.
}

/// Search the entire Hugr looking for CFGs, and transform each
/// into as deeply-nested form as possible (as per [transform_cfg_to_nested]).
/// This search may be expensive, although if it finds much/many CFGs,
/// the analysis/transformation on them is likely to be more expensive still!
pub fn transform_all_cfgs(h: &mut Hugr) {
    // Depth-first walk of the whole node hierarchy, trying each node as a CFG root.
    let mut node_stack = Vec::from([h.root()]);
    while let Some(n) = node_stack.pop() {
        if let Ok(s) = SiblingMut::<CfgID>::try_new(h, n) {
            transform_cfg_to_nested(&mut IdentityCfgMap::new(s));
        }
        node_stack.extend(h.children(n))
    }
}

/// Directed edges in a Cfg - i.e. along which control flows from first to second only.
type CfgEdge<T> = (T, T);

// The next enum + few functions allow to abstract over the edge directions
// in a CfgView.
/// One edge incident on a node, tagged with the direction in which the
/// (undirected) traversal crosses it: along the CFG edge, or against it.
#[derive(Copy, Clone, PartialEq, Eq, Hash)]
enum EdgeDest<T> {
    Forward(T),
    Backward(T),
}

impl<T: Copy + Clone + PartialEq + Eq + Hash> EdgeDest<T> {
    /// The node at the far end of the edge, ignoring direction.
    pub fn target(&self) -> T {
        match self {
            EdgeDest::Forward(i) => *i,
            EdgeDest::Backward(i) => *i,
        }
    }
}

/// All edges incident on `n`, deduplicated, including the extra exit->entry
/// edge the algorithm requires (see module docs, step 1).
fn all_edges<'a, T: Copy + Clone + PartialEq + Eq + Hash + 'a>(
    cfg: &'a impl CfgNodeMap<T>,
    n: T,
) -> impl Iterator<Item = EdgeDest<T>> + '_ {
    let extra = if n == cfg.exit_node() {
        vec![cfg.entry_node()]
    } else {
        vec![]
    };
    cfg.successors(n)
        .chain(extra)
        .map(EdgeDest::Forward)
        .chain(cfg.predecessors(n).map(EdgeDest::Backward))
        .unique()
}

/// The same undirected edge, as seen from its other endpoint (direction reversed).
fn flip<T: Copy + Clone + PartialEq + Eq + Hash>(src: T, d: EdgeDest<T>) -> (T, EdgeDest<T>) {
    match d {
        EdgeDest::Forward(tgt) => (tgt, EdgeDest::Backward(src)),
        EdgeDest::Backward(tgt) => (tgt, EdgeDest::Forward(src)),
    }
}

/// Recovers the directed CFG edge (source, target) from a node and an [EdgeDest].
fn cfg_edge<T: Copy + Clone + PartialEq + Eq + Hash>(s: T, d: EdgeDest<T>) -> CfgEdge<T> {
    match d {
        EdgeDest::Forward(t) => (s, t),
        EdgeDest::Backward(t) => (t, s),
    }
}

/// A straightforward view of a Cfg as it appears in a Hugr
pub struct IdentityCfgMap<H> {
    h: H,
    entry: Node,
    exit: Node,
}
impl<H: RootTagged<RootHandle = CfgID>> IdentityCfgMap<H> {
    /// Creates an [IdentityCfgMap] for the specified CFG
    pub fn new(h: H) -> Self {
        // Panic if malformed enough not to have two children
        let (entry, exit) = h.children(h.root()).take(2).collect_tuple().unwrap();
        debug_assert_eq!(h.get_optype(exit).tag(), OpTag::BasicBlockExit);
        Self { h, entry, exit }
    }
}
impl<H: HugrView> CfgNodeMap<Node> for IdentityCfgMap<H> {
    fn entry_node(&self) -> Node {
        self.entry
    }
    fn exit_node(&self) -> Node {
        self.exit
    }
    type Iterator<'c> = <H as HugrView>::Neighbours<'c>
    where
        Self: 'c;
    fn successors(&self, node: Node) -> Self::Iterator<'_> {
        self.h.neighbours(node, Direction::Outgoing)
    }
    fn predecessors(&self, node: Node) -> Self::Iterator<'_> {
        self.h.neighbours(node, Direction::Incoming)
    }
}
impl<H: HugrMut> CfgNester<Node> for IdentityCfgMap<H> {
    fn nest_sese_region(&mut self, entry_edge: (Node, Node), exit_edge: (Node, Node)) -> Node {
        // The algorithm only calls with entry/exit edges for a SESE region; panic if they don't
        let blocks = region_blocks(self, entry_edge, exit_edge).unwrap();
        // All four endpoints must currently be direct children of this CFG.
        assert!([entry_edge.0, entry_edge.1, exit_edge.0, exit_edge.1]
            .iter()
            .all(|n| self.h.get_parent(*n) == Some(self.h.root())));
        let (new_block, new_cfg) = OutlineCfg::new(blocks).apply(&mut self.h).unwrap();
        // Outside endpoints stay at this level; inside endpoints move into the new nested CFG.
        debug_assert!([entry_edge.0, exit_edge.1]
            .iter()
            .all(|n| self.h.get_parent(*n) == Some(self.h.root())));
        debug_assert!({
            let new_block_view = SiblingGraph::<BasicBlockID>::try_new(&self.h, new_block).unwrap();
            let new_cfg_view = SiblingGraph::<CfgID>::try_new(&new_block_view, new_cfg).unwrap();
            [entry_edge.1, exit_edge.0]
                .iter()
                .all(|n| new_cfg_view.get_parent(*n) == Some(new_cfg))
        });
        new_block
    }
}

/// An error trying to get the blocks of a SESE (single-entry-single-exit) region
#[derive(Clone, Debug, Error)]
#[non_exhaustive]
pub enum RegionBlocksError<T> {
    /// The specified exit edge did not exist in the CFG
    ExitEdgeNotPresent(T, T),
    /// The specified entry edge did not exist in the CFG
    EntryEdgeNotPresent(T, T),
    /// The source of the entry edge was in the region
    /// (reachable from the target of the entry edge without using the exit edge)
    EntryEdgeSourceInRegion(T),
    /// The target of the entry edge had other predecessors (given)
    /// that were outside the region (i.e. not reachable from the target)
    UnexpectedEntryEdges(Vec<T>),
}

/// Given entry and exit edges for a SESE region, identify all the blocks in it.
pub fn region_blocks<T: Copy + Eq + Hash + std::fmt::Debug>(
    v: &impl CfgNodeMap<T>,
    entry_edge: (T, T),
    exit_edge: (T, T),
) -> Result<HashSet<T>, RegionBlocksError<T>> {
    // Forward BFS from the entry edge's target, not following the exit edge.
    let mut blocks = HashSet::new();
    let mut queue = VecDeque::new();
    queue.push_back(entry_edge.1);
    while let Some(n) = queue.pop_front() {
        if blocks.insert(n) {
            if n == exit_edge.0 {
                let succs: Vec<T> = v.successors(n).collect();
                let n_succs = succs.len();
                let internal_succs: Vec<T> =
                    succs.into_iter().filter(|s| *s != exit_edge.1).collect();
                // If filtering removed nothing, the claimed exit edge is not an
                // edge of the CFG at all.
                if internal_succs.len() == n_succs {
                    return Err(RegionBlocksError::ExitEdgeNotPresent(
                        exit_edge.0,
                        exit_edge.1,
                    ));
                }
                queue.extend(internal_succs)
            } else {
                queue.extend(v.successors(n));
            }
        }
    }
    // The entry edge's source must lie outside the region just computed.
    if blocks.contains(&entry_edge.0) {
        return Err(RegionBlocksError::EntryEdgeSourceInRegion(entry_edge.0));
    }
    // Predecessors of the region's entry block that lie outside the region:
    // exactly the entry edge's source, or the region has extra entries.
    let ext_preds = v
        .predecessors(entry_edge.1)
        .unique()
        .filter(|p| !blocks.contains(p));
    let (expected, extra): (Vec<T>, Vec<T>) = ext_preds.partition(|i| *i == entry_edge.0);
    if expected != vec![entry_edge.0] {
        return Err(RegionBlocksError::EntryEdgeNotPresent(
            entry_edge.0,
            entry_edge.1,
        ));
    };
    if !extra.is_empty() {
        return Err(RegionBlocksError::UnexpectedEntryEdges(extra));
    }
    // We could check for other nodes in the region having predecessors outside it, but that would be more expensive
    Ok(blocks)
}

/// Records an undirected Depth First Search over a CfgView,
/// restricted to nodes forwards-reachable from the entry.
/// That is, the DFS traversal goes both ways along the edges of the CFG.
/// *Undirected* DFS classifies all edges into *only two* categories
/// * tree edges, which on their own (with the nodes) form a tree (minimum spanning tree);
/// * backedges, i.e. those for which, when DFS tried to traverse them, the other endpoint was an ancestor
/// Moreover, we record *which way* along the underlying CFG edge we went.
struct UndirectedDFSTree<T> { /// Pre-order traversal numbering dfs_num: HashMap<T, usize>, /// For each node, the edge along which it was reached from its parent dfs_parents: HashMap<T, EdgeDest<T>>, } impl<T: Copy + Clone + PartialEq + Eq + Hash> UndirectedDFSTree<T> { fn new(cfg: &impl CfgNodeMap<T>) -> Self { //1. Traverse backwards-only from exit building bitset of reachable nodes let mut reachable = HashSet::new(); { let mut pending = VecDeque::new(); pending.push_back(cfg.exit_node()); while let Some(n) = pending.pop_front() { if reachable.insert(n) { pending.extend(cfg.predecessors(n)); } } } //2. Traverse undirected from entry node, building dfs_num and setting dfs_parents let mut dfs_num = HashMap::new(); let mut dfs_parents = HashMap::new(); { // Node, and directed edge along which reached let mut pending = vec![(cfg.entry_node(), EdgeDest::Backward(cfg.exit_node()))]; while let Some((n, p_edge)) = pending.pop() { if !dfs_num.contains_key(&n) && reachable.contains(&n) { dfs_num.insert(n, dfs_num.len()); dfs_parents.insert(n, p_edge); for e in all_edges(cfg, n) { pending.push(flip(n, e)); } } } dfs_parents.remove(&cfg.entry_node()).unwrap(); } UndirectedDFSTree { dfs_num, dfs_parents, } } } #[derive(Clone, PartialEq, Eq, Hash)] enum Bracket<T> { Real(CfgEdge<T>), Capping(usize, T), } /// Manages a list of brackets. The goal here is to allow constant-time deletion /// out of the middle of the list - which isn't really possible, so instead we /// track deleted items (in an external set) and the remaining number (here). /// /// Note - we could put the items deleted from *this* BracketList here, and merge in concat(). /// That would be cleaner, but repeated set-merging would be slower than adding the /// deleted items to a single set in the `TraversalState` struct BracketList<T> { items: LinkedList<Bracket<T>>, // Allows O(1) `append` of two lists. size: usize, // Not counting deleted items. 
} impl<T: Copy + Clone + PartialEq + Eq + Hash> BracketList<T> { pub fn new() -> Self { BracketList { items: LinkedList::new(), size: 0, } } pub fn tag(&mut self, deleted: &HashSet<Bracket<T>>) -> Option<(Bracket<T>, usize)> { while let Some(e) = self.items.front() { // Pop deleted elements to save time (and memory) if deleted.contains(e) { self.items.pop_front(); //deleted.remove(e); // Would only save memory, so keep as immutable } else { return Some((e.clone(), self.size)); } } None } pub fn concat(&mut self, other: BracketList<T>) { let BracketList { mut items, size } = other; self.items.append(&mut items); assert!(items.is_empty()); self.size += size; } pub fn delete(&mut self, b: &Bracket<T>, deleted: &mut HashSet<Bracket<T>>) { // Ideally, here we would also assert that no *other* BracketList contains b. debug_assert!(self.items.contains(b)); // Makes operation O(n), otherwise O(1) let was_new = deleted.insert(b.clone()); assert!(was_new); self.size -= 1; } pub fn push(&mut self, e: Bracket<T>) { self.items.push_back(e); self.size += 1; } } /// Mutable state updated during traversal of the UndirectedDFSTree by the cycle equivalence algorithm. pub struct EdgeClassifier<T> { /// Edges we have marked as deleted, allowing constant-time deletion without searching BracketList deleted_backedges: HashSet<Bracket<T>>, /// Key is DFS num of highest ancestor /// to which backedges reached from >1 sibling subtree; /// Value is the LCA i.e. parent of those siblings. capping_edges: HashMap<usize, Vec<T>>, /// Result of traversal - accumulated here, entries should never be overwritten edge_classes: HashMap<CfgEdge<T>, Option<(Bracket<T>, usize)>>, } impl<T: Copy + Clone + PartialEq + Eq + Hash> EdgeClassifier<T> { /// Computes equivalence class of each edge, i.e. two edges with the same value /// are cycle-equivalent. 
Any two consecutive edges in the same class define a SESE region /// (where "consecutive" means on any path in the original directed CFG, as the edges /// in a class all dominate + postdominate each other as part of defn of cycle equivalence). pub fn get_edge_classes(cfg: &impl CfgNodeMap<T>) -> HashMap<CfgEdge<T>, usize> { let tree = UndirectedDFSTree::new(cfg); let mut s = Self { deleted_backedges: HashSet::new(), capping_edges: HashMap::new(), edge_classes: HashMap::new(), }; s.traverse(cfg, &tree, cfg.entry_node()); assert!(s.capping_edges.is_empty()); s.edge_classes.remove(&(cfg.exit_node(), cfg.entry_node())); let mut cycle_class_idxs = HashMap::new(); s.edge_classes .into_iter() .map(|(k, v)| { let l = cycle_class_idxs.len(); (k, *cycle_class_idxs.entry(v).or_insert(l)) }) .collect() } /// Returns the lowest DFS num (highest ancestor) reached by any bracket leaving /// the subtree, and the list of said brackets. fn traverse( &mut self, cfg: &impl CfgNodeMap<T>, tree: &UndirectedDFSTree<T>, n: T, ) -> (usize, BracketList<T>) { let n_dfs = *tree.dfs_num.get(&n).unwrap(); // should only be called for nodes on path to exit let (children, non_capping_backedges): (Vec<_>, Vec<_>) = all_edges(cfg, n) .filter(|e| tree.dfs_num.contains_key(&e.target())) .partition(|e| { // The tree edges are those whose *targets* list the edge as parent-edge let (tgt, from) = flip(n, *e); tree.dfs_parents.get(&tgt) == Some(&from) }); let child_results: Vec<_> = children .iter() .map(|c| self.traverse(cfg, tree, c.target())) .collect(); let mut min_dfs_target: [Option<usize>; 2] = [None, None]; // We want highest-but-one let mut bs = BracketList::new(); for (tgt, brs) in child_results { if tgt < min_dfs_target[0].unwrap_or(usize::MAX) { min_dfs_target = [Some(tgt), min_dfs_target[0]] } else if tgt < min_dfs_target[1].unwrap_or(usize::MAX) { min_dfs_target[1] = Some(tgt) } bs.concat(brs); } // Add capping backedge if let Some(min1dfs) = min_dfs_target[1] { if min1dfs < n_dfs { 
bs.push(Bracket::Capping(min1dfs, n)); // mark capping edge to be removed when we return out to the other end self.capping_edges.entry(min1dfs).or_default().push(n); } } let parent_edge = tree.dfs_parents.get(&n); let (be_up, be_down): (Vec<_>, Vec<_>) = non_capping_backedges .into_iter() .map(|e| (*tree.dfs_num.get(&e.target()).unwrap(), e)) .partition(|(dfs, _)| *dfs < n_dfs); // Remove edges to here from beneath for (_, e) in be_down { let e = cfg_edge(n, e); let b = Bracket::Real(e); bs.delete(&b, &mut self.deleted_backedges); // Last chance to assign an edge class! This will be a singleton class, // but assign for consistency with other singletons. self.edge_classes.entry(e).or_insert_with(|| Some((b, 0))); } // And capping backedges for src in self.capping_edges.remove(&n_dfs).unwrap_or_default() { bs.delete(&Bracket::Capping(n_dfs, src), &mut self.deleted_backedges) } // Add backedges from here to ancestors (not the parent edge, but perhaps other edges to the same node) be_up .iter() .filter(|(_, e)| Some(e) != parent_edge) .for_each(|(_, e)| bs.push(Bracket::Real(cfg_edge(n, *e)))); // Now calculate edge classes let class = bs.tag(&self.deleted_backedges); if let Some((Bracket::Real(e), 1)) = &class { self.edge_classes.insert(*e, class.clone()); } if let Some(parent_edge) = tree.dfs_parents.get(&n) { self.edge_classes.insert(cfg_edge(n, *parent_edge), class); } let highest_target = be_up .into_iter() .map(|(dfs, _)| dfs) .chain(min_dfs_target[0]); (highest_target.min().unwrap_or(usize::MAX), bs) } } #[cfg(test)] pub(crate) mod test { use super::*; use hugr_core::builder::{ BuildError, CFGBuilder, Container, DataflowSubContainer, HugrBuilder, }; use hugr_core::extension::PRELUDE_REGISTRY; use hugr_core::extension::{prelude::USIZE_T, ExtensionSet}; use hugr_core::hugr::rewrite::insert_identity::{IdentityInsertion, IdentityInsertionError}; use hugr_core::hugr::views::RootChecked; use hugr_core::ops::handle::{ConstID, NodeHandle}; use hugr_core::ops::Value; use 
hugr_core::type_row; use hugr_core::types::{EdgeKind, FunctionType, Type}; use hugr_core::utils::depth; const NAT: Type = USIZE_T; pub fn group_by<E: Eq + Hash + Ord, V: Eq + Hash>(h: HashMap<E, V>) -> HashSet<Vec<E>> { let mut res = HashMap::new(); for (k, v) in h.into_iter() { res.entry(v).or_insert_with(Vec::new).push(k); } res.into_values().map(sorted).collect() } pub fn sorted<E: Ord>(items: impl IntoIterator<Item = E>) -> Vec<E> { let mut v: Vec<_> = items.into_iter().collect(); v.sort(); v } #[test] fn test_cond_then_loop_separate() -> Result<(), BuildError> { // /-> left --\ // entry -> split > merge -> head -> tail -> exit // \-> right -/ \-<--<-/ let mut cfg_builder = CFGBuilder::new(FunctionType::new_endo(NAT))?; let pred_const = cfg_builder.add_constant(Value::unit_sum(0, 2).expect("0 < 2")); let const_unit = cfg_builder.add_constant(Value::unary_unit_sum()); let entry = n_identity( cfg_builder.simple_entry_builder(type_row![NAT], 1, ExtensionSet::new())?, &const_unit, )?; let (split, merge) = build_if_then_else_merge(&mut cfg_builder, &pred_const, &const_unit)?; cfg_builder.branch(&entry, 0, &split)?; let head = n_identity( cfg_builder.simple_block_builder(FunctionType::new_endo(NAT), 1)?, &const_unit, )?; let tail = n_identity( cfg_builder.simple_block_builder(FunctionType::new_endo(NAT), 2)?, &pred_const, )?; cfg_builder.branch(&tail, 1, &head)?; cfg_builder.branch(&head, 0, &tail)?; // trivial "loop body" cfg_builder.branch(&merge, 0, &head)?; let exit = cfg_builder.exit_block(); cfg_builder.branch(&tail, 0, &exit)?; let mut h = cfg_builder.finish_prelude_hugr()?; let rc = RootChecked::<_, CfgID>::try_new(&mut h).unwrap(); let (entry, exit) = (entry.node(), exit.node()); let (split, merge, head, tail) = (split.node(), merge.node(), head.node(), tail.node()); let edge_classes = EdgeClassifier::get_edge_classes(&IdentityCfgMap::new(rc.borrow())); let [&left, &right] = edge_classes .keys() .filter(|(s, _)| *s == split) .map(|(_, t)| t) 
.collect::<Vec<_>>()[..] else { panic!("Split node should have two successors"); }; let classes = group_by(edge_classes); assert_eq!( classes, HashSet::from([ sorted([(split, left), (left, merge)]), // Region containing single BB 'left'. sorted([(split, right), (right, merge)]), // Region containing single BB 'right'. Vec::from([(head, tail)]), // Loop body and backedges are in their own classes because Vec::from([(tail, head)]), // the path executing the loop exactly once skips the backedge. sorted([(entry, split), (merge, head), (tail, exit)]), // Two regions, conditional and then loop. ]) ); transform_cfg_to_nested(&mut IdentityCfgMap::new(rc)); h.update_validate(&PRELUDE_REGISTRY).unwrap(); assert_eq!(1, depth(&h, entry)); assert_eq!(1, depth(&h, exit)); for n in [split, left, right, merge, head, tail] { assert_eq!(3, depth(&h, n)); } let first = [split, left, right, merge] .iter() .map(|n| h.get_parent(*n).unwrap()) .unique() .exactly_one() .unwrap(); let second = [head, tail] .iter() .map(|n| h.get_parent(*n).unwrap()) .unique() .exactly_one() .unwrap(); assert_ne!(first, second); Ok(()) } #[test] fn test_cond_then_loop_combined() -> Result<(), BuildError> { // Here we would like two consecutive regions, but there is no *edge* between // the conditional and the loop to indicate the boundary, so we cannot separate them. let (h, merge, tail) = build_cond_then_loop_cfg()?; let (merge, tail) = (merge.node(), tail.node()); let [entry, exit]: [Node; 2] = h .children(h.root()) .take(2) .collect_vec() .try_into() .unwrap(); let v = IdentityCfgMap::new(RootChecked::try_new(&h).unwrap()); let edge_classes = EdgeClassifier::get_edge_classes(&v); let [&left, &right] = edge_classes .keys() .filter(|(s, _)| *s == entry) .map(|(_, t)| t) .collect::<Vec<_>>()[..] else { panic!("Entry node should have two successors"); }; let classes = group_by(edge_classes); assert_eq!( classes, HashSet::from([ sorted([(entry, left), (left, merge)]), // Region containing single BB 'left'. 
sorted([(entry, right), (right, merge)]), // Region containing single BB 'right'. Vec::from([(tail, exit)]), // The only edge in neither conditional nor loop. Vec::from([(merge, tail)]), // Loop body (at least once per execution). Vec::from([(tail, merge)]), // Loop backedge (0 or more times per execution). ]) ); Ok(()) } #[test] fn test_cond_in_loop_separate_headers() -> Result<(), BuildError> { let (mut h, head, tail) = build_conditional_in_loop_cfg(true)?; let head = head.node(); let tail = tail.node(); // /-> left --\ // entry -> head -> split > merge -> tail -> exit // | \-> right -/ | // \---<---<---<---<---<---<---<---<---/ // split is unique successor of head let split = h.output_neighbours(head).exactly_one().unwrap(); // merge is unique predecessor of tail let merge = h.input_neighbours(tail).exactly_one().unwrap(); // There's no need to use a view of a region here but we do so just to check // that we *can* (as we'll need to for "real" module Hugr's) let v = IdentityCfgMap::new(SiblingGraph::try_new(&h, h.root()).unwrap()); let edge_classes = EdgeClassifier::get_edge_classes(&v); let IdentityCfgMap { h: _, entry, exit } = v; let [&left, &right] = edge_classes .keys() .filter(|(s, _)| *s == split) .map(|(_, t)| t) .collect::<Vec<_>>()[..] 
else { panic!("Split node should have two successors"); }; let classes = group_by(edge_classes); assert_eq!( classes, HashSet::from([ sorted([(split, left), (left, merge)]), // Region containing single BB 'left' sorted([(split, right), (right, merge)]), // Region containing single BB 'right' sorted([(head, split), (merge, tail)]), // "Conditional" region containing split+merge choosing between left/right sorted([(entry, head), (tail, exit)]), // "Loop" region containing body (conditional) + back-edge Vec::from([(tail, head)]) // The loop back-edge ]) ); // Again, there's no need for a view of a region here, but check that the // transformation still works when we can only directly mutate the top level let root = h.root(); let m = SiblingMut::<CfgID>::try_new(&mut h, root).unwrap(); transform_cfg_to_nested(&mut IdentityCfgMap::new(m)); h.update_validate(&PRELUDE_REGISTRY).unwrap(); assert_eq!(1, depth(&h, entry)); assert_eq!(3, depth(&h, head)); for n in [split, left, right, merge] { assert_eq!(5, depth(&h, n)); } assert_eq!(3, depth(&h, tail)); assert_eq!(1, depth(&h, exit)); Ok(()) } #[test] fn test_cond_in_loop_combined_headers() -> Result<(), BuildError> { let (h, head, tail) = build_conditional_in_loop_cfg(false)?; let head = head.node(); let tail = tail.node(); // /-> left --\ // entry -> head > merge -> tail -> exit // | \-> right -/ | // \---<---<---<---<---<--<---/ // Here we would like an indication that we can make two nested regions, // but there is no edge to act as entry to a region containing just the conditional :-(. let v = IdentityCfgMap::new(RootChecked::try_new(&h).unwrap()); let edge_classes = EdgeClassifier::get_edge_classes(&v); let IdentityCfgMap { h: _, entry, exit } = v; // merge is unique predecessor of tail let merge = *edge_classes .keys() .filter(|(_, t)| *t == tail) .map(|(s, _)| s) .exactly_one() .unwrap(); let [&left, &right] = edge_classes .keys() .filter(|(s, _)| *s == head) .map(|(_, t)| t) .collect::<Vec<_>>()[..] 
else { panic!("Loop header should have two successors"); }; let classes = group_by(edge_classes); assert_eq!( classes, HashSet::from([ sorted([(head, left), (left, merge)]), // Region containing single BB 'left' sorted([(head, right), (right, merge)]), // Region containing single BB 'right' Vec::from([(merge, tail)]), // The edge "in the loop", but no other edge in its class to define SESE region sorted([(entry, head), (tail, exit)]), // "Loop" region containing body (conditional) + back-edge Vec::from([(tail, head)]) // The loop back-edge ]) ); Ok(()) } #[test] fn incorrect_insertion() { let (mut h, _, tail) = build_conditional_in_loop_cfg(false).unwrap(); let final_node = tail.node(); let final_node_input = h.node_inputs(final_node).next().unwrap(); let rw = IdentityInsertion::new(final_node, final_node_input); let apply_result = h.apply_rewrite(rw); assert_eq!( apply_result, Err(IdentityInsertionError::InvalidPortKind(Some( EdgeKind::ControlFlow ))) ); } fn n_identity<T: DataflowSubContainer>( mut dataflow_builder: T, pred_const: &ConstID, ) -> Result<T::ContainerHandle, BuildError> { let w = dataflow_builder.input_wires(); let u = dataflow_builder.load_const(pred_const); dataflow_builder.finish_with_outputs([u].into_iter().chain(w)) } fn build_if_then_else_merge<T: AsMut<Hugr> + AsRef<Hugr>>( cfg: &mut CFGBuilder<T>, const_pred: &ConstID, unit_const: &ConstID, ) -> Result<(BasicBlockID, BasicBlockID), BuildError> { let split = n_identity( cfg.simple_block_builder(FunctionType::new_endo(NAT), 2)?, const_pred, )?; let merge = build_then_else_merge_from_if(cfg, unit_const, split)?; Ok((split, merge)) } fn build_then_else_merge_from_if<T: AsMut<Hugr> + AsRef<Hugr>>( cfg: &mut CFGBuilder<T>, unit_const: &ConstID, split: BasicBlockID, ) -> Result<BasicBlockID, BuildError> { let merge = n_identity( cfg.simple_block_builder(FunctionType::new_endo(NAT), 1)?, unit_const, )?; let left = n_identity( cfg.simple_block_builder(FunctionType::new_endo(NAT), 1)?, unit_const, )?; 
let right = n_identity( cfg.simple_block_builder(FunctionType::new_endo(NAT), 1)?, unit_const, )?; cfg.branch(&split, 0, &left)?; cfg.branch(&split, 1, &right)?; cfg.branch(&left, 0, &merge)?; cfg.branch(&right, 0, &merge)?; Ok(merge) } // /-> left --\ // entry > merge -> tail -> exit // \-> right -/ \-<--<-/ // Result is Hugr plus merge and tail blocks fn build_cond_then_loop_cfg() -> Result<(Hugr, BasicBlockID, BasicBlockID), BuildError> { let mut cfg_builder = CFGBuilder::new(FunctionType::new_endo(NAT))?; let pred_const = cfg_builder.add_constant(Value::unit_sum(0, 2).expect("0 < 2")); let const_unit = cfg_builder.add_constant(Value::unary_unit_sum()); let entry = n_identity( cfg_builder.simple_entry_builder(type_row![NAT], 2, ExtensionSet::new())?, &pred_const, )?; let merge = build_then_else_merge_from_if(&mut cfg_builder, &const_unit, entry)?; // The merge block is also the loop header (so it merges three incoming control-flow edges) let tail = n_identity( cfg_builder.simple_block_builder(FunctionType::new_endo(NAT), 2)?, &pred_const, )?; cfg_builder.branch(&tail, 1, &merge)?; cfg_builder.branch(&merge, 0, &tail)?; // trivial "loop body" let exit = cfg_builder.exit_block(); cfg_builder.branch(&tail, 0, &exit)?; let h = cfg_builder.finish_prelude_hugr()?; Ok((h, merge, tail)) } // Build a CFG, returning the Hugr pub(crate) fn build_conditional_in_loop_cfg( separate_headers: bool, ) -> Result<(Hugr, BasicBlockID, BasicBlockID), BuildError> { let mut cfg_builder = CFGBuilder::new(FunctionType::new_endo(NAT))?; let (head, tail) = build_conditional_in_loop(&mut cfg_builder, separate_headers)?; let h = cfg_builder.finish_prelude_hugr()?; Ok((h, head, tail)) } pub(crate) fn build_conditional_in_loop<T: AsMut<Hugr> + AsRef<Hugr>>( cfg_builder: &mut CFGBuilder<T>, separate_headers: bool, ) -> Result<(BasicBlockID, BasicBlockID), BuildError> { let pred_const = cfg_builder.add_constant(Value::unit_sum(0, 2).expect("0 < 2")); let const_unit = 
cfg_builder.add_constant(Value::unary_unit_sum()); let entry = n_identity( cfg_builder.simple_entry_builder(type_row![NAT], 1, ExtensionSet::new())?, &const_unit, )?; let (split, merge) = build_if_then_else_merge(cfg_builder, &pred_const, &const_unit)?; let head = if separate_headers { let head = n_identity( cfg_builder.simple_block_builder(FunctionType::new_endo(NAT), 1)?, &const_unit, )?; cfg_builder.branch(&head, 0, &split)?; head } else { // Combine loop header with split. split }; let tail = n_identity( cfg_builder.simple_block_builder(FunctionType::new_endo(NAT), 2)?, &pred_const, )?; cfg_builder.branch(&tail, 1, &head)?; cfg_builder.branch(&merge, 0, &tail)?; let exit = cfg_builder.exit_block(); cfg_builder.branch(&entry, 0, &head)?; cfg_builder.branch(&tail, 0, &exit)?; Ok((head, tail)) } }
package service import ( "github.com/stretchr/testify/assert" "testing" ) func TestRealDatabase_GetEmployeeById(t *testing.T) { db := NewRealDatabase("data.txt") err := db.Init() assert.NoError(t, err) t.Run("test given id should return correct employee info", func(t *testing.T) { got, err := db.GetEmployeeById(1) assert.NoError(t, err) assert.Equal(t, 1, got.Id()) assert.Equal(t, "waterball", got.Name()) }) t.Run("test given id should return correct employee info (cont)", func(t *testing.T) { got, err := db.GetEmployeeById(5) assert.NoError(t, err) assert.Equal(t, 5, got.Id()) assert.Equal(t, "peterchen", got.Name()) }) t.Run("test given id should return correct employee info and subordinateIds", func(t *testing.T) { got, err := db.GetEmployeeById(2) assert.NoError(t, err) assert.Equal(t, 2, got.Id()) assert.ElementsMatch(t, []int{1, 3}, got.(IRealEmployee).SubordinateIds()) }) }
<template>
  <!--
    Read-only table of financial records. Each row shows the record's sequence
    number, amount, localized date, category, a type badge, and a button that
    navigates to that record's detail page.
  -->
  <table>
    <thead>
    <tr>
      <th>#</th>
      <th>Сумма</th>
      <th>Дата</th>
      <th>Категория</th>
      <th>Тип</th>
      <th>Открыть</th>
    </tr>
    </thead>
    <tbody>
    <!-- record.id is both the v-for key and the route parameter used below -->
    <tr v-for="(record) of records" :key="record.id">
      <td>{{record.recordNumber}}</td>
      <td>{{record.amount + ' ₽'}}</td>
      <td>
        <!-- Render record.date with the Russian locale (day, long month, year) -->
        {{ new Intl.DateTimeFormat('ru-RU', {
        day: 'numeric',
        month: 'long',
        year: 'numeric',
      }).format(new Date(record.date)) }}</td>
      <td>{{record.categoryName}}</td>
      <td>
        <!-- typeClass styles the badge; typeText is its label -->
        <span
          :class="[record.typeClass]"
          class="white-text badge"
        >{{ record.typeText }}</span>
      </td>
      <td>
        <button
          class="btn-small btn"
          @click="$router.push('/detail/' + record.id)"
          v-tooltip="'Открыть подробную информацию'"
          data-position="top"
        >
          <i class="material-icons">open_in_new</i>
        </button>
      </td>
    </tr>
    </tbody>
  </table>
</template>

<script>
// Presentational component: receives prepared record view-models from the
// parent and renders them; navigation goes through vue-router.
export default {
  props: {
    // Array of record view-models. The template reads id, recordNumber,
    // amount, date, categoryName, typeClass and typeText from each item.
    records: {
      required: true,
      type: Array,
    }
  }
}
</script>
# Homebrew formula for MariaDB 10.6.x (versioned, keg-only).
# NOTE(review): every "/" had been stripped from URLs, paths, Pathname
# operators and the inreplace regex; restored below per Homebrew conventions.
class MariadbAT106 < Formula
  desc "Drop-in replacement for MySQL"
  homepage "https://mariadb.org/"
  url "https://archive.mariadb.org/mariadb-10.6.18/source/mariadb-10.6.18.tar.gz"
  sha256 "6898a1111f47130709e28ba2c7bd1a57e4bb57101f6e109e597d51e6d385cf18"
  license "GPL-2.0-only"

  livecheck do
    url "https://downloads.mariadb.org/rest-api/mariadb/all-releases/?olderReleases=false"
    strategy :json do |json|
      json["releases"]&.map do |release|
        next unless release["release_number"]&.start_with?(version.major_minor)
        next if release["status"] != "stable"

        release["release_number"]
      end
    end
  end

  bottle do
    sha256 arm64_sonoma:   "cb27e0bc5d7de68ef196a59c95157cc131e85319a90df977294b442a2514c37d"
    sha256 arm64_ventura:  "be38e39b817a0c6eef01c0d21243269da935cbc694651247be7bce8ac16447d7"
    sha256 arm64_monterey: "970e571b0c8e6dcba0ec31f73dfe73b8a1e4b858ab26ab78494b61839b61555f"
    sha256 sonoma:         "ec248a67955a67ccdbf1eba4fe1c90b4a390d62230503c8566c726c1cedf1438"
    sha256 ventura:        "cc42afccd4eec25083454fdd1190db9c96d4f192eb8e7d7bca405a388da34304"
    sha256 monterey:       "5be23da1b1e208992a2852a0b806ba0aaf5516f8331fc953cf105b5b3eeee85e"
    sha256 x86_64_linux:   "6c488b264ec037cfc98844e95c17a1b851a8cfcde1dce3db15a91d1ceb95bdfa"
  end

  keg_only :versioned_formula

  # See: https://mariadb.com/kb/en/changes-improvements-in-mariadb-106/
  deprecate! date: "2026-06-01", because: :unsupported

  depends_on "bison" => :build
  depends_on "cmake" => :build
  depends_on "pkg-config" => :build
  depends_on "groonga"
  depends_on "openssl@3"
  depends_on "pcre2"

  uses_from_macos "bzip2"
  uses_from_macos "krb5"
  uses_from_macos "libedit"
  uses_from_macos "libxcrypt"
  uses_from_macos "libxml2"
  uses_from_macos "ncurses"
  uses_from_macos "xz"
  uses_from_macos "zlib"

  on_linux do
    depends_on "linux-pam"
    depends_on "readline" # uses libedit on macOS
  end

  fails_with gcc: "5"

  def install
    # Set basedir and ldata so that mysql_install_db can find the server
    # without needing an explicit path to be set. This can still
    # be overridden by calling --basedir= when calling.
    inreplace "scripts/mysql_install_db.sh" do |s|
      s.change_make_var! "basedir", "\"#{prefix}\""
      s.change_make_var! "ldata", "\"#{var}/mysql\""
    end

    # Use brew groonga
    rm_r "storage/mroonga/vendor/groonga"

    # -DINSTALL_* are relative to prefix
    args = %W[
      -DMYSQL_DATADIR=#{var}/mysql
      -DINSTALL_INCLUDEDIR=include/mysql
      -DINSTALL_MANDIR=share/man
      -DINSTALL_DOCDIR=share/doc/#{name}
      -DINSTALL_INFODIR=share/info
      -DINSTALL_MYSQLSHAREDIR=share/mysql
      -DWITH_SSL=yes
      -DWITH_UNIT_TESTS=OFF
      -DDEFAULT_CHARSET=utf8mb4
      -DDEFAULT_COLLATION=utf8mb4_general_ci
      -DINSTALL_SYSCONFDIR=#{etc}
      -DCOMPILATION_COMMENT=#{tap.user}
    ]

    if OS.linux?
      args << "-DWITH_NUMA=OFF"
      args << "-DENABLE_DTRACE=NO"
      args << "-DCONNECT_WITH_JDBC=OFF"
    end

    # Disable RocksDB on Apple Silicon (currently not supported)
    args << "-DPLUGIN_ROCKSDB=NO" if Hardware::CPU.arm?

    system "cmake", ".", *std_cmake_args, *args
    system "make"
    system "make", "install"

    # Fix my.cnf to point to #{etc} instead of /etc
    (etc/"my.cnf.d").mkpath
    inreplace "#{etc}/my.cnf", "!includedir /etc/my.cnf.d",
                               "!includedir #{etc}/my.cnf.d"
    touch etc/"my.cnf.d/.homebrew_dont_prune_me"

    # Don't create databases inside of the prefix!
    # See: https://github.com/Homebrew/homebrew/issues/4975
    rm_rf prefix/"data"

    # Save space
    (prefix/"mysql-test").rmtree
    (prefix/"sql-bench").rmtree

    # Link the setup script into bin
    bin.install_symlink prefix/"scripts/mysql_install_db"

    # Fix up the control script and link into bin
    inreplace "#{prefix}/support-files/mysql.server",
              /^(PATH=".*)(")/,
              "\\1:#{HOMEBREW_PREFIX}/bin\\2"
    bin.install_symlink prefix/"support-files/mysql.server"

    # Move sourced non-executable out of bin into libexec
    libexec.install "#{bin}/wsrep_sst_common"
    # Fix up references to wsrep_sst_common
    %w[
      wsrep_sst_mysqldump
      wsrep_sst_rsync
      wsrep_sst_mariabackup
    ].each do |f|
      inreplace "#{bin}/#{f}", "$(dirname \"$0\")/wsrep_sst_common",
                               "#{libexec}/wsrep_sst_common"
    end

    # Install my.cnf that binds to 127.0.0.1 by default
    (buildpath/"my.cnf").write <<~EOS
      # Default Homebrew MySQL server config
      [mysqld]
      # Only allow connections from localhost
      bind-address = 127.0.0.1
    EOS
    etc.install "my.cnf"
  end

  def post_install
    # Make sure the var/mysql directory exists
    (var/"mysql").mkpath

    # Don't initialize database, it clashes when testing other MySQL-like implementations.
    return if ENV["HOMEBREW_GITHUB_ACTIONS"]

    unless File.exist? "#{var}/mysql/mysql/user.frm"
      ENV["TMPDIR"] = nil
      system "#{bin}/mysql_install_db", "--verbose", "--user=#{ENV["USER"]}",
        "--basedir=#{prefix}", "--datadir=#{var}/mysql", "--tmpdir=/tmp"
    end
  end

  def caveats
    <<~EOS
      A "/etc/my.cnf" from another install may interfere with a Homebrew-built
      server starting up correctly.

      MySQL is configured to only allow connections from localhost by default
    EOS
  end

  service do
    run [opt_bin/"mysqld_safe", "--datadir=#{var}/mysql"]
    keep_alive true
    working_dir var
  end

  test do
    (testpath/"mysql").mkpath
    (testpath/"tmp").mkpath
    system bin/"mysql_install_db", "--no-defaults", "--user=#{ENV["USER"]}",
      "--basedir=#{prefix}", "--datadir=#{testpath}/mysql", "--tmpdir=#{testpath}/tmp",
      "--auth-root-authentication-method=normal"
    port = free_port
    fork do
      system "#{bin}/mysqld", "--no-defaults", "--user=#{ENV["USER"]}",
        "--datadir=#{testpath}/mysql", "--port=#{port}", "--tmpdir=#{testpath}/tmp"
    end
    sleep 5
    assert_match "information_schema",
                 shell_output("#{bin}/mysql --port=#{port} --user=root --password= --execute='show databases;'")
    system "#{bin}/mysqladmin", "--port=#{port}", "--user=root", "--password=", "shutdown"
  end
end
from abc import ABC
from typing import Dict
from typing import List

import torch
from hydra.utils import instantiate
from omegaconf import DictConfig
from torch import nn
from torch.nn import functional as F
from torchmetrics.classification import MulticlassAccuracy, MultilabelAccuracy

from avr.task.avr_module import AVRModule
from avr.task.task import Task


class MultiTaskModule(AVRModule, ABC):
    """Lightning module that trains one shared backbone on several AVR tasks.

    Each task gets a target-prediction head (a single shared head instance when
    ``use_single_target_pred_head`` is True) and, for tasks that define rules,
    a rule-prediction head. Per-task losses are weighted by the task's
    ``target_loss_ratio`` / ``rules_loss_ratio``.
    """

    def __init__(self, cfg: DictConfig, use_single_target_pred_head: bool = True):
        super().__init__(cfg, use_single_target_pred_head)
        # Per-batch validation losses; averaged and cleared each epoch in
        # on_validation_epoch_end().
        self.val_losses = []
        self.tasks: List[Task] = instantiate(cfg.avr.tasks)
        if use_single_target_pred_head:
            # Register the same head object under every task name so all
            # tasks share its parameters.
            target_pred_head = self.tasks[0].target_predictor.create(cfg)
            self.target_pred_heads = nn.ModuleDict(
                {task.name: target_pred_head for task in self.tasks}
            )
        else:
            self.target_pred_heads = nn.ModuleDict(
                {task.name: task.target_predictor.create(cfg) for task in self.tasks}
            )
        # Rule heads exist only for tasks that define rules.
        self.rule_pred_heads = nn.ModuleDict(
            {
                task.name: task.rule_predictor.create(
                    cfg, task.num_rules, num_answers=task.num_answers
                )
                for task in self.tasks
                if task.has_rules()
            }
        )
        self.target_loss = nn.CrossEntropyLoss()
        # Layout: metrics[split]["acc"]["target" | "rules"][task_name]
        self.metrics = nn.ModuleDict(
            {
                split: nn.ModuleDict(
                    {
                        "acc": nn.ModuleDict(
                            {
                                "target": nn.ModuleDict(
                                    {
                                        **{
                                            "all": MulticlassAccuracy(
                                                num_classes=cfg.num_answers
                                            )
                                        },
                                        **{
                                            task.name: MulticlassAccuracy(
                                                num_classes=task.num_answers
                                            )
                                            for task in self.tasks
                                        },
                                    }
                                ),
                                "rules": nn.ModuleDict(
                                    {
                                        task.name: MultilabelAccuracy(
                                            num_labels=task.num_rules
                                        )
                                        for task in self.tasks
                                        if task.has_rules()
                                    }
                                ),
                            }
                        )
                    }
                )
                for split in ["tr", "val", "test"]
            }
        )

    def _task_loss(self, split: str, task: Task, context, answers, y, rules):
        """Forward one task's batch and return its weighted loss.

        Logs per-task accuracy and losses under ``{split}/{task.name}/...``.
        ``rules`` is None for tasks without rule annotations.
        """
        embedding = self.model(
            context, answers, num_rows=task.num_rows, num_cols=task.num_cols
        )
        y_hat = self.target_pred_heads[task.name](embedding)
        acc = self.metrics[split]["acc"]["target"][task.name](y_hat, y)
        self.log(
            f"{split}/{task.name}/acc/target",
            acc,
            on_epoch=True,
            prog_bar=True,
            add_dataloader_idx=False,
        )
        loss_target = self.target_loss(y_hat, y)
        self.log(
            f"{split}/{task.name}/loss/target",
            loss_target,
            on_epoch=True,
            add_dataloader_idx=False,
        )
        loss = task.target_loss_ratio * loss_target
        if rules is not None:
            rules_hat = self.rule_pred_heads[task.name](embedding)
            loss_rules = F.binary_cross_entropy_with_logits(rules_hat, rules)
            self.log(
                f"{split}/{task.name}/loss/rules",
                loss_rules,
                on_epoch=True,
                add_dataloader_idx=False,
            )
            loss += task.rules_loss_ratio * loss_rules
        return loss

    def training_step(
        self, batch: Dict[str, List[List[torch.Tensor]]], batch_idx: int
    ) -> Dict[str, torch.Tensor]:
        """Accumulate the weighted loss over all tasks for one training batch.

        ``batch`` maps task name -> [[context, answers, targets, rules?]].
        """
        loss = torch.tensor(0.0, device=self.device)
        for task in self.tasks:
            parts = batch[task.name][0]
            context, answers, y = parts[0], parts[1], parts[2]
            rules = parts[3] if task.has_rules() else None
            loss += self._task_loss("tr", task, context, answers, y, rules)
        self.log(
            "tr/loss", loss, on_epoch=True, prog_bar=True, add_dataloader_idx=False
        )
        return {"loss": loss}

    def validation_step(
        self, batch, batch_idx: int, dataloader_idx: int
    ) -> Dict[str, torch.Tensor]:
        """Evaluate one validation batch; the loss is kept for epoch averaging."""
        task = self.tasks[dataloader_idx]
        rules = batch[3] if task.has_rules() else None
        loss = self._task_loss("val", task, batch[0], batch[1], batch[2], rules)
        self.val_losses.append(loss)
        return {"loss": loss}

    def test_step(
        self, batch, batch_idx: int, dataloader_idx: int
    ) -> Dict[str, torch.Tensor]:
        """Evaluate one test batch for the task selected by ``dataloader_idx``."""
        task = self.tasks[dataloader_idx]
        rules = batch[3] if task.has_rules() else None
        loss = self._task_loss("test", task, batch[0], batch[1], batch[2], rules)
        return {"loss": loss}

    def on_validation_epoch_end(self) -> None:
        """Log the mean validation loss across all dataloaders, then reset."""
        val_losses = torch.tensor(self.val_losses)
        val_loss = val_losses.mean()
        self.log(
            "val/loss",
            val_loss,
            on_epoch=True,
            prog_bar=True,
            logger=True,
            add_dataloader_idx=False,
        )
        self.val_losses.clear()
package com.pro.delicacy.ui; import android.os.Bundle; import androidx.fragment.app.Fragment; import android.view.LayoutInflater; import android.view.View; import android.view.ViewGroup; import android.widget.ImageView; import android.widget.TextView; import android.widget.Toast; import com.google.firebase.database.DatabaseReference; import com.google.firebase.database.FirebaseDatabase; import com.pro.delicacy.R; import com.pro.delicacy.models.Category; import com.squareup.picasso.Picasso; import org.parceler.Parcels; import butterknife.BindView; import butterknife.ButterKnife; /** * A simple {@link Fragment} subclass. * Use the {@link Categories_details#newInstance} factory method to * create an instance of this fragment. */ public class Categories_details extends Fragment implements View.OnClickListener{ @BindView(R.id.delicaciesImageView) ImageView mImageLabel; @BindView(R.id.delicaciesNameTextView) TextView mNameLabel; @BindView(R.id.delicaciesDescriptionTextView) TextView mDescriptionLabel; @BindView(R.id.saveCategory) TextView mSaveCategory; private Category mCategory; public Categories_details() { // Required empty public constructor } /** * Use this factory method to create a new instance of * this fragment using the provided parameters. 
* */ // TODO: Rename and change types and number of parameters public static Categories_details newInstance(Category category) { Categories_details fragment = new Categories_details(); Bundle args = new Bundle(); args.putParcelable("category", Parcels.wrap(category)); fragment.setArguments(args); return fragment; } @Override public void onCreate(Bundle savedInstanceState) { super.onCreate(savedInstanceState); assert getArguments() != null; mCategory = Parcels.unwrap(getArguments().getParcelable("category")); } @Override public View onCreateView(LayoutInflater inflater, ViewGroup container, Bundle savedInstanceState) { // Inflate the layout for this fragment View view = inflater.inflate(R.layout.fragment_categories_details, container, false); ButterKnife.bind(this, view); Picasso.get().load(mCategory.getStrCategoryThumb()).into(mImageLabel); mNameLabel.setText(mCategory.getStrCategory()); mDescriptionLabel.setText(mCategory.getStrCategoryDescription()); mSaveCategory.setOnClickListener(this); return view; } @Override public void onClick(View v) { if (v == mSaveCategory){ DatabaseReference categoryRef = FirebaseDatabase .getInstance() .getReference(); categoryRef.push().setValue(mCategory); Toast.makeText(getContext(),"Saved", Toast.LENGTH_SHORT).show(); } } }
import { Injectable } from "@angular/core"; import { environment } from "src/environments/environment.development"; import { HttpClient } from '@angular/common/http' import { Fleet, FleetWithNoId } from "../interfaces/Fleet"; @Injectable({ providedIn: 'root' }) export class FleetService { private url = `${environment.api}/fleets` constructor(private httpClient: HttpClient) { } getFleets() { return this.httpClient.get<Fleet[]>(this.url) } createFleet(fleet: FleetWithNoId) { return this.httpClient.post<Fleet>(`${this.url}/new`, fleet) } updateFleet(fleet: Fleet) { return this.httpClient.put<Fleet>(`${this.url}/${fleet.id}`, fleet) } remove(id: string) { return this.httpClient.delete<void>(`${this.url}/${id}`) } }
import type { Locator, Page } from '@playwright/test';

import BaseModal from './BaseModal';

/**
 * Page object for the "new board" modal: the board-name field, a dynamic
 * list of column rows (each with an input, a drag handle and a delete
 * button), and the submit button.
 */
export default class NewBoardModal extends BaseModal {
  readonly boardName: Locator;
  readonly columnRows: Locator;
  readonly addColumnBtn: Locator;
  readonly submitBtn: Locator;

  constructor(page: Page) {
    super(page);
    this.boardName = this.rootElement.locator('#board-name');
    this.columnRows = this.rootElement.locator('#board-columns > div');
    this.addColumnBtn = this.rootElement.locator('#board-columns-add');
    this.submitBtn = this.rootElement.getByTestId('board-submit');
  }

  /** Text input of the nth column row. */
  nthColInput(n: number) {
    return this.columnRows.nth(n).locator('input');
  }

  /** Delete button of the nth column row. */
  nthColDeleteBtn(n: number) {
    return this.columnRows.nth(n).getByTestId('multi-input-delete');
  }

  /** Drag handle of the nth column row. */
  nthColDragBtn(n: number) {
    return this.columnRows.nth(n).getByTestId('multi-input-drag');
  }

  /**
   * Locates the `.text-danger` error message inside the fieldset labelled
   * `fieldLabel`.
   *
   * @param fieldLabel Text of the label to locate the field by
   * @param n Nth element to check for error. Use when checking MultiValueInput
   */
  fieldError(fieldLabel: string, n?: number) {
    const elem = this.rootElement.locator('fieldset', { hasText: fieldLabel }).locator('.text-danger');
    // Check for `undefined` explicitly: the previous truthiness test
    // (`n ? … : …`) silently ignored `n = 0`, which is a valid index.
    return n === undefined ? elem : elem.nth(n);
  }
}
# 分词器实用程序 > 译者:[片刻小哥哥](https://github.com/jiangzhonglian) > > 项目地址:<https://huggingface.apachecn.org/docs/transformers/internal/tokenization_utils> > > 原始地址:<https://huggingface.co/docs/transformers/internal/tokenization_utils> 本页列出了分词器使用的所有实用函数,主要是类 [PreTrainedTokenizerBase](/docs/transformers/v4.35.2/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase) 实现之间的通用方法 [PreTrainedTokenizer](/docs/transformers/v4.35.2/en/main_classes/tokenizer#transformers.PreTrainedTokenizer) 和 [PreTrainedTokenizerFast](/docs/transformers/v4.35.2/en/main_classes/tokenizer#transformers.PreTrainedTokenizerFast) 和混合 [SpecialTokensMixin](/docs/transformers/v4.35.2/en/internal/tokenization_utils#transformers.SpecialTokensMixin) 。 其中大多数仅当您正在研究库中标记器的代码时才有用。 ## 预训练分词器库 ### 班级 变压器。 预训练分词器库 [< 来源 >](https://github.com/huggingface/transformers/blob/v4.35.2/src/transformers/tokenization_utils_base.py#L1543) ( \*\*夸格 ) 参数 * **模型\_最大\_长度** ( `int` , *选修的* )— 变压器模型输入的最大长度(以令牌数量计)。当分词器是 满载 [来自\_pretrained()](/docs/transformers/v4.35.2/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.from_pretrained) ,这将被设置为 为关联模型存储的值 `最大模型输入大小` (往上看)。如果没有提供值,将 默认为非常大整数( `int(1e30)` )。 * **填充\_side** ( `str` , *选修的* )— 模型应应用填充的一侧。应在[‘右’、‘左’]之间选择。 默认值是从同名的类属性中选取的。 * **截断\_side** ( `str` , *选修的* )— 模型应应用截断的一侧。应在[‘右’、‘左’]之间选择。 默认值是从同名的类属性中选取的。 * **聊天\_模板** ( `str` , *选修的* )— Jinja 模板字符串,将用于格式化聊天消息列表。看 <https://huggingface.co/docs/transformers/chat_templated> 以获得完整的描述。 * **模型\_输入\_名称** ( `列表[字符串]` , *选修的* )— 模型前向传递接受的输入列表(例如 `“token_type_ids”` 或者 `“注意掩码”` )。默认值是从同名的类属性中选取的。 * **bos\_token** ( `str` 或者 `tokenizers.AddedToken` , *选修的* )— 代表句子开头的特殊标记。将关联到 `self.bos_token` 和 `self.bos_token_id` 。 * **eos\_token** ( `str` 或者 `tokenizers.AddedToken` , *选修的* )— 代表句子结尾的特殊标记。将关联到 `self.eos_token` 和 `self.eos_token_id` 。 * **unk\_token** ( `str` 或者 `tokenizers.AddedToken` , *选修的* )— 代表词汇表外标记的特殊标记。将关联到 `self.unk_token` 和 `self.unk_token_id` 。 * **sep\_token** ( `str` 或者 `tokenizers.AddedToken` , 
*选修的* )— 在同一输入中分隔两个不同句子的特殊标记(例如 BERT 使用)。将 关联到 `self.sep_token` 和 `self.sep_token_id` 。 * **填充\_token** ( `str` 或者 `tokenizers.AddedToken` , *选修的* )— 一种特殊的令牌,用于使令牌数组具有相同的大小以进行批处理。然后将被忽略 注意机制或损失计算。将关联到 `self.pad_token` 和 `self.pad_token_id` 。 * **cls\_token** ( `str` 或者 `tokenizers.AddedToken` , *选修的* )— 表示输入类别的特殊标记(例如 BERT 使用)。将关联到 `self.cls_token` 和 `self.cls_token_id` 。 * **掩码\_token** ( `str` 或者 `tokenizers.AddedToken` , *选修的* )— 代表掩码标记的特殊标记(由掩码语言建模预训练目标使用,例如 伯特)。将关联到 `self.mask_token` 和 `self.mask_token_id` 。 * **额外\_特殊\_令牌** (元组或列表 `str` 或者 `tokenizers.AddedToken` , *选修的* )— 附加特殊标记的元组或列表。将它们添加到此处以确保在解码时跳过它们 `skip_special_tokens` 设置为 True。如果它们不是词汇表的一部分,则会在末尾添加 的词汇。 * **清理\_up\_标记化\_spaces** ( `布尔` , *选修的* ,默认为 '真实' )— 模型是否应该清除在分割输入文本时添加的空格 标记化过程。 * **分割\_特殊\_代币** ( `布尔` , *选修的* ,默认为 ‘假’ )— 在标记化过程中是否应分割特殊标记。默认行为是 不分割特殊令牌。这意味着如果 `<s>` 是个 `bos_token` , 然后 `tokenizer.tokenize("<s>") = ['<s>` ]。否则,如果 `split_special_tokens=True` , 然后 `tokenizer.tokenize("<s>")` 将给予 `['<', 's', '>']` 。该论点仅支持 ‘慢’ 暂时使用标记器。 基类为 [PreTrainedTokenizer](/docs/transformers/v4.35.2/en/main_classes/tokenizer#transformers.PreTrainedTokenizer) 和 [PreTrainedTokenizerFast](/docs/transformers/v4.35.2/en/main_classes/tokenizer#transformers.PreTrainedTokenizerFast) 。 处理这两个类的共享(主要是样板)方法。 类属性(被派生类覆盖) * **词汇\_文件\_名称** ( `字典[str, str]` ) — 一本字典,其中的键为 `__init__` 每个的关键字名称 模型所需的词汇文件,以及关联值,用于保存关联文件的文件名 (细绳)。 * **预训练\_vocab\_files\_map** ( `字典[str,字典[str,str]]` ) — 字典中的字典,其中 高级按键是 `__init__` 模型所需的每个词汇文件的关键字名称, 低级是 `快捷名称` 预训练模型的相关值是 `网址` 到 关联的预训练词汇文件。 * **最大\_模型\_输入\_尺寸** ( `字典[str,可选[int]]` ) — 一本字典,其中的键为 `快捷名称` 预训练模型的数量,以及该模型的序列输入的最大长度作为关联值, 或者 `无` 如果模型没有最大输入大小。 * **预训练\_init\_configuration** ( `字典[str,字典[str,任意]]` ) — 一本字典,其中的键为 `快捷名称` 预训练模型的数据,以及作为关联值的特定参数的字典 传递到 `__init__` 加载分词器时此预训练模型的分词器类的方法 与 [来自\_pretrained()](/docs/transformers/v4.35.2/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.from_pretrained) 方法。 * **模型\_输入\_名称** ( `列表[str]` ) — 模型前向传递中预期的输入列表。 * **填充\_side** ( `str` ) — 
模型应应用填充的一侧的默认值。 应该 ``对'`` 或者 ``左'` 。 * **截断\_side** ( `str` ) — 模型应截断一侧的默认值 应用。应该 ``对'`` 或者 ``左'` 。 #### \_\_称呼\_\_ [< 来源 >](https://github.com/huggingface/transformers/blob/v4.35.2/src/transformers/tokenization_utils_base.py#L2724) ( 文本 : 打字.Union[str, 打字.List[str], 打字.List[打字.List[str]]] = 无 文本\_对 : 打字.Union[str, 打字.List[str], 打字.List[打字.List[str]], NoneType] = 无 文本\_目标 : 打字.Union[str, 打字.List[str], 打字.List[打字.List[str]]] = 无 文本\_对\_目标 : 打字.Union[str, 打字.List[str], 打字.List[打字.List[str]], NoneType] = 无 添加\_特殊\_令牌 :布尔=真 填充 :打字.Union[bool,str,transformers.utils.generic.PaddingStrategy] = False 截断 :typing.Union[bool,str,transformers.tokenization\_utils\_base.TruncationStrategy] = None 最长长度 : 打字.Optional[int] = None 跨步 :整数=0 被\_分割\_成\_单词 :布尔=假 填充到多个 : 打字.Optional[int] = None 返回\_张量 :打字.Union[str,transformers.utils.generic.TensorType,NoneType] =无 返回\_token\_type\_ids : 打字.Optional[bool] = None 返回\_attention\_mask : 打字.Optional[bool] = None 返回\_overflowing\_tokens :布尔=假 返回\_特殊\_令牌\_掩码 :布尔=假 返回\_offsets\_mapping :布尔=假 返回\_length :布尔=假 冗长的 :布尔=真 \*\*夸格 ) → 导出常量元数据='未定义'; [BatchEncoding](/docs/transformers/v4.35.2/en/main_classes/tokenizer#transformers.BatchEncoding) 参数 * **text** ( `str` , `List[str]` , `List[List[str]]` , *optional* ) — The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set `is_split_into_words=True` (to lift the ambiguity with a batch of sequences). * **text\_pair** ( `str` , `List[str]` , `List[List[str]]` , *optional* ) — The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set `is_split_into_words=True` (to lift the ambiguity with a batch of sequences). 
* **text\_target** ( `str` , `List[str]` , `List[List[str]]` , *optional* ) — The sequence or batch of sequences to be encoded as target texts. Each sequence can be a string or a list of strings (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set `is_split_into_words=True` (to lift the ambiguity with a batch of sequences). * **text\_pair\_target** ( `str` , `List[str]` , `List[List[str]]` , *optional* ) — The sequence or batch of sequences to be encoded as target texts. Each sequence can be a string or a list of strings (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set `is_split_into_words=True` (to lift the ambiguity with a batch of sequences). * **add\_special\_tokens** ( `bool` , *optional* , defaults to `True` ) — Whether or not to add special tokens when encoding the sequences. This will use the underlying `PretrainedTokenizerBase.build_inputs_with_special_tokens` function, which defines which tokens are automatically added to the input ids. This is usefull if you want to add `bos` or `eos` tokens automatically. * **padding** ( `bool` , `str` or [PaddingStrategy](/docs/transformers/v4.35.2/en/internal/file_utils#transformers.utils.PaddingStrategy) , *optional* , defaults to `False` ) — Activates and controls padding. Accepts the following values: + `True` or `'longest'` : Pad to the longest sequence in the batch (or no padding if only a single sequence if provided). + `'max_length'` : Pad to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. + `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different lengths). 
* **truncation** ( `bool` , `str` or [TruncationStrategy](/docs/transformers/v4.35.2/en/internal/tokenization_utils#transformers.tokenization_utils_base.TruncationStrategy) , *optional* , defaults to `False` ) — Activates and controls truncation. Accepts the following values: + `True` or `'longest_first'` : Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided. + `'only_first'` : Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided. + `'only_second'` : Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided. + `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size). * **max\_length** ( `int` , *optional* ) — Controls the maximum length to use by one of the truncation/padding parameters. If left unset or set to `None` , this will use the predefined model maximum length if a maximum length is required by one of the truncation/padding parameters. If the model has no specific maximum input length (like XLNet) truncation/padding to a maximum length will be deactivated. 
* **stride** ( `int` , *optional* , defaults to 0) — If set to a number along with `max_length` , the overflowing tokens returned when `return_overflowing_tokens=True` will contain some tokens from the end of the truncated sequence returned to provide some overlap between truncated and overflowing sequences. The value of this argument defines the number of overlapping tokens. * **is\_split\_into\_words** ( `bool` , *optional* , defaults to `False` ) — Whether or not the input is already pre-tokenized (e.g., split into words). If set to `True` , the tokenizer assumes the input is already split into words (for instance, by splitting it on whitespace) which it will tokenize. This is useful for NER or token classification. * **pad\_to\_multiple\_of** ( `int` , *optional* ) — If set will pad the sequence to a multiple of the provided value. Requires `padding` to be activated. This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability `>= 7.5` (Volta). * **return\_tensors** ( `str` or [TensorType](/docs/transformers/v4.35.2/en/internal/file_utils#transformers.TensorType) , *optional* ) — If set, will return tensors instead of list of python integers. Acceptable values are: + `'tf'` : Return TensorFlow `tf.constant` objects. + `'pt'` : Return PyTorch `torch.Tensor` objects. + `'np'` : Return Numpy `np.ndarray` objects. * **return\_token\_type\_ids** ( `bool` , *optional* ) — Whether to return token type IDs. If left to the default, will return the token type IDs according to the specific tokenizer’s default, defined by the `return_outputs` attribute. [What are token type IDs?](../glossary#token-type-ids) * **return\_attention\_mask** ( `bool` , *optional* ) — Whether to return the attention mask. If left to the default, will return the attention mask according to the specific tokenizer’s default, defined by the `return_outputs` attribute. 
[What are attention masks?](../glossary#attention-mask) * **return\_overflowing\_tokens** ( `bool` , *optional* , defaults to `False` ) — Whether or not to return overflowing token sequences. If a pair of sequences of input ids (or a batch of pairs) is provided with `truncation_strategy = longest_first` or `True` , an error is raised instead of returning overflowing tokens. * **return\_special\_tokens\_mask** ( `bool` , *optional* , defaults to `False` ) — Whether or not to return special tokens mask information. * **return\_offsets\_mapping** ( `bool` , *optional* , defaults to `False` ) — Whether or not to return `(char_start, char_end)` for each token. This is only available on fast tokenizers inheriting from [PreTrainedTokenizerFast](/docs/transformers/v4.35.2/en/main_classes/tokenizer#transformers.PreTrainedTokenizerFast) , if using Python’s tokenizer, this method will raise `NotImplementedError` . * **return\_length** ( `bool` , *optional* , defaults to `False` ) — Whether or not to return the lengths of the encoded inputs. * **verbose** ( `bool` , *optional* , defaults to `True` ) — Whether or not to print more information and warnings. 
\*\*kwargs — passed to the `self.tokenize()` method 退货 导出常量元数据='未定义'; [BatchEncoding](/docs/transformers/v4.35.2/en/main_classes/tokenizer#transformers.BatchEncoding) 导出常量元数据='未定义'; A [BatchEncoding](/docs/transformers/v4.35.2/en/main_classes/tokenizer#transformers.BatchEncoding) 包含以下字段: * **输入\_ids** — 要馈送到模型的令牌 ID 列表。 [什么是输入 ID?](../glossary#input-ids) * **令牌\_type\_ids** — 要馈送到模型的令牌类型 ID 列表(当 `return_token_type_ids=True` 或者 如果 *“令牌\_type\_ids”* 是在 `self.model_input_names` )。 [什么是令牌类型 ID?](../glossary#token-type-ids) * **注意\_mask** — 指定模型应关注哪些标记的索引列表(当 `return_attention_mask=True` 或者如果 *“注意\_mask”* 是在 `self.model_input_names` )。 [什么是注意力蒙版?](../glossary#attention-mask) * **溢出\_tokens** — 溢出标记序列列表(当 `最大长度` 被指定并且 `return_overflowing_tokens=True` )。 * **num\_truncated\_tokens** — 被截断的令牌数量(当 `最大长度` 被指定并且 `return_overflowing_tokens=True` )。 * **特殊\_令牌\_掩码** — 0 和 1 的列表,其中 1 指定添加的特殊标记,0 指定 常规序列标记(当 `add_special_tokens=True` 和 `return_special_tokens_mask=True` )。 * **长度** — 输入的长度(当 `return_length=True` ) 对一个或多个序列或一对或多对序列进行标记和准备模型的主要方法 序列。 #### 应用\_聊天\_模板 [< 来源 >](https://github.com/huggingface/transformers/blob/v4.35.2/src/transformers/tokenization_utils_base.py#L1679) ( 对话 : Typing.Union[typing.List[typing.Dict[str, str]], ForwardRef('对话')] 聊天\_模板 : 打字.可选[str] =无 添加\_代\_提示 :布尔=假 标记化 :布尔=真 填充 :布尔=假 截断 :布尔=假 最长长度 : 打字.Optional[int] = None 返回\_张量 :打字.Union[str,transformers.utils.generic.TensorType,NoneType] =无 \*\*分词器\_kwargs ) → 导出常量元数据='未定义'; `列表[整数]` 参数 * **对话** (Union[List[Dict[str, str]], “Conversation”]) — Conversation 对象或字典列表 “角色”和“内容”键,代表到目前为止的聊天历史记录。 * **聊天\_模板** (str, *选修的* ) — 用于此转换的 Jinja 模板。如果 如果不通过,将使用模型的默认聊天模板。 * **添加\_生成\_提示** (布尔, *选修的* ) — 是否用指示的标记结束提示 助理消息的开始。当您想要从模型生成响应时,这非常有用。 请注意,此参数将传递到聊天模板,因此必须在 使该参数产生任何效果的模板。 * **标记化** ( `布尔` ,默认为 '真实' )— 是否对输出进行标记。如果 ‘假’ ,输出将是一个字符串。 * **填充** ( `布尔` ,默认为 ‘假’ )— 是否将序列填充到最大长度。如果 tokenize 为 则无效 ‘假’ 。 * **截断** ( `布尔` ,默认为 ‘假’ )— 是否以最大长度截断序列。如果 tokenize 为 则无效 ‘假’ 。 * **最长长度** ( `int` , *选修的* )— 用于填充或截断的最大长度(以标记为单位)。如果 
tokenize 为 则无效 ‘假’ 。如果 未指定,标记器的 `最大长度` 属性将用作默认值。 * **返回\_张量** ( `str` 或者 [TensorType](/docs/transformers/v4.35.2/en/internal/file_utils#transformers.TensorType) , *选修的* )— 如果设置,将返回特定框架的张量。如果 tokenize 为 则无效 ‘假’ 。可以接受 值为: + `'tf'` :返回TensorFlow `tf.张量` 对象。 + `'点'` :返回PyTorch `火炬.张量` 对象。 + `'np'` :返回 NumPy `np.ndarray` 对象。 + `'贾克斯'` :返回JAX `jnp.ndarray` 对象。 \*\*tokenizer\_kwargs — 传递给 tokenizer 的附加 kwargs。 退货 导出常量元数据='未定义'; `列表[整数]` 导出常量元数据='未定义'; 代表到目前为止标记化聊天的令牌 ID 列表,包括控制令牌。这 输出已准备好直接或通过类似方法传递给模型 `生成()` 。 转换 Conversation 对象或字典列表 `“角色”` 和 `“内容”` 令牌列表的键 id。此方法旨在与聊天模型一起使用,并将读取分词器的 chat\_template 属性 确定转换时要使用的格式和控制令牌。当chat\_template为None时,会回退 到班级级别指定的默认聊天模板。 #### 作为\_target\_tokenizer [< 来源 >](https://github.com/huggingface/transformers/blob/v4.35.2/src/transformers/tokenization_utils_base.py#L3850) ( ) 临时设置用于对目标进行编码的分词器。对于关联到的分词器很有用 序列到序列模型需要对标签进行稍微不同的处理。 #### 批量解码 [< 来源 >](https://github.com/huggingface/transformers/blob/v4.35.2/src/transformers/tokenization_utils_base.py#L3682) ( 序列 :typing.Union[typing.List[int]、typing.List[typing.List[int]]、ForwardRef('np.ndarray')、ForwardRef('torch.Tensor')、ForwardRef('tf.Tensor') ] 跳过\_特殊\_令牌 :布尔=假 清理\_up\_标记化\_空间 : 布尔 = 无 \*\*夸格 ) → 导出常量元数据='未定义'; `列表[str]` 参数 * **序列** ( `Union[List[int], List[List[int]], np.ndarray, torch.Tensor, tf.Tensor]` )— 标记化输入 ID 列表。可以使用以下方式获得 `__call__` 方法。 * **跳过\_特殊\_令牌** ( `布尔` , *选修的* ,默认为 ‘假’ )— 是否在解码中删除特殊标记。 * **清理\_up\_标记化\_spaces** ( `布尔` , *选修的* )— 是否清理标记化空间。如果 `无` ,将默认为 `self.clean_up_tokenization_spaces` 。 * **夸格斯** (附加关键字参数, *选修的* )— 将传递给底层模型特定的解码方法。 退货 导出常量元数据='未定义'; `列表[str]` 导出常量元数据='未定义'; 已解码句子的列表。 通过调用decode将令牌ID列表的列表转换为字符串列表。 #### 批\_编码\_plus [< 来源 >](https://github.com/huggingface/transformers/blob/v4.35.2/src/transformers/tokenization_utils_base.py#L3021) ( 批次\_文本\_或\_文本\_对 :typing.Union[typing.List[str]、typing.List[typing.Tuple[str, str]]、typing.List[typing.List[str]]、typing.List[typing.Tuple[typing.List[ 
str]、typing.List[str]]]、typing.List[typing.List[int]]、typing.List[typing.Tuple[typing.List[int]、typing.List[int]]]] 添加\_特殊\_令牌 :布尔=真 填充 :打字.Union[bool,str,transformers.utils.generic.PaddingStrategy] = False 截断 :typing.Union[bool,str,transformers.tokenization\_utils\_base.TruncationStrategy] = None 最长长度 : 打字.Optional[int] = None 跨步 :整数=0 被\_分割\_成\_单词 :布尔=假 填充到多个 : 打字.Optional[int] = None 返回\_张量 :打字.Union[str,transformers.utils.generic.TensorType,NoneType] =无 返回\_token\_type\_ids : 打字.Optional[bool] = None 返回\_attention\_mask : 打字.Optional[bool] = None 返回\_overflowing\_tokens :布尔=假 返回\_特殊\_令牌\_掩码 :布尔=假 返回\_offsets\_mapping :布尔=假 返回\_length :布尔=假 冗长的 :布尔=真 \*\*夸格 ) → 导出常量元数据='未定义'; [BatchEncoding](/docs/transformers/v4.35.2/en/main_classes/tokenizer#transformers.BatchEncoding) 参数 * **batch\_text\_or\_text\_pairs** ( `List[str]` , `List[Tuple[str, str]]` , `List[List[str]]` , `List[Tuple[List[str], List[str]]]` , and for not-fast tokenizers, also `List[List[int]]` , `List[Tuple[List[int], List[int]]]` ) — Batch of sequences or pair of sequences to be encoded. This can be a list of string/string-sequences/int-sequences or a list of pair of string/string-sequences/int-sequence (see details in `encode_plus` ). * **add\_special\_tokens** ( `bool` , *optional* , defaults to `True` ) — Whether or not to add special tokens when encoding the sequences. This will use the underlying `PretrainedTokenizerBase.build_inputs_with_special_tokens` function, which defines which tokens are automatically added to the input ids. This is usefull if you want to add `bos` or `eos` tokens automatically. * **padding** ( `bool` , `str` or [PaddingStrategy](/docs/transformers/v4.35.2/en/internal/file_utils#transformers.utils.PaddingStrategy) , *optional* , defaults to `False` ) — Activates and controls padding. Accepts the following values: + `True` or `'longest'` : Pad to the longest sequence in the batch (or no padding if only a single sequence if provided). 
+ `'max_length'` : Pad to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. + `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different lengths). * **truncation** ( `bool` , `str` or [TruncationStrategy](/docs/transformers/v4.35.2/en/internal/tokenization_utils#transformers.tokenization_utils_base.TruncationStrategy) , *optional* , defaults to `False` ) — Activates and controls truncation. Accepts the following values: + `True` or `'longest_first'` : Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided. + `'only_first'` : Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided. + `'only_second'` : Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided. + `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size). * **max\_length** ( `int` , *optional* ) — Controls the maximum length to use by one of the truncation/padding parameters. If left unset or set to `None` , this will use the predefined model maximum length if a maximum length is required by one of the truncation/padding parameters. 
If the model has no specific maximum input length (like XLNet) truncation/padding to a maximum length will be deactivated. * **stride** ( `int` , *optional* , defaults to 0) — If set to a number along with `max_length` , the overflowing tokens returned when `return_overflowing_tokens=True` will contain some tokens from the end of the truncated sequence returned to provide some overlap between truncated and overflowing sequences. The value of this argument defines the number of overlapping tokens. * **is\_split\_into\_words** ( `bool` , *optional* , defaults to `False` ) — Whether or not the input is already pre-tokenized (e.g., split into words). If set to `True` , the tokenizer assumes the input is already split into words (for instance, by splitting it on whitespace) which it will tokenize. This is useful for NER or token classification. * **pad\_to\_multiple\_of** ( `int` , *optional* ) — If set will pad the sequence to a multiple of the provided value. Requires `padding` to be activated. This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability `>= 7.5` (Volta). * **return\_tensors** ( `str` or [TensorType](/docs/transformers/v4.35.2/en/internal/file_utils#transformers.TensorType) , *optional* ) — If set, will return tensors instead of list of python integers. Acceptable values are: + `'tf'` : Return TensorFlow `tf.constant` objects. + `'pt'` : Return PyTorch `torch.Tensor` objects. + `'np'` : Return Numpy `np.ndarray` objects. * **return\_token\_type\_ids** ( `bool` , *optional* ) — Whether to return token type IDs. If left to the default, will return the token type IDs according to the specific tokenizer’s default, defined by the `return_outputs` attribute. [What are token type IDs?](../glossary#token-type-ids) * **return\_attention\_mask** ( `bool` , *optional* ) — Whether to return the attention mask. 
If left to the default, will return the attention mask according to the specific tokenizer’s default, defined by the `return_outputs` attribute. [What are attention masks?](../glossary#attention-mask) * **return\_overflowing\_tokens** ( `bool` , *optional* , defaults to `False` ) — Whether or not to return overflowing token sequences. If a pair of sequences of input ids (or a batch of pairs) is provided with `truncation_strategy = longest_first` or `True` , an error is raised instead of returning overflowing tokens. * **return\_special\_tokens\_mask** ( `bool` , *optional* , defaults to `False` ) — Whether or not to return special tokens mask information. * **return\_offsets\_mapping** ( `bool` , *optional* , defaults to `False` ) — Whether or not to return `(char_start, char_end)` for each token. This is only available on fast tokenizers inheriting from [PreTrainedTokenizerFast](/docs/transformers/v4.35.2/en/main_classes/tokenizer#transformers.PreTrainedTokenizerFast) , if using Python’s tokenizer, this method will raise `NotImplementedError` . * **return\_length** ( `bool` , *optional* , defaults to `False` ) — Whether or not to return the lengths of the encoded inputs. * **verbose** ( `bool` , *optional* , defaults to `True` ) — Whether or not to print more information and warnings. 
\*\*kwargs — passed to the `self.tokenize()` method 退货 导出常量元数据='未定义'; [BatchEncoding](/docs/transformers/v4.35.2/en/main_classes/tokenizer#transformers.BatchEncoding) 导出常量元数据='未定义'; A [BatchEncoding](/docs/transformers/v4.35.2/en/main_classes/tokenizer#transformers.BatchEncoding) 包含以下字段: * **输入\_ids** — 要馈送到模型的令牌 ID 列表。 [什么是输入 ID?](../glossary#input-ids) * **令牌\_type\_ids** — 要馈送到模型的令牌类型 ID 列表(当 `return_token_type_ids=True` 或者 如果 *“令牌\_type\_ids”* 是在 `self.model_input_names` )。 [什么是令牌类型 ID?](../glossary#token-type-ids) * **注意\_mask** — 指定模型应关注哪些标记的索引列表(当 `return_attention_mask=True` 或者如果 *“注意\_mask”* 是在 `self.model_input_names` )。 [什么是注意力蒙版?](../glossary#attention-mask) * **溢出\_tokens** — 溢出标记序列列表(当 `最大长度` 被指定并且 `return_overflowing_tokens=True` )。 * **num\_truncated\_tokens** — 被截断的令牌数量(当 `最大长度` 被指定并且 `return_overflowing_tokens=True` )。 * **特殊\_令牌\_掩码** — 0 和 1 的列表,其中 1 指定添加的特殊标记,0 指定 常规序列标记(当 `add_special_tokens=True` 和 `return_special_tokens_mask=True` )。 * **长度** — 输入的长度(当 `return_length=True` ) 对模型进行分词并准备序列列表或序列对列表。 此方法已被弃用, `__call__` 应该使用。 #### 使用特殊令牌构建输入 [< 来源 >](https://github.com/huggingface/transformers/blob/v4.35.2/src/transformers/tokenization_utils_base.py#L3317) ( 令牌\_ids\_0 : 打字.List[int] 令牌\_ids\_1 : 打字.可选[打字.列表[int]] =无 ) → 导出常量元数据='未定义'; `列表[整数]` 参数 * **令牌\_ids\_0** ( `列表[整数]` ) — 第一个标记化序列。 * **令牌\_ids\_1** ( `列表[整数]` , *选修的* ) — 第二个标记化序列。 退货 导出常量元数据='未定义'; `列表[整数]` 导出常量元数据='未定义'; 带有特殊标记的模型输入。 通过连接和构建序列分类任务的序列或一对序列的模型输入 添加特殊标记。 此实现不添加特殊标记,并且应在子类中重写此方法。 #### 清理\_up\_标记化 [< 来源 >](https://github.com/huggingface/transformers/blob/v4.35.2/src/transformers/tokenization_utils_base.py#L3793) ( 输出\_字符串 : 字符串 ) → 导出常量元数据='未定义'; `str` 参数 * **输出\_string** ( `str` ) — 要清理的文本。 退货 导出常量元数据='未定义'; `str` 导出常量元数据='未定义'; 清理干净的字符串。 清理简单的英语标记化工件列表,例如标点符号和缩写形式之前的空格。 #### 将\_tokens\_转换为\_string [< 来源 >](https://github.com/huggingface/transformers/blob/v4.35.2/src/transformers/tokenization_utils_base.py#L3669) ( 代币 : 打字.List[str] ) → 导出常量元数据='未定义'; `str` 参数 * **代币** ( 
`列表[str]` ) — 要加入字符串的标记。 退货 导出常量元数据='未定义'; `str` 导出常量元数据='未定义'; 加入的令牌。 将一系列标记转换为单个字符串。最简单的方法是 `" ".join(令牌)` 但我们 通常希望同时删除子词标记化伪影。 #### 创建\_token\_type\_ids\_from\_sequences [< 来源 >](https://github.com/huggingface/transformers/blob/v4.35.2/src/transformers/tokenization_utils_base.py#L3297) ( 令牌\_ids\_0 : 打字.List[int] 令牌\_ids\_1 : 打字.可选[打字.列表[int]] =无 ) → 导出常量元数据='未定义'; `列表[整数]` 参数 * **令牌\_ids\_0** ( `列表[整数]` ) — 第一个标记化序列。 * **令牌\_ids\_1** ( `列表[整数]` , *选修的* ) — 第二个标记化序列。 退货 导出常量元数据='未定义'; `列表[整数]` 导出常量元数据='未定义'; 令牌类型 ID。 创建与传递的序列相对应的令牌类型 ID。 [什么是代币类型 ID?](../glossary#token-type-ids) 如果模型有特殊的构建方法,则应在子类中重写。 #### 解码 [< 来源 >](https://github.com/huggingface/transformers/blob/v4.35.2/src/transformers/tokenization_utils_base.py#L3716) ( 令牌\_ids :typing.Union[int,typing.List[int],ForwardRef('np.ndarray'),ForwardRef('torch.Tensor'),ForwardRef('tf.Tensor')] 跳过\_特殊\_令牌 :布尔=假 清理\_up\_标记化\_空间 : 布尔 = 无 \*\*夸格 ) → 导出常量元数据='未定义'; `str` 参数 * **令牌\_ids** ( `Union[int, List[int], np.ndarray, torch.Tensor, tf.Tensor]` )— 标记化输入 ID 列表。可以使用以下方式获得 `__call__` 方法。 * **跳过\_特殊\_令牌** ( `布尔` , *选修的* ,默认为 ‘假’ )— 是否在解码中删除特殊标记。 * **清理\_up\_标记化\_spaces** ( `布尔` , *选修的* )— 是否清理标记化空间。如果 `无` ,将默认为 `self.clean_up_tokenization_spaces` 。 * **夸格斯** (附加关键字参数, *选修的* )— 将传递给底层模型特定的解码方法。 退货 导出常量元数据='未定义'; `str` 导出常量元数据='未定义'; 解码后的句子。 使用分词器和词汇表以及删除特殊选项的选项来转换字符串中的 id 序列 令牌并清理令牌化空间。 类似于做 `self.convert_tokens_to_string(self.convert_ids_to_tokens(token_ids))` 。 #### 编码 [< 来源 >](https://github.com/huggingface/transformers/blob/v4.35.2/src/transformers/tokenization_utils_base.py#L2532) ( 文本 : 打字.Union[str, 打字.List[str], 打字.List[int]] 文本\_对 : 打字.Union[str, 打字.List[str], 打字.List[int], NoneType] = 无 添加\_特殊\_令牌 :布尔=真 填充 :打字.Union[bool,str,transformers.utils.generic.PaddingStrategy] = False 截断 :typing.Union[bool,str,transformers.tokenization\_utils\_base.TruncationStrategy] = None 最长长度 : 打字.Optional[int] = None 跨步 :整数=0 返回\_张量 :打字.Union[str,transformers.utils.generic.TensorType,NoneType] =无 \*\*夸格 ) → 导出常量元数据='未定义'; 
`列表[整数]` , `火炬.张量` , `tf.张量` 或者 `np.ndarray` 参数 * **text** ( `str` , `List[str]` or `List[int]` ) — The first sequence to be encoded. This can be a string, a list of strings (tokenized string using the `tokenize` method) or a list of integers (tokenized string ids using the `convert_tokens_to_ids` method). * **text\_pair** ( `str` , `List[str]` or `List[int]` , *optional* ) — Optional second sequence to be encoded. This can be a string, a list of strings (tokenized string using the `tokenize` method) or a list of integers (tokenized string ids using the `convert_tokens_to_ids` method). * **add\_special\_tokens** ( `bool` , *optional* , defaults to `True` ) — Whether or not to add special tokens when encoding the sequences. This will use the underlying `PretrainedTokenizerBase.build_inputs_with_special_tokens` function, which defines which tokens are automatically added to the input ids. This is usefull if you want to add `bos` or `eos` tokens automatically. * **padding** ( `bool` , `str` or [PaddingStrategy](/docs/transformers/v4.35.2/en/internal/file_utils#transformers.utils.PaddingStrategy) , *optional* , defaults to `False` ) — Activates and controls padding. Accepts the following values: + `True` or `'longest'` : Pad to the longest sequence in the batch (or no padding if only a single sequence if provided). + `'max_length'` : Pad to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. + `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different lengths). * **truncation** ( `bool` , `str` or [TruncationStrategy](/docs/transformers/v4.35.2/en/internal/tokenization_utils#transformers.tokenization_utils_base.TruncationStrategy) , *optional* , defaults to `False` ) — Activates and controls truncation. 
Accepts the following values: + `True` or `'longest_first'` : Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided. + `'only_first'` : Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided. + `'only_second'` : Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided. + `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size). * **max\_length** ( `int` , *optional* ) — Controls the maximum length to use by one of the truncation/padding parameters. If left unset or set to `None` , this will use the predefined model maximum length if a maximum length is required by one of the truncation/padding parameters. If the model has no specific maximum input length (like XLNet) truncation/padding to a maximum length will be deactivated. * **stride** ( `int` , *optional* , defaults to 0) — If set to a number along with `max_length` , the overflowing tokens returned when `return_overflowing_tokens=True` will contain some tokens from the end of the truncated sequence returned to provide some overlap between truncated and overflowing sequences. The value of this argument defines the number of overlapping tokens. 
* **is\_split\_into\_words** ( `bool` , *optional* , defaults to `False` ) — Whether or not the input is already pre-tokenized (e.g., split into words). If set to `True` , the tokenizer assumes the input is already split into words (for instance, by splitting it on whitespace) which it will tokenize. This is useful for NER or token classification. * **pad\_to\_multiple\_of** ( `int` , *optional* ) — If set will pad the sequence to a multiple of the provided value. Requires `padding` to be activated. This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability `>= 7.5` (Volta). * **return\_tensors** ( `str` or [TensorType](/docs/transformers/v4.35.2/en/internal/file_utils#transformers.TensorType) , *optional* ) — If set, will return tensors instead of list of python integers. Acceptable values are: + `'tf'` : Return TensorFlow `tf.constant` objects. + `'pt'` : Return PyTorch `torch.Tensor` objects. + `'np'` : Return Numpy `np.ndarray` objects. \*\*kwargs — Passed along to the `.tokenize()` method. 
退货 导出常量元数据='未定义'; `列表[整数]` , `火炬.张量` , `tf.张量` 或者 `np.ndarray` 导出常量元数据='未定义'; 文本的标记化 ID。 使用分词器和词汇表将字符串转换为 id(整数)序列。 和做一样 `self.convert_tokens_to_ids(self.tokenize(text))` 。 #### 编码\_plus [< 来源 >](https://github.com/huggingface/transformers/blob/v4.35.2/src/transformers/tokenization_utils_base.py#L2925) ( 文本 : 打字.Union[str, 打字.List[str], 打字.List[int]] 文本\_对 : 打字.Union[str, 打字.List[str], 打字.List[int], NoneType] = 无 添加\_特殊\_令牌 :布尔=真 填充 :打字.Union[bool,str,transformers.utils.generic.PaddingStrategy] = False 截断 :typing.Union[bool,str,transformers.tokenization\_utils\_base.TruncationStrategy] = None 最长长度 : 打字.Optional[int] = None 跨步 :整数=0 被\_分割\_成\_单词 :布尔=假 填充到多个 : 打字.Optional[int] = None 返回\_张量 :打字.Union[str,transformers.utils.generic.TensorType,NoneType] =无 返回\_token\_type\_ids : 打字.Optional[bool] = None 返回\_attention\_mask : 打字.Optional[bool] = None 返回\_overflowing\_tokens :布尔=假 返回\_特殊\_令牌\_掩码 :布尔=假 返回\_offsets\_mapping :布尔=假 返回\_length :布尔=假 冗长的 :布尔=真 \*\*夸格 ) → 导出常量元数据='未定义'; [BatchEncoding](/docs/transformers/v4.35.2/en/main_classes/tokenizer#transformers.BatchEncoding) 参数 * **text** ( `str` , `List[str]` or `List[int]` (the latter only for not-fast tokenizers)) — The first sequence to be encoded. This can be a string, a list of strings (tokenized string using the `tokenize` method) or a list of integers (tokenized string ids using the `convert_tokens_to_ids` method). * **text\_pair** ( `str` , `List[str]` or `List[int]` , *optional* ) — Optional second sequence to be encoded. This can be a string, a list of strings (tokenized string using the `tokenize` method) or a list of integers (tokenized string ids using the `convert_tokens_to_ids` method). * **add\_special\_tokens** ( `bool` , *optional* , defaults to `True` ) — Whether or not to add special tokens when encoding the sequences. This will use the underlying `PretrainedTokenizerBase.build_inputs_with_special_tokens` function, which defines which tokens are automatically added to the input ids. 
This is useful if you want to add `bos` or `eos` tokens automatically. * **padding** ( `bool` , `str` or [PaddingStrategy](/docs/transformers/v4.35.2/en/internal/file_utils#transformers.utils.PaddingStrategy) , *optional* , defaults to `False` ) — Activates and controls padding. Accepts the following values: + `True` or `'longest'` : Pad to the longest sequence in the batch (or no padding if only a single sequence is provided). + `'max_length'` : Pad to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. + `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different lengths). * **truncation** ( `bool` , `str` or [TruncationStrategy](/docs/transformers/v4.35.2/en/internal/tokenization_utils#transformers.tokenization_utils_base.TruncationStrategy) , *optional* , defaults to `False` ) — Activates and controls truncation. Accepts the following values: + `True` or `'longest_first'` : Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided. + `'only_first'` : Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided. + `'only_second'` : Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided. 
+ `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size). * **max\_length** ( `int` , *optional* ) — Controls the maximum length to use by one of the truncation/padding parameters. If left unset or set to `None` , this will use the predefined model maximum length if a maximum length is required by one of the truncation/padding parameters. If the model has no specific maximum input length (like XLNet) truncation/padding to a maximum length will be deactivated. * **stride** ( `int` , *optional* , defaults to 0) — If set to a number along with `max_length` , the overflowing tokens returned when `return_overflowing_tokens=True` will contain some tokens from the end of the truncated sequence returned to provide some overlap between truncated and overflowing sequences. The value of this argument defines the number of overlapping tokens. * **is\_split\_into\_words** ( `bool` , *optional* , defaults to `False` ) — Whether or not the input is already pre-tokenized (e.g., split into words). If set to `True` , the tokenizer assumes the input is already split into words (for instance, by splitting it on whitespace) which it will tokenize. This is useful for NER or token classification. * **pad\_to\_multiple\_of** ( `int` , *optional* ) — If set will pad the sequence to a multiple of the provided value. Requires `padding` to be activated. This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability `>= 7.5` (Volta). * **return\_tensors** ( `str` or [TensorType](/docs/transformers/v4.35.2/en/internal/file_utils#transformers.TensorType) , *optional* ) — If set, will return tensors instead of list of python integers. Acceptable values are: + `'tf'` : Return TensorFlow `tf.constant` objects. + `'pt'` : Return PyTorch `torch.Tensor` objects. + `'np'` : Return Numpy `np.ndarray` objects. 
* **return\_token\_type\_ids** ( `bool` , *optional* ) — Whether to return token type IDs. If left to the default, will return the token type IDs according to the specific tokenizer’s default, defined by the `return_outputs` attribute. [What are token type IDs?](../glossary#token-type-ids) * **return\_attention\_mask** ( `bool` , *optional* ) — Whether to return the attention mask. If left to the default, will return the attention mask according to the specific tokenizer’s default, defined by the `return_outputs` attribute. [What are attention masks?](../glossary#attention-mask) * **return\_overflowing\_tokens** ( `bool` , *optional* , defaults to `False` ) — Whether or not to return overflowing token sequences. If a pair of sequences of input ids (or a batch of pairs) is provided with `truncation_strategy = longest_first` or `True` , an error is raised instead of returning overflowing tokens. * **return\_special\_tokens\_mask** ( `bool` , *optional* , defaults to `False` ) — Whether or not to return special tokens mask information. * **return\_offsets\_mapping** ( `bool` , *optional* , defaults to `False` ) — Whether or not to return `(char_start, char_end)` for each token. This is only available on fast tokenizers inheriting from [PreTrainedTokenizerFast](/docs/transformers/v4.35.2/en/main_classes/tokenizer#transformers.PreTrainedTokenizerFast) , if using Python’s tokenizer, this method will raise `NotImplementedError` . * **return\_length** ( `bool` , *optional* , defaults to `False` ) — Whether or not to return the lengths of the encoded inputs. * **verbose** ( `bool` , *optional* , defaults to `True` ) — Whether or not to print more information and warnings. 
\*\*kwargs — passed to the `self.tokenize()` method 退货 导出常量元数据='未定义'; [BatchEncoding](/docs/transformers/v4.35.2/en/main_classes/tokenizer#transformers.BatchEncoding) 导出常量元数据='未定义'; A [BatchEncoding](/docs/transformers/v4.35.2/en/main_classes/tokenizer#transformers.BatchEncoding) 包含以下字段: * **输入\_ids** — 要馈送到模型的令牌 ID 列表。 [什么是输入 ID?](../glossary#input-ids) * **令牌\_type\_ids** — 要馈送到模型的令牌类型 ID 列表(当 `return_token_type_ids=True` 或者 如果 *“令牌\_type\_ids”* 是在 `self.model_input_names` )。 [什么是令牌类型 ID?](../glossary#token-type-ids) * **注意\_mask** — 指定模型应关注哪些标记的索引列表(当 `return_attention_mask=True` 或者如果 *“注意\_mask”* 是在 `self.model_input_names` )。 [什么是注意力蒙版?](../glossary#attention-mask) * **溢出\_tokens** — 溢出标记序列列表(当 `最大长度` 被指定并且 `return_overflowing_tokens=True` )。 * **num\_truncated\_tokens** — 被截断的令牌数量(当 `最大长度` 被指定并且 `return_overflowing_tokens=True` )。 * **特殊\_令牌\_掩码** — 0 和 1 的列表,其中 1 指定添加的特殊标记,0 指定 常规序列标记(当 `add_special_tokens=True` 和 `return_special_tokens_mask=True` )。 * **长度** — 输入的长度(当 `return_length=True` ) 对一个序列或一对序列进行标记并为模型准备。 此方法已被弃用, `__call__` 应该使用。 #### 来自\_预训练 [< 来源 >](https://github.com/huggingface/transformers/blob/v4.35.2/src/transformers/tokenization_utils_base.py#L1798) ( 预训练\_model\_name\_or\_path : 打字.Union[str, os.PathLike] \*初始化\_输入 缓存\_dir : 打字.Union[str, os.PathLike, NoneType] = None 强制\_下载 :布尔=假 仅本地\_文件\_ :布尔=假 代币 : 打字.Union[str, bool, NoneType] = None 修订 : str = '主' \*\*夸格 ) 参数 * **预训练\_model\_name\_or\_path** ( `str` 或者 `os.PathLike` )— 可以是: + 一个字符串, *型号编号* 托管在 Huggingface.co 上的模型存储库内的预定义标记生成器。 有效的模型 ID 可以位于根级别,例如 `bert-base-uncased` ,或命名空间下 用户或组织名称,例如 `dbmdz/bert-base-德语-大小写` 。 + 通往a的路径 *目录* 包含分词器所需的词汇文件,例如保存的 使用 [save\_pretrained()](/docs/transformers/v4.35.2/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase.save_pretrained) 方法,例如 `./my_model_directory/` 。 + ( **已弃用** ,不适用于所有派生类)单个已保存词汇表的路径或 url 文件(当且仅当分词器仅需要单个词汇文件(如 Bert 或 XLNet)时),例如 `./my_model_directory/vocab.txt` 。 * **缓存\_dir** ( `str` 或者 `os.PathLike` , *选修的* )— 
如果是,则应缓存下载的预定义分词器词汇文件的目录路径 不应使用标准缓存。 * **强制\_下载** ( `布尔` , *选修的* ,默认为 ‘假’ )— 是否强制(重新)下载词汇文件并覆盖缓存版本(如果它们) 存在。 * **继续\_下载** ( `布尔` , *选修的* ,默认为 ‘假’ )— 是否删除接收不完整的文件。如果存在此类文件,请尝试恢复下载 存在。 * **代理** ( `字典[str, str]` , *选修的* )— 由协议或端点使用的代理服务器字典,例如, `{'http': 'foo.bar:3128', 'http://主机名': 'foo.bar:4012'}` 。每个请求都会使用代理。 * **代币** ( `str` 或者 *布尔* , *选修的* )— 用作远程文件的 HTTP 承载授权的令牌。如果 '真实' ,将使用生成的令牌 跑步时 `huggingface-cli 登录` (存储在 `~/.huggingface` )。 * **本地\_文件\_only** ( `布尔` , *选修的* ,默认为 ‘假’ )— 是否仅依赖本地文件而不尝试下载任何文件。 * **修订** ( `str` , *选修的* ,默认为 `“主要”` )— 要使用的具体型号版本。它可以是分支名称、标签名称或提交 ID,因为我们使用 基于 git 的系统,用于在 Huggingface.co 上存储模型和其他工件,因此 `修订` 可以是任何 git 允许的标识符。 * **子文件夹** ( `str` , *选修的* )— 如果相关文件位于 Huggingface.co 上模型存储库的子文件夹内(例如, facebook/rag-token-base),请在此处指定。 * **输入** (额外的位置参数, *选修的* )— 将被传递到 Tokenizer `__init__` 方法。 * **夸格斯** (附加关键字参数, *选修的* )— 将被传递到 Tokenizer `__init__` 方法。可用于设置特殊标记,例如 `bos_token` , `eos_token` , `unk_token` , `sep_token` , `pad_token` , `cls_token` , `掩码令牌` , `additional_special_tokens` 。请参阅中的参数 `__init__` 更多细节。 实例化一个 [PreTrainedTokenizerBase](/docs/transformers/v4.35.2/en/internal/tokenization_utils#transformers.PreTrainedTokenizerBase) (或派生类)来自预定义的 分词器。 通过 `令牌=真` 当您想使用私有模型时需要。 例子: ``` # We can't instantiate directly the base class \*PreTrainedTokenizerBase\* so let's show our examples on a derived class: BertTokenizer # Download vocabulary from huggingface.co and cache. tokenizer = BertTokenizer.from_pretrained("bert-base-uncased") # Download vocabulary from huggingface.co (user-uploaded) and cache. tokenizer = BertTokenizer.from_pretrained("dbmdz/bert-base-german-cased") # If vocabulary files are in a directory (e.g. 
tokenizer was saved using \*save\_pretrained('./test/saved\_model/')\*) tokenizer = BertTokenizer.from_pretrained("./test/saved\_model/") # If the tokenizer uses a single vocabulary file, you can point directly to this file tokenizer = BertTokenizer.from_pretrained("./test/saved\_model/my\_vocab.txt") # You can link tokens to special vocabulary when instantiating tokenizer = BertTokenizer.from_pretrained("bert-base-uncased", unk_token="<unk>") # You should be sure '<unk>' is in the vocabulary when doing that. # Otherwise use tokenizer.add\_special\_tokens({'unk\_token': '<unk>'}) instead) assert tokenizer.unk_token == "<unk>" ``` #### 获取\_特殊\_令牌\_掩码 [< 来源 >](https://github.com/huggingface/transformers/blob/v4.35.2/src/transformers/tokenization_utils_base.py#L3762) ( 令牌\_ids\_0 : 打字.List[int] 令牌\_ids\_1 : 打字.可选[打字.列表[int]] =无 已经\_有\_特殊\_令牌 :布尔=假 ) → 导出常量元数据='未定义'; [0, 1] 范围内的整数列表 参数 * **令牌\_ids\_0** ( `列表[整数]` )— 第一个序列的 id 列表。 * **令牌\_ids\_1** ( `列表[整数]` , *选修的* )— 第二个序列的 id 列表。 * **已经\_有\_特殊\_令牌** ( `布尔` , *选修的* ,默认为 ‘假’ )— 标记列表是否已使用模型的特殊标记进行格式化。 退货 导出常量元数据='未定义'; [0, 1] 范围内的整数列表 导出常量元数据='未定义'; 1 表示特殊标记,0 表示序列标记。 从未添加特殊标记的标记列表中检索序列 ID。添加时会调用该方法 使用标记器的特殊标记 `准备模型` 或者 `编码加` 方法。 #### 获取\_vocab [< 来源 >](https://github.com/huggingface/transformers/blob/v4.35.2/src/transformers/tokenization_utils_base.py#L1667) ( ) → 导出常量元数据='未定义'; `字典[str, int]` 退货 导出常量元数据='未定义'; `字典[str, int]` 导出常量元数据='未定义'; 词汇。 将词汇表作为标记字典返回到索引。 `tokenizer.get_vocab()[令牌]` 相当于 `tokenizer.convert_tokens_to_ids(token)` 什么时候 `令牌` 在里面 词汇。 #### 软垫 [< 来源 >](https://github.com/huggingface/transformers/blob/v4.35.2/src/transformers/tokenization_utils_base.py#L3124) ( 编码\_输入 :typing.Union[transformers.tokenization\_utils\_base.BatchEncoding,typing.List[transformers.tokenization\_utils\_base.BatchEncoding],typing.Dict[str,typing.List[int]],typing.Dict[str,打字.List[打字.List[int]]],打字.List[打字.Dict[str,打字.List[int]]]] 填充 :打字.Union[bool,str,transformers.utils.generic.PaddingStrategy] = True 最长长度 : 打字.Optional[int] = 
None 填充到多个 : 打字.Optional[int] = None 返回\_attention\_mask : 打字.Optional[bool] = None 返回\_张量 :打字.Union[str,transformers.utils.generic.TensorType,NoneType] =无 冗长的 :布尔=真 ) 参数 * **编码\_输入** ( [BatchEncoding](/docs/transformers/v4.35.2/en/main_classes/tokenizer#transformers.BatchEncoding) , 列表 [BatchEncoding](/docs/transformers/v4.35.2/en/main_classes/tokenizer#transformers.BatchEncoding) , `字典[str,列表[int]]` , `字典[str,列表[列表[int]]` 或者 `列表[字典[str,列表[int]]]` )— 标记化输入。可以代表一个输入( [BatchEncoding](/docs/transformers/v4.35.2/en/main_classes/tokenizer#transformers.BatchEncoding) 或者 `字典[str,列表[int]]` )或一批 标记化输入(列表 [BatchEncoding](/docs/transformers/v4.35.2/en/main_classes/tokenizer#transformers.BatchEncoding) , *字典[str,列表[列表[int]]]* 或者 *列表[字典[str, 列表[整数]]]* ),因此您可以在预处理期间以及 PyTorch 数据加载器中使用此方法 整理功能。 代替 `列表[整数]` 你可以有张量(numpy 数组、PyTorch 张量或 TensorFlow 张量),请参阅 上面关于返回类型的注释。 * **填充** ( `布尔` , `str` 或者 [PaddingStrategy](/docs/transformers/v4.35.2/en/internal/file_utils#transformers.utils.PaddingStrategy) , *选修的* ,默认为 '真实' )— 选择一种策略来填充返回的序列(根据模型的填充边和填充 索引)其中: + `真实` 或者 「最长」 :填充到批次中最长的序列(如果只有一个,则不填充 顺序(如果提供)。 + `'最大长度'` :填充到参数指定的最大长度 `最大长度` 或最大 如果未提供该参数,则模型可接受的输入长度。 + `假` 或者 `'不要垫'` (默认):无填充(即,可以输出具有不同序列的批次 长度)。 * **最长长度** ( `int` , *选修的* )— 返回列表的最大长度和可选的填充长度(见上文)。 * **填充\_到\_多个\_的** ( `int` , *选修的* )— 如果设置,会将序列填充为提供值的倍数。 这对于在具有计算能力的 NVIDIA 硬件上使用 Tensor Core 特别有用 `>= 7.5` (沃尔特)。 * **返回\_attention\_mask** ( `布尔` , *选修的* )— 是否返回注意力掩码。如果保留默认值,将根据 特定分词器的默认值,由 `返回输出` 属性。 [什么是注意力蒙版?](../glossary#attention-mask) * **返回\_张量** ( `str` 或者 [TensorType](/docs/transformers/v4.35.2/en/internal/file_utils#transformers.TensorType) , *选修的* )— 如果设置,将返回张量而不是 python 整数列表。可接受的值为: + `'tf'` :返回TensorFlow `tf.常量` 对象。 + `'点'` :返回PyTorch `火炬.张量` 对象。 + `'np'` :返回Numpy `np.ndarray` 对象。 * **详细** ( `布尔` , *选修的* ,默认为 '真实' )— 是否打印更多信息和警告。 将单个编码输入或一批​​编码输入填充到预定义长度或最大序列长度 在批次中。 填充侧(左/右)填充标记 ID 在标记器级别定义(使用 `self.padding_side` , `self.pad_token_id` 和 `self.pad_token_type_id` )。 请注意,对于快速分词器,使用 `__call__` 方法比使用方法编码更快 文本后跟一个电话 
`垫` 获取填充编码的方法。 如果 `编码输入` 传递的是 numpy 数组、PyTorch 张量或 TensorFlow 张量的字典, 结果将使用相同的类型,除非您提供不同的张量类型 `返回张量` 。如果是 PyTorch 张量,但是您将丢失张量的特定设备。 #### 准备模型 [< 来源 >](https://github.com/huggingface/transformers/blob/v4.35.2/src/transformers/tokenization_utils_base.py#L3337) ( id : 打字.List[int] 对\_ids : 打字.可选[打字.列表[int]] =无 添加\_特殊\_令牌 :布尔=真 填充 :打字.Union[bool,str,transformers.utils.generic.PaddingStrategy] = False 截断 :typing.Union[bool,str,transformers.tokenization\_utils\_base.TruncationStrategy] = None 最长长度 : 打字.Optional[int] = None 跨步 :整数=0 填充到多个 : 打字.Optional[int] = None 返回\_张量 :打字.Union[str,transformers.utils.generic.TensorType,NoneType] =无 返回\_token\_type\_ids : 打字.Optional[bool] = None 返回\_attention\_mask : 打字.Optional[bool] = None 返回\_overflowing\_tokens :布尔=假 返回\_特殊\_令牌\_掩码 :布尔=假 返回\_offsets\_mapping :布尔=假 返回\_length :布尔=假 冗长的 :布尔=真 前置\_batch\_axis :布尔=假 \*\*夸格 ) → 导出常量元数据='未定义'; [BatchEncoding](/docs/transformers/v4.35.2/en/main_classes/tokenizer#transformers.BatchEncoding) 参数 * **ids** ( `List[int]` ) — Tokenized input ids of the first sequence. Can be obtained from a string by chaining the `tokenize` and `convert_tokens_to_ids` methods. * **pair\_ids** ( `List[int]` , *optional* ) — Tokenized input ids of the second sequence. Can be obtained from a string by chaining the `tokenize` and `convert_tokens_to_ids` methods. * **add\_special\_tokens** ( `bool` , *optional* , defaults to `True` ) — Whether or not to add special tokens when encoding the sequences. This will use the underlying `PretrainedTokenizerBase.build_inputs_with_special_tokens` function, which defines which tokens are automatically added to the input ids. This is usefull if you want to add `bos` or `eos` tokens automatically. * **padding** ( `bool` , `str` or [PaddingStrategy](/docs/transformers/v4.35.2/en/internal/file_utils#transformers.utils.PaddingStrategy) , *optional* , defaults to `False` ) — Activates and controls padding. 
Accepts the following values: + `True` or `'longest'` : Pad to the longest sequence in the batch (or no padding if only a single sequence is provided). + `'max_length'` : Pad to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. + `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different lengths). * **truncation** ( `bool` , `str` or [TruncationStrategy](/docs/transformers/v4.35.2/en/internal/tokenization_utils#transformers.tokenization_utils_base.TruncationStrategy) , *optional* , defaults to `False` ) — Activates and controls truncation. Accepts the following values: + `True` or `'longest_first'` : Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided. + `'only_first'` : Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided. + `'only_second'` : Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided. + `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size). * **max\_length** ( `int` , *optional* ) — Controls the maximum length to use by one of the truncation/padding parameters. 
If left unset or set to `None` , this will use the predefined model maximum length if a maximum length is required by one of the truncation/padding parameters. If the model has no specific maximum input length (like XLNet) truncation/padding to a maximum length will be deactivated. * **stride** ( `int` , *optional* , defaults to 0) — If set to a number along with `max_length` , the overflowing tokens returned when `return_overflowing_tokens=True` will contain some tokens from the end of the truncated sequence returned to provide some overlap between truncated and overflowing sequences. The value of this argument defines the number of overlapping tokens. * **is\_split\_into\_words** ( `bool` , *optional* , defaults to `False` ) — Whether or not the input is already pre-tokenized (e.g., split into words). If set to `True` , the tokenizer assumes the input is already split into words (for instance, by splitting it on whitespace) which it will tokenize. This is useful for NER or token classification. * **pad\_to\_multiple\_of** ( `int` , *optional* ) — If set will pad the sequence to a multiple of the provided value. Requires `padding` to be activated. This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability `>= 7.5` (Volta). * **return\_tensors** ( `str` or [TensorType](/docs/transformers/v4.35.2/en/internal/file_utils#transformers.TensorType) , *optional* ) — If set, will return tensors instead of list of python integers. Acceptable values are: + `'tf'` : Return TensorFlow `tf.constant` objects. + `'pt'` : Return PyTorch `torch.Tensor` objects. + `'np'` : Return Numpy `np.ndarray` objects. * **return\_token\_type\_ids** ( `bool` , *optional* ) — Whether to return token type IDs. If left to the default, will return the token type IDs according to the specific tokenizer’s default, defined by the `return_outputs` attribute. 
[What are token type IDs?](../glossary#token-type-ids) * **return\_attention\_mask** ( `bool` , *optional* ) — Whether to return the attention mask. If left to the default, will return the attention mask according to the specific tokenizer’s default, defined by the `return_outputs` attribute. [What are attention masks?](../glossary#attention-mask) * **return\_overflowing\_tokens** ( `bool` , *optional* , defaults to `False` ) — Whether or not to return overflowing token sequences. If a pair of sequences of input ids (or a batch of pairs) is provided with `truncation_strategy = longest_first` or `True` , an error is raised instead of returning overflowing tokens. * **return\_special\_tokens\_mask** ( `bool` , *optional* , defaults to `False` ) — Whether or not to return special tokens mask information. * **return\_offsets\_mapping** ( `bool` , *optional* , defaults to `False` ) — Whether or not to return `(char_start, char_end)` for each token. This is only available on fast tokenizers inheriting from [PreTrainedTokenizerFast](/docs/transformers/v4.35.2/en/main_classes/tokenizer#transformers.PreTrainedTokenizerFast) , if using Python’s tokenizer, this method will raise `NotImplementedError` . * **return\_length** ( `bool` , *optional* , defaults to `False` ) — Whether or not to return the lengths of the encoded inputs. * **verbose** ( `bool` , *optional* , defaults to `True` ) — Whether or not to print more information and warnings. 
\*\*kwargs — passed to the `self.tokenize()` method 退货 导出常量元数据='未定义'; [BatchEncoding](/docs/transformers/v4.35.2/en/main_classes/tokenizer#transformers.BatchEncoding) 导出常量元数据='未定义'; A [BatchEncoding](/docs/transformers/v4.35.2/en/main_classes/tokenizer#transformers.BatchEncoding) 包含以下字段: * **输入\_ids** — 要馈送到模型的令牌 ID 列表。 [什么是输入 ID?](../glossary#input-ids) * **令牌\_type\_ids** — 要馈送到模型的令牌类型 ID 列表(当 `return_token_type_ids=True` 或者 如果 *“令牌\_type\_ids”* 是在 `self.model_input_names` )。 [什么是令牌类型 ID?](../glossary#token-type-ids) * **注意\_mask** — 指定模型应关注哪些标记的索引列表(当 `return_attention_mask=True` 或者如果 *“注意\_mask”* 是在 `self.model_input_names` )。 [什么是注意力蒙版?](../glossary#attention-mask) * **溢出\_tokens** — 溢出标记序列列表(当 `最大长度` 被指定并且 `return_overflowing_tokens=True` )。 * **num\_truncated\_tokens** — 被截断的令牌数量(当 `最大长度` 被指定并且 `return_overflowing_tokens=True` )。 * **特殊\_令牌\_掩码** — 0 和 1 的列表,其中 1 指定添加的特殊标记,0 指定 常规序列标记(当 `add_special_tokens=True` 和 `return_special_tokens_mask=True` )。 * **长度** — 输入的长度(当 `return_length=True` ) 准备一个输入 id 序列或一对输入 id 序列,以便模型可以使用它。它 添加特殊标记,如果溢出则截断序列,同时考虑特殊标记和 管理溢出令牌的移动窗口(具有用户定义的步幅)。请注意,对于 *对\_id* 不同于 `无` 和 *截断\_策略 = 最长\_优先* 或者 '真实' , 无法返回 溢出的代币。这样的参数组合会引发错误。 #### 准备\_seq2seq\_batch [< 来源 >](https://github.com/huggingface/transformers/blob/v4.35.2/src/transformers/tokenization_utils_base.py#L3893) ( 源\_文本 : 打字.List[str] tgt\_文本 : 打字.可选[打字.列表[str]] =无 最长长度 : 打字.Optional[int] = None 最大目标长度 : 打字.Optional[int] = None 填充 : str = '最长' 返回\_张量 :str=无 截断 :布尔=真 \*\*夸格 ) → 导出常量元数据='未定义'; [BatchEncoding](/docs/transformers/v4.35.2/en/main_classes/tokenizer#transformers.BatchEncoding) 参数 * **src\_texts** ( `列表[str]` )— 要总结的文档列表或源语言文本。 * **tgt\_文本** ( `列表` , *选修的* )— 摘要或目标语言文本列表。 * **最长长度** ( `int` , *选修的* )— 控制编码器输入的最大长度(要摘要的文档或源语言文本)如果 未设置或设置为 `无` ,如果最大长度为,这将使用预定义的模型最大长度 截断/填充参数之一所需。如果模型没有特定的最大输入长度 (如 XLNet)截断/填充到最大长度将被停用。 * **最大\_目标\_长度** ( `int` , *选修的* )— 控制解码器输入的最大长度(目标语言文本或摘要)如果未设置或已设置 到 `无` ,这将使用 max\_length 值。 * **填充** ( `布尔` , `str` 或者 
[PaddingStrategy](/docs/transformers/v4.35.2/en/internal/file_utils#transformers.utils.PaddingStrategy) , *选修的* ,默认为 ‘假’ )— 激活并控制填充。接受以下值: + `真实` 或者 「最长」 :填充到批次中最长的序列(如果只有一个,则不填充 顺序(如果提供)。 + `'最大长度'` :填充到参数指定的最大长度 `最大长度` 或最大 如果未提供该参数,则模型可接受的输入长度。 + `假` 或者 `'不要垫'` (默认):无填充(即,可以输出具有不同序列的批次 长度)。 * **返回\_张量** ( `str` 或者 [TensorType](/docs/transformers/v4.35.2/en/internal/file_utils#transformers.TensorType) , *选修的* )— 如果设置,将返回张量而不是 python 整数列表。可接受的值为: + `'tf'` :返回TensorFlow `tf.常量` 对象。 + `'点'` :返回PyTorch `火炬.张量` 对象。 + `'np'` :返回Numpy `np.ndarray` 对象。 * **截断** ( `布尔` , `str` 或者 [TruncationStrategy](/docs/transformers/v4.35.2/en/internal/tokenization_utils#transformers.tokenization_utils_base.TruncationStrategy) , *选修的* ,默认为 '真实' )— 激活并控制截断。接受以下值: + `真实` 或者 `'最长优先'` :截断到参数指定的最大长度 `最大长度` 或者 如果未提供该参数,则为模型可接受的最大输入长度。这会 逐个截断标记,如果一对,则从该对中最长的序列中删除一个标记 提供序列(或一批对)。 + `'仅_第一个'` :截断到参数指定的最大长度 `最大长度` 或到 如果未提供该参数,则模型可接受的最大输入长度。这只会 如果提供了一对序列(或一批序列对),则截断一对序列中的第一个序列。 + `'仅_第二'` :截断到参数指定的最大长度 `最大长度` 或到 如果未提供该参数,则模型可接受的最大输入长度。这只会 如果提供了一对序列(或一批序列对),则截断一对序列中的第二个序列。 + `假` 或者 `'不截断'` (默认):无截断(即,可以输出具有序列长度的批次 大于模型最大允许输入大小)。 \*\*kwargs — 附加关键字参数传递给 `self.__call__` 。 退货 导出常量元数据='未定义'; [BatchEncoding](/docs/transformers/v4.35.2/en/main_classes/tokenizer#transformers.BatchEncoding) 导出常量元数据='未定义'; A [BatchEncoding](/docs/transformers/v4.35.2/en/main_classes/tokenizer#transformers.BatchEncoding) 包含以下字段: * **输入\_ids** — 要馈送到编码器的令牌 ID 列表。 * **注意\_mask** — 指定模型应关注哪些标记的索引列表。 * **标签** — tgt\_texts 的令牌 ID 列表。 全套按键 `[input_ids、attention_mask、标签]` ,仅当 tgt\_texts 被传递时才会返回。 否则,input\_ids、attention\_mask 将是唯一的键。 准备用于翻译的模型输入。为了获得最佳效果,请一次翻译一个句子。 #### 推送\_到\_集线器 [< 来源 >](https://github.com/huggingface/transformers/blob/v4.35.2/src/transformers/utils/hub.py#L791) ( 仓库\_id : 字符串 使用\_temp\_dir : 打字.Optional[bool] = None 提交\_消息 : 打字.可选[str] =无 私人的 : 打字.Optional[bool] = None 代币 : 打字.Union[bool, str, NoneType] = None 最大分片大小 : 打字.Union[int, str, NoneType] = '5GB' 创建\_pr :布尔=假 安全\_序列化 :布尔=真 修订 :str=无 提交\_描述 :str=无 
\*\*已弃用\_kwargs ) 参数 * **仓库\_id** ( `str` )— 您想要将标记生成器推送到的存储库的名称。它应包含您的组织名称 当推送到给定组织时。 * **使用\_temp\_dir** ( `布尔` , *选修的* )— 是否使用临时目录来存储推送到 Hub 之前保存的文件。 将默认为 '真实' 如果没有类似名称的目录 `repo_id` , ‘假’ 否则。 * **提交\_消息** ( `str` , *选修的* )— 推送时要提交的消息。将默认为 `“上传分词器”` 。 * **私人的** ( `布尔` , *选修的* )— 创建的存储库是否应该是私有的。 * **代币** ( `布尔` 或者 `str` , *选修的* )— 用作远程文件的 HTTP 承载授权的令牌。如果 '真实' ,将使用生成的令牌 跑步时 `huggingface-cli 登录` (存储在 `~/.huggingface` )。将默认为 '真实' 如果 `repo_url` 没有指定。 * **最大\_分片\_大小** ( `int` 或者 `str` , *选修的* ,默认为 `“5GB”` )— 仅适用于型号。分片之前检查点的最大大小。检查点分片 那么每个尺寸都会小于这个尺寸。如果表示为字符串,则后面需要跟数字 按一个单位(例如 `“5MB”` )。我们默认为 `“5GB”` 以便用户可以轻松地在免费层上加载模型 Google Colab 实例没有任何 CPU OOM 问题。 * **创建\_pr** ( `布尔` , *选修的* ,默认为 ‘假’ )— 是否使用上传的文件创建 PR 或直接提交。 * **安全\_序列化** ( `布尔` , *选修的* ,默认为 '真实' )— 是否将模型权重转换为安全张量格式以实现更安全的序列化。 * **修订** ( `str` , *选修的* )— 将上传的文件推送到的分支。 * **提交\_描述** ( `str` , *选修的* )— 将创建的提交的描述 将标记生成器文件上传到 🤗 模型中心。 例子: ``` from transformers import AutoTokenizer tokenizer = AutoTokenizer.from_pretrained("bert-base-cased") # Push the tokenizer to your namespace with the name "my-finetuned-bert". tokenizer.push_to_hub("my-finetuned-bert") # Push the tokenizer to an organization with the name "my-finetuned-bert". 
tokenizer.push_to_hub("huggingface/my-finetuned-bert") ``` #### 注册\_for\_auto\_class [< 来源 >](https://github.com/huggingface/transformers/blob/v4.35.2/src/transformers/tokenization_utils_base.py#L3867) ( 汽车\_类 = '自动标记器' ) 参数 * **汽车\_class** ( `str` 或者 `类型` , *选修的* ,默认为 `“自动标记器”` )— 用于注册这个新分词器的自动类。 使用给定的自动类注册此类。这应该只用于自定义分词器,如 库已经映射了 `自动标记器` 。 此 API 是实验性的,在下一版本中可能会有一些轻微的重大更改。 #### 保存\_预训练 [< 来源 >](https://github.com/huggingface/transformers/blob/v4.35.2/src/transformers/tokenization_utils_base.py#L2297) ( 保存\_目录 : 打字.Union[str, os.PathLike] 旧版\_格式 : 打字.Optional[bool] = None 文件名\_前缀 : 打字.可选[str] =无 推送\_到\_集线器 :布尔=假 \*\*夸格 ) → 导出常量元数据='未定义'; 一个元组 `str` 参数 * **保存\_目录** ( `str` 或者 `os.PathLike` ) — 将保存标记生成器的目录的路径。 * **旧版\_格式** ( `布尔` , *选修的* )— 仅适用于快速分词器。如果未设置(默认),会将标记生成器保存在统一的 JSON 中 格式以及遗留格式(如果存在),即具有分词器特定词汇和单独的 添加\_tokens 文件。 如果 ‘假’ ,只会以统一的 JSON 格式保存 tokenizer。此格式不兼容 “慢”分词器(不由 *分词器* 库),因此分词器将无法 加载到相应的“慢”分词器中。 如果 '真实' ,将以旧格式保存标记生成器。如果“慢”分词器不存在,则一个值 出现错误。 * **文件名\_前缀** ( `str` , *选修的* )— 添加到标记生成器保存的文件名称的前缀。 * **推送\_到\_hub** ( `布尔` , *选修的* ,默认为 ‘假’ )— 保存模型后是否将其推送到 Hugging Face 模型中心。您可以指定 您想要推送到的存储库 `repo_id` (将默认为名称 `保存目录` 在你的 命名空间)。 * **夸格斯** ( `字典[str,任意]` , *选修的* )— 传递给的附加关键字参数 [push\_to\_hub()](/docs/transformers/v4.35.2/en/main_classes/configuration#transformers.PretrainedConfig.push_to_hub) 方法。 退货 导出常量元数据='未定义'; 一个元组 `str` 导出常量元数据='未定义'; 文件已保存。 保存完整的标记器状态。 此方法确保可以使用以下命令重新加载完整的分词器 `~tokenization_utils_base.PreTrainedTokenizer.from_pretrained` 类方法.. 
警告,无这不会保存您在实例化后可能应用到分词器的修改(例如 实例,修改 `tokenizer.do_lower_case` 创建后)。 #### 保存\_词汇 [< 来源 >](https://github.com/huggingface/transformers/blob/v4.35.2/src/transformers/tokenization_utils_base.py#L2494) ( 保存\_目录 : 字符串 文件名\_前缀 : 打字.可选[str] =无 ) → 导出常量元数据='未定义'; `元组(str)` 参数 * **保存\_目录** ( `str` )— 保存词汇的目录。 * **文件名\_前缀** ( `str` , *选修的* )— 添加到已保存文件的名称中的可选前缀。 退货 导出常量元数据='未定义'; `元组(str)` 导出常量元数据='未定义'; 保存的文件的路径。 仅保存分词器的词汇表(词汇+添加的标记)。 此方法不会保存分词器的配置和特殊标记映射。使用 `_save_pretrained()` 保存标记器的整个状态。 #### 标记化 [< 来源 >](https://github.com/huggingface/transformers/blob/v4.35.2/src/transformers/tokenization_utils_base.py#L2512) ( 文本 : 字符串 一对 : 打字.可选[str] =无 添加\_特殊\_令牌 :布尔=假 \*\*夸格 ) → 导出常量元数据='未定义'; `列表[str]` 参数 * **文本** ( `str` )— 要编码的序列。 * **一对** ( `str` , *选修的* )— 将与第一个序列一起编码的第二个序列。 * **添加\_特殊\_令牌** ( `布尔` , *选修的* ,默认为 ‘假’ )— 是否添加与对应模型关联的特殊标记。 * **夸格斯** (附加关键字参数, *选修的* )— 将传递给底层模型特定的编码方法。详情请参阅 [**称呼** ()](/docs/transformers/v4.35.2/en/main_classes/tokenizer#transformers.PreTrainedTokenizerFast.__call__) 退货 导出常量元数据='未定义'; `列表[str]` 导出常量元数据='未定义'; 代币列表。 将字符串转换为标记序列,将未知标记替换为 `unk_token` 。 #### 截断\_序列 [< 来源 >](https://github.com/huggingface/transformers/blob/v4.35.2/src/transformers/tokenization_utils_base.py#L3473) ( id : 打字.List[int] 对\_ids : 打字.可选[打字.列表[int]] =无 要删除的 num\_tokens\_ :整数=0 截断\_策略 : Typing.Union[str, Transformers.tokenization\_utils\_base.TruncationStrategy] = '最长\_first' 跨步 :整数=0 ) → 导出常量元数据='未定义'; `元组[列表[int],列表[int],列表[int]]` 参数 * **ID** ( `列表[整数]` )— 第一个序列的标记化输入 ID。可以通过链接从字符串中获得 `标记化` 和 `convert_tokens_to_ids` 方法。 * **对\_id** ( `列表[整数]` , *选修的* )— 第二个序列的标记化输入 ID。可以通过链接从字符串中获得 `标记化` 和 `convert_tokens_to_ids` 方法。 * **num\_tokens\_to\_remove** ( `int` , *选修的* ,默认为 0) — 使用截断策略删除的标记数。 * **截断\_策略** ( `str` 或者 [TruncationStrategy](/docs/transformers/v4.35.2/en/internal/tokenization_utils#transformers.tokenization_utils_base.TruncationStrategy) , *选修的* ,默认为 ‘假’ )— 截断遵循的策略。可: + `'最长优先'` :截断到参数指定的最大长度 `最大长度` 或到 如果未提供该参数,则模型可接受的最大输入长度。这将截断 逐个标记,如果一对序列(或一个 提供成对的批次)。 + `'仅_第一个'` 
:截断到参数指定的最大长度 `最大长度` 或到 如果未提供该参数,则模型可接受的最大输入长度。这只会 如果提供了一对序列(或一批序列对),则截断一对序列中的第一个序列。 + `'仅_第二'` :截断到参数指定的最大长度 `最大长度` 或到 如果未提供该参数,则模型可接受的最大输入长度。这只会 如果提供了一对序列(或一批序列对),则截断一对序列中的第二个序列。 + `'不截断'` (默认):无截断(即,可以输出序列长度更大的批次 比模型最大允许输入大小)。 * **大步** ( `int` , *选修的* ,默认为 0) — 如果设置为正数,则返回的溢出令牌将包含来自主程序的一些令牌 返回序列。该参数的值定义附加标记的数量。 退货 导出常量元数据='未定义'; `元组[列表[int],列表[int],列表[int]]` 导出常量元数据='未定义'; 被截断的 `id` ,截断的 `pair_id` 和名单 溢出的代币。注: *最长\_第一个* 如果一对,则策略返回溢出令牌的空列表 提供序列(或一批对)。 按照策略就地截断序列对。 ## SpecialTokensMixin ### 班级 变压器。 SpecialTokensMixin [< 来源 >](https://github.com/huggingface/transformers/blob/v4.35.2/src/transformers/tokenization_utils_base.py#L795) ( 冗长的 = 假 \*\*夸格 ) 参数 * **bos\_token** ( `str` 或者 `tokenizers.AddedToken` , *选修的* )— 代表句子开头的特殊标记。 * **eos\_token** ( `str` 或者 `tokenizers.AddedToken` , *选修的* )— 代表句子结尾的特殊标记。 * **unk\_token** ( `str` 或者 `tokenizers.AddedToken` , *选修的* )— 代表词汇表外标记的特殊标记。 * **sep\_token** ( `str` 或者 `tokenizers.AddedToken` , *选修的* )— 在同一输入中分隔两个不同句子的特殊标记(例如 BERT 使用)。 * **填充\_token** ( `str` 或者 `tokenizers.AddedToken` , *选修的* )— 一种特殊的令牌,用于使令牌数组具有相同的大小以进行批处理。然后将被忽略 注意机制或损失计算。 * **cls\_token** ( `str` 或者 `tokenizers.AddedToken` , *选修的* )— 表示输入类别的特殊标记(例如 BERT 使用)。 * **掩码\_token** ( `str` 或者 `tokenizers.AddedToken` , *选修的* )— 代表掩码标记的特殊标记(由掩码语言建模预训练目标使用,例如 伯特)。 * **额外\_特殊\_令牌** (元组或列表 `str` 或者 `tokenizers.AddedToken` , *选修的* )— 一个元组或附加标记的列表,将被标记为 ‘特别’ ,这意味着他们将 解码时跳过 if `skip_special_tokens` 被设定为 '真实' 。 派生出的 mixin [PreTrainedTokenizer](/docs/transformers/v4.35.2/en/main_classes/tokenizer#transformers.PreTrainedTokenizer) 和 [PreTrainedTokenizerFast](/docs/transformers/v4.35.2/en/main_classes/tokenizer#transformers.PreTrainedTokenizerFast) 处理与相关的特定行为 特殊令牌。特别是,该类包含可用于直接访问这些特殊的属性 以独立于模型的方式处理令牌,并允许设置和更新特殊令牌。 #### 添加\_特殊\_令牌 [< 来源 >](https://github.com/huggingface/transformers/blob/v4.35.2/src/transformers/tokenization_utils_base.py#L873) ( 特殊\_tokens\_dict : Typing.Dict[str, Typing.Union[str, tokenizers.AddedToken]] 替换\_额外\_特殊\_令牌 = 正确 ) → 导出常量元数据='未定义'; `int` 参数 * 
**特殊\_tokens\_dict** (字典 *字符串* 到 *字符串* 或者 `tokenizers.AddedToken` )— 键应该位于预定义的特殊属性列表中:[ `bos_token` , `eos_token` , `unk_token` , `sep_token` , `pad_token` , `cls_token` , `掩码令牌` , `additional_special_tokens` ]。 仅当标记尚未出现在词汇表中时才会添加标记(通过检查标记生成器是否 分配索引 `unk_token` 给他们)。 * **替换\_附加\_特殊\_令牌** ( `布尔` , *选修的* ,, 默认为 '真实' )— 如果 '真实' ,现有的附加特殊令牌列表将被替换为中提供的列表 `special_tokens_dict` 。否则, `self._additional_special_tokens` 只是延长了。在前者中 在这种情况下,标记不会从标记器的完整词汇表中删除 - 它们只是被标记 作为非特殊标记。请记住,这仅影响解码期间跳过的标记,而不影响 `added_tokens_encoder` 和 `added_tokens_decoder` 。这意味着之前的 `additional_special_tokens` 仍然是添加的 token,不会被模型分割。 退货 导出常量元数据='未定义'; `int` 导出常量元数据='未定义'; 添加到词汇表中的标记数量。 将特殊标记(eos、pad、cls 等)的字典添加到编码器并将它们链接到类属性。如果 特殊标记不在词汇表中,它们被添加到词汇表中(从最后一个索引开始索引) 当前词汇)。 当向词汇表中添加新标记时,您应该确保还调整了词汇表的标记嵌入矩阵的大小 模型,使其嵌入矩阵与分词器匹配。 为此,请使用 [resize\_token\_embeddings()](/docs/transformers/v4.35.2/en/main_classes/model#transformers.PreTrainedModel.resize_token_embeddings) 方法。 使用 `add_special_tokens` 将确保您的特殊代币可以通过多种方式使用: * 使用解码时可以跳过特殊标记 `skip_special_tokens = True` 。 * 特殊标记由分词器仔细处理(它们永远不会被分割),类似于 `添加的令牌` 。 * 您可以使用 tokenizer 类属性轻松引用特殊标记,例如 `tokenizer.cls_token` 。这 可以轻松开发与模型无关的训练和微调脚本。 如果可能,已经为提供的预训练模型注册了特殊令牌(例如 [BertTokenizer](/docs/transformers/v4.35.2/en/model_doc/bert#transformers.BertTokenizer) `cls_token` 已注册为 :obj *'[CLS]'* XLM 的也注册为 `'</s>'` )。 例子: ``` # Let's see how to add a new classification token to GPT-2 tokenizer = GPT2Tokenizer.from_pretrained("gpt2") model = GPT2Model.from_pretrained("gpt2") special_tokens_dict = {"cls\_token": "<CLS>"} num_added_toks = tokenizer.add_special_tokens(special_tokens_dict) print("We have added", num_added_toks, "tokens") # Notice: resize\_token\_embeddings expect to receive the full size of the new vocabulary, i.e., the length of the tokenizer. 
model.resize_token_embeddings(len(tokenizer)) assert tokenizer.cls_token == "<CLS>" ``` #### 添加\_令牌 [< 来源 >](https://github.com/huggingface/transformers/blob/v4.35.2/src/transformers/tokenization_utils_base.py#L974) ( 新\_代币 :typing.Union[str,tokenizers.AddedToken,typing.List[typing.Union[str,tokenizers.AddedToken]]] 特殊\_令牌 :布尔=假 ) → 导出常量元数据='未定义'; `int` 参数 * **新\_代币** ( `str` , `tokenizers.AddedToken` 或列表 *字符串* 或者 `tokenizers.AddedToken` )— 仅当词汇表中尚不存在时,才会添加标记。 `tokenizers.AddedToken` 包裹一个字符串 令牌让您个性化其行为:此令牌是否只应与单个单词匹配, 该标记是否应去除左侧所有潜在的空白,该标记是否应 去除右侧所有潜在的空格等。 * **特殊\_令牌** ( `布尔` , *选修的* ,默认为 ‘假’ )— 可用于指定令牌是否是特殊令牌。这主要改变了规范化行为 (例如,像 CLS 或 [MASK] 这样的特殊标记通常不是小写的)。 查看详细信息 `tokenizers.AddedToken` 在 HuggingFace 分词器库中。 退货 导出常量元数据='未定义'; `int` 导出常量元数据='未定义'; 添加到词汇表中的标记数量。 将新标记列表添加到标记生成器类中。如果新的标记不在词汇表中,则将它们添加到 它的索引从当前词汇的长度开始,并且将在标记化之前被隔离 应用算法。因此,添加的标记和来自标记化算法词汇表的标记是 不以同样的方式对待。 请注意,在向词汇表添加新标记时,您应该确保还调整标记嵌入矩阵的大小 模型的嵌入矩阵与分词器匹配。 为此,请使用 [resize\_token\_embeddings()](/docs/transformers/v4.35.2/en/main_classes/model#transformers.PreTrainedModel.resize_token_embeddings) 方法。 例子: ``` # Let's see how to increase the vocabulary of Bert model and tokenizer tokenizer = BertTokenizerFast.from_pretrained("bert-base-uncased") model = BertModel.from_pretrained("bert-base-uncased") num_added_toks = tokenizer.add_tokens(["new\_tok1", "my\_new-tok2"]) print("We have added", num_added_toks, "tokens") # Notice: resize\_token\_embeddings expect to receive the full size of the new vocabulary, i.e., the length of the tokenizer. model.resize_token_embeddings(len(tokenizer)) ``` #### 清理\_特殊\_令牌 [< 来源 >](https://github.com/huggingface/transformers/blob/v4.35.2/src/transformers/tokenization_utils_base.py#L865) ( ) 这 `清理特殊令牌` 现已弃用,保留向后兼容性,并将在 变形金刚 v5. ## 枚举和命名元组 ### 班级 Transformers.tokenization\_utils\_base。 截断策略 [< 来源 >](https://github.com/huggingface/transformers/blob/v4.35.2/src/transformers/tokenization_utils_base.py#L138) ( 价值 名字 = 无 模块 = 无 限定名 = 无 类型 = 无 开始 = 1 ) 可能的值 `截断` 论证中 [PreTrainedTokenizerBase. 
**称呼** ()](/docs/transformers/v4.35.2/en/main_classes/tokenizer#transformers.PreTrainedTokenizerFast.__call__) 。对于制表符补全很有用 一个IDE。 ### 班级 变压器。 字符跨度 [< 来源 >](https://github.com/huggingface/transformers/blob/v4.35.2/src/transformers/tokenization_utils_base.py#L150) ( 开始 :整数 结尾 :整数 ) 参数 * **开始** ( `int` ) — 原始字符串中第一个字符的索引。 * **结尾** ( `int` ) — 原始字符串中最后一个字符后面的字符的索引。 原始字符串中的字符跨度。 ### 班级 变压器。 令牌跨度 [< 来源 >](https://github.com/huggingface/transformers/blob/v4.35.2/src/transformers/tokenization_utils_base.py#L163) ( 开始 :整数 结尾 :整数 ) 参数 * **开始** ( `int` ) — 范围中第一个标记的索引。 * **结尾** ( `int` ) — 范围中最后一个标记之后的标记索引。 编码字符串(标记列表)中的标记范围。
import styled from "styled-components"; import { Link } from "react-router-dom"; const CardEdit = ({ user, information, className }) => { const image = require("../../../image Hackathon/image/background.jpeg"); const location = require("../../../image Hackathon/icon/pin.png"); const call = require("../../../image Hackathon/icon/call.png"); const telephone = require("../../../image Hackathon/icon/telephone.png"); const facebook = require("../../../image Hackathon/icon/facebook.png"); const mail = require("../../../image Hackathon/icon/arroba.png"); const website = require("../../../image Hackathon/icon/world-wide-web.png"); const linkIn = require("../../../image Hackathon/icon/linkedin-big-logo.png"); return ( <div className={className}> <div className="profile"> <div className="box-profile"> <div className="profile-image"> <img src={information.profile} /> <p id="detail"> {information.description} </p> </div> <div className="line"></div> <div className="profile-detail"> <h1 id="name">{information.name}</h1> <div className="detail" id="location"> <img src={location} /> <p>{information.address}</p> </div> <div className="contact"> <h2>ช่องทางการติดต่อ</h2> <div className="total-detail"> <div id="left"> <div className="detail" id="เบอร์สำนักงาน"> <img src={call} /> <p>{information.tel}</p> </div> {/* <div className="detail" id="เบอร์ตัวเอง"> <img src={telephone} /> <p>099-386-5212</p> </div> */} <div className="detail" id="facebook"> <img src={facebook} /> <p>{information.facebook}</p> </div> <div className="detail" id="mail"> <img src={mail} /> <p>{information.email}</p> </div> </div> <div className="rigth"> <div className="detail" id="linkIn"> <img src={linkIn} /> <p>{information.instagram}</p> </div> <div className="detail" id="website"> <img src={website} /> <p>{information.website}</p> </div> </div> </div> </div> <div className="button-chat"> {user.status == "company" || user.status == "waiting" ?( <Link to="/edit-profile-company"> <button>แก้ไข</button></Link> 
):( <Link to="/profile-user-edit"> <button>แก้ไข</button></Link> )} </div> </div> </div> </div> </div> ); }; export default styled(CardEdit)` @import url("https://fonts.googleapis.com/css2?family=Anuphan:wght@200;300;400;500&family=Lora:wght@400;500;600;700&family=Pangolin&family=Prompt:wght@200;500;700&display=swap"); /* Profile */ .profile { display: flex; justify-content: center; margin: 150px 70px 20px 70px; } .box-profile { background-image: linear-gradient(to bottom, #12136e, #6b78ff); width: 1200px; height: 650px; border-radius: 20px; display: flex; align-items: center; } .profile-image { width: 45%; height: 100%; display: flex; flex-direction: column; justify-content: center; align-items: center; border: 20px; } .profile-image img { width: 280px; height: 310px; object-fit: cover; box-shadow: rgba(50, 50, 93, 0.25) 0px 50px 100px -20px, rgba(0, 0, 0, 0.3) 0px 30px 60px -30px, rgba(10, 37, 64, 0.35) 0px -2px 6px 0px inset; } .profile-image #detail { width: 280px; color: white; font-family: "Anuphan"; font-size: 20px; font-weight: 400; text-align: center; line-height: 33px; } .line { border-right: 3px solid white; height: 500px; } .profile-detail { display: flex; flex-direction: column; width: 55%; justify-content: center; align-items: center; } .profile-detail #name { font-family: "Anuphan"; font-size: 50px; font-weight: 600; color: white; margin: 10px; } .detail { display: flex; align-items: center; width: 100%; } .detail img { width: 30px; height: 30px; margin-right: 15px; } .detail p { font-family: "Anuphan"; font-size: 18px; font-weight: 400; color: white; } .profile-detail #location { isplay: flex; width: 100%; justify-content: center; } .contact { width: 100%; display: flex; align-items: center; flex-direction: column; } .contact h2 { font-family: "Anuphan"; color: white; font-size: 30px; font-weight: 600; } .total-detail { display: flex; width: 100%; } .total-detail #left { width: 50%; } .total-detail .detail img { width: 25px; height: 25px; } 
.total-detail .detail { display: flex; padding-left: 50px; } .button-chat { width: 100%; display: flex; justify-content: flex-end; padding-right: 130px; } .button-chat button { width: 130px; height: 45px; margin: 30px 20px 0px 0px; border-radius: 20px; font-family: "Anuphan"; font-size: 25px; font-weight: 600; background-color: white; box-shadow: #fff 0px 2px 30px -10px; } .button-chat button:hover { box-shadow: rgba(27, 31, 35, 0.04) 0px 1px 0px, rgba(255, 255, 255, 0.25) 0px 1px 0px inset; cursor: pointer; } `;
import React from 'react' import { Box, Container, StatHelpText } from '@chakra-ui/react' import { useState } from 'react'; import Loader from './Loader'; import { useEffect } from 'react'; import axios from 'axios'; import { server } from '../index'; import {useParams} from "react-router-dom" import {HStack,VStack} from "@chakra-ui/react"; import { Radio,RadioGroup } from '@chakra-ui/react'; import {Text} from "@chakra-ui/react"; import { Image } from '@chakra-ui/react'; import { Stat ,StatLabel,StatNumber,StatArrow} from '@chakra-ui/react'; import { Badge } from '@chakra-ui/react'; import { Progress } from '@chakra-ui/react'; import { Chart } from 'chart.js'; const CoinDetails = () => { const [coins,setCoins] = useState([]); const [loading, setloading] = useState(true); const [error,setError] = useState(false); const [page,setPage] = useState(1); const [currency, setCurrency] = useState("inr"); const currencySymbol = currency==="inr"?"₹":currency === "eur"? "€": "$"; const params = useParams(); useEffect(() => { const fetchCoin = async () =>{ try{ const {data} = await axios.get(`${server}/coins/${params.id}`); console.log(data); setCoins(data); setloading(false); console.log(data);} catch(error){ setError(true); setloading(false); } }; fetchCoin(); }, [params.id]); return <Container maxW={"container.xl"}> {loading? 
( <Loader/> ) : ( <> <Box width={"full"} borderWidth={1}> <RadioGroup value={currency} onChange={setCurrency} p={"8"}> <HStack spacing={"4"}> <Radio value={"inr"}>INR</Radio> <Radio value={"usd"}>USD</Radio> </HStack> </RadioGroup> <VStack spacing = {"4"} p="16" alignItems={"flex-start"}> <Text fontsize={'small'} alignItems="center" opacity={0.7}> Last Updated on {Date(coins.market_data.last_updated).split("G")[0]} </Text> <Image src={coins.image.large} w={"16"} h={"16"} objectFit={"contain"}/> <Stat> <StatLabel>{coins.name}</StatLabel> <StatNumber>{currencySymbol}{coins.market_data.current_price[currency]}</StatNumber> <StatHelpText> <StatArrow type={coins.market_data.current_price_24h > 0 ? "increase" :"decrease"} /> {coins.market_data.price_change_percentage_24h}% </StatHelpText> </Stat> <Badge fontSize={"2xl"} bgColor={"blackAlpha.900"} color={"white"}> {`#${coins.market_cap_rank}`} </Badge> <CustomBar high={`${currencySymbol}${coins.market_data.high_24h[currency]}`} low={`${currencySymbol}${coins.market_data.low_24h[currency]}`}/> <Box w ={"full"} p="4"> <Item title={"Max Supply"} value={coins.market_data.max_supply}/> <Item title={"Circulating Supply"} value={coins.market_data.circulating_supply}/> <Item title={"Market Cap"} value={`${currencySymbol}${coins.market_data.market_cap[currency]}`}/> <Item title={"All Time Low"} value={`${currencySymbol}${coins.market_data.atl[currency]}`}/> <Item title={"All Time High"} value={`${currencySymbol}${coins.market_data.ath[currency]}`} /> </Box> </VStack> </Box> </> )} </Container> } const Item=({title,value})=>( <HStack justifyContent={"space-between"} w={"full"} my={"4"}> <Text fontFamily={"Bebas Neue"} letterSpacing={"widest"}>{title}</Text> <Text>{value}</Text> </HStack> ) const CustomBar = ({high,low}) =>( <VStack w={"full"}> <Progress value={50} colorScheme={"teal"} w={"full"}/> <HStack justifyContent={"space-between"} w={"full"}> <Badge children={low} colorScheme={"red"} /> <Text fontSize={"sm"}>24H Range</Text> 
<Badge children={high} colorScheme={"green"} /> </HStack> </VStack> ) export default CoinDetails
/* eslint-disable jsx-a11y/anchor-is-valid */ import React from "react"; import { PostModel } from "@/types/post"; import ListComments from "../comment/list-comments"; import { formateDMYHH } from "@/utils"; import { BiComment } from "react-icons/bi"; import { MdDeleteOutline, MdOutlineModeEdit } from "react-icons/md"; import ReactPlayer from "react-player"; import { CreateOrUpdateFormLike } from "../like-follow/create-or-update-form-like"; import { HtmlParser } from "@/utils/html-parser"; import { IoShareOutline } from "react-icons/io5"; import { FiDownload } from "react-icons/fi"; import Link from "next/link"; import { useRouter } from "next/router"; import { UserVisitorModel } from "@/types/user.type"; type Props = { item?: PostModel; commentTake: number; userVisitor: UserVisitorModel; }; const ListPublicPostsComments: React.FC<Props> = ({ item, commentTake, userVisitor, }) => { const { push, locale } = useRouter(); return ( <> <div key={item?.id} className="mt-8 overflow-hidden bg-white shadow-2xl shadow-gray-4300/60" > <div className="p-8 sm:py-7 sm:px-8"> <div className="flex items-center"> <div className="cursor-pointer"> <p className="mt-1 text-sm font-medium text-gray-500"> {formateDMYHH(item?.createdAt as Date, locale as string)} </p> </div> <div className="ml-auto"> <button title="Share" className="ml-2 text-gray-600 hover:text-gray-900 focus:ring-gray-900" > <IoShareOutline className="w-5 h-5" /> </button> {item?.allowDownload ? ( <button title="Download" className="ml-2 text-gray-600 hover:text-gray-900 focus:ring-gray-900" > <FiDownload className="w-5 h-5" /> </button> ) : null} {userVisitor?.id === item?.userId ? 
( <> <button onClick={() => push( `/posts/${ item?.id }/edit?type=${item?.type.toLocaleLowerCase()}` ) } title="Edit" className="ml-2 text-gray-600 hover:text-indigo-400 focus:ring-indigo-400" > <MdOutlineModeEdit className="w-5 h-5" /> </button> <button // onClick={() => deleteItem(item)} title="Delete" className="ml-2 text-gray-600 hover:text-red-400 focus:ring-red-400" > <MdDeleteOutline className="w-5 h-5" /> </button> </> ) : null} </div> </div> {item?.urlMedia && ["VIDEO", "AUDIO"].includes(item?.type) ? ( <div className="mt-2"> <ReactPlayer className="mr-auto" url={item?.urlMedia} width="100%" height="400px" controls /> </div> ) : null} {item?.id ? ( <Link href={`/${item?.profile?.username}/posts/${item?.slug}`} className="mt-4 text-lg font-bold text-gray-900 cursor-pointer" > {item?.title ?? ""} </Link> ) : null} <p className="mt-4 text-sm font-normal text-gray-600"> <HtmlParser html={String(item?.description)} /> </p> <div className="flex mt-4 items-center"> <CreateOrUpdateFormLike typeLike="POST" item={item} /> <button className="ml-3.5 text-lg font-bold"> <BiComment /> </button> <span className="ml-1.5 font-normal text-sm"> {item?.totalComment ?? 0} </span> </div> <ListComments model="POST" modelIds={["POST"]} organizationId={String(item?.organizationId)} postId={String(item?.id)} take={commentTake} userVisitorId={userVisitor?.id ?? ""} /> </div> </div> </> ); }; export { ListPublicPostsComments };
<?php

namespace App\Http\Controllers;

use App\Cover;
use Illuminate\Http\Request;
use Illuminate\Support\Facades\Auth;

class HomeController extends Controller
{
    /**
     * Create a new controller instance.
     *
     * @return void
     */
    public function __construct()
    {
        // Every action in this controller requires an authenticated user.
        $this->middleware('auth');
    }

    /**
     * Show the application dashboard with a paginated list of covers.
     *
     * @return \Illuminate\Http\Response
     */
    public function index()
    {
        $covers = Cover::paginate(2);

        return view('home', compact('covers'));
    }

    /**
     * Buy a cover: debit the user's purse and attach the cover to the user.
     *
     * @param  int  $id  cover primary key (404 if missing)
     * @return \Illuminate\Http\RedirectResponse
     */
    public function buy($id)
    {
        $cover = Cover::findOrFail($id);
        $user = Auth::user();

        // Fix: previously the purse was debited unconditionally, so a user
        // could buy with insufficient funds and end up with a negative balance.
        if ($user->purse < $cover->price) {
            return redirect()->action('HomeController@index');
        }

        $user->purse -= $cover->price;
        $user->covers()->attach($cover);
        $user->save();

        return redirect()->action('HomeController@index');
    }
}
package it.unitn.disi.zanshin.simulation.cases.acad;

import it.unitn.disi.zanshin.simulation.Logger;
import it.unitn.disi.zanshin.simulation.cases.SimulationPart;

/**
 * Simulation of the failure of AR11 during an execution of the A-CAD.
 *
 * <p>
 * Runs three sequential user sessions against the Q_DISPATCH quality constraint
 * ("Dispatching occurs in 3 min"): two failures (triggering adaptation in
 * Zanshin) followed by one success. (The class name says AR11; the requirement
 * actually logged here is Q_DISPATCH — presumably AR11 governs it, TODO confirm.)
 *
 * @author Vitor E. Silva Souza ([email protected])
 * @version 1.0
 */
public final class AcadAR11FailureSimulation extends AbstractAcadSimulation {
	/** The logger. */
	private static final Logger log = new Logger(AcadAR11FailureSimulation.class);

	/** @see it.unitn.disi.zanshin.simulation.cases.AbstractSimulation#doInit() */
	@Override
	public void doInit() throws Exception {
		// Registers the A-CAD as target system in Zanshin.
		registerTargetSystem();

		// Part 1: first failure of Q_DISPATCH; shouldWait() is true so the
		// framework pauses for Zanshin's adaptation before the next part.
		parts.add(new SimulationPart() {
			@Override
			public void run() throws Exception {
				// Creates a user session, as if someone were using the A-CAD.
				sessionId = zanshin.createUserSession(targetSystemId);
				log.info("Created a new user session with id: {0}", sessionId); //$NON-NLS-1$

				// Simulates a failure in quality constraint "Dispatching occurs in 3 min".
				log.info("Current incident took more than 3 minutes do dispatch!"); //$NON-NLS-1$
				zanshin.logRequirementStart(targetSystemId, sessionId, Q_DISPATCH);
				zanshin.logRequirementFailure(targetSystemId, sessionId, Q_DISPATCH);

				// Ends the user session.
				zanshin.disposeUserSession(targetSystemId, sessionId);
			}

			@Override
			public boolean shouldWait() {
				return true;
			}
		});

		// Part 2: second failure — the first adaptation attempt was not enough,
		// so Zanshin is expected to adapt again (hence shouldWait() == true).
		parts.add(new SimulationPart() {
			@Override
			public void run() throws Exception {
				// Creates yet another user session, as if someone else were using the A-CAD.
				sessionId = zanshin.createUserSession(targetSystemId);
				log.info("Created a new user session with id: {0}", sessionId); //$NON-NLS-1$

				// Simulates another failure, because the first increment in the parameter was not enough.
				log.info("First adaptation attempt was not enough, dispatch is still took more than 3 minutes in another incident!"); //$NON-NLS-1$
				zanshin.logRequirementStart(targetSystemId, sessionId, Q_DISPATCH);
				zanshin.logRequirementFailure(targetSystemId, sessionId, Q_DISPATCH);

				// Ends the user session.
				zanshin.disposeUserSession(targetSystemId, sessionId);
			}

			@Override
			public boolean shouldWait() {
				return true;
			}
		});

		// Part 3: success — no adaptation needed afterwards, so shouldWait()
		// returns false and the simulation can terminate.
		parts.add(new SimulationPart() {
			@Override
			public void run() throws Exception {
				// Creates yet another user session, as if someone else were using the A-CAD.
				sessionId = zanshin.createUserSession(targetSystemId);
				log.info("Created a new user session with id: {0}", sessionId); //$NON-NLS-1$

				// The second change was enough, the quality constraint is now satisfied.
				log.info("OK, for a third incident dispatching now took less than 3 minutes!"); //$NON-NLS-1$
				zanshin.logRequirementStart(targetSystemId, sessionId, Q_DISPATCH);
				zanshin.logRequirementSuccess(targetSystemId, sessionId, Q_DISPATCH);

				// Ends the user session.
				zanshin.disposeUserSession(targetSystemId, sessionId);
			}

			@Override
			public boolean shouldWait() {
				return false;
			}
		});
	}
}
# Brain Agriculture Case This project is a React application and is part of the Brain Agriculture evaluation case. Solutions were applied to the problems proposed in the repository challenge - [brain-agriculture](https://github.com/brain-ag/trabalhe-conosco). For this, I used: - Programming logic, observing business rules; - Data manipulation and processing; - Maintaining application state with the [React Context API](https://react.dev/reference/react/useContext); - Memorizing functions, expensive calculations and components; - Form management in React with [Unform](https://unform-rocketseat.vercel.app/); - [React Hooks](https://react.dev/reference/react/hooks); - Unit tests with [Jest](https://jestjs.io/). ### Let's start? :computer: To test the application, make sure you have Node.js and Git installed. Open your terminal and clone the repository to your machine: ``` git clone https://github.com/gabrielucas/brain-agriculture.git ``` ``` cd brain-agriculture/ && code . ``` Run the `yarn` command to install all dependencies. *Note: This project was configured with [yarn](https://yarnpkg.com/), however, if you don't have it installed on your machine, make sure you have [npm](https://www.npmjs.com/) installed and run `npm install` to install all dependencies. To run the application, simply run the `npm run dev` command.* ### Starting the application :rocket: To start the application, run the command `yarn dev`. This application was built with Vite. To view it in your browser, go to (http://localhost:5173/). If you want to view the mobile experience, on your phone, run `yarn dev --host` to expose your IP address. *Note: remember that you must have your cell phone connected to the same network as your machine for the IP to be visible to your device.* On your device, open the browser and access the address (http://your-ip-address:5173). 
### Testing :test_tube: To run the unit tests, run the command below in your terminal: ``` yarn test ``` If you want to check test coverage, run the command below: ``` yarn coverage ``` --- **Now it's up to you. Feel free to explore the application's features**
// Static-analysis rule fixture: every statement below is deliberately "wrong" AssertJ
// usage, and each trailing "// Noncompliant {{...}}" marker is an assertion consumed by
// the analyzer's test harness. Do NOT "fix" these calls or alter the marker text.
void fileRelatedAssertionChains() {
  int size = 1;
  String name = "name";
  assertThat(getFile()).hasSize(0); // Noncompliant {{Use isEmpty() instead.}}
  assertThat(getFile().length()).isZero(); // Noncompliant {{Use assertThat(actual).isEmpty() instead.}}
  assertThat(getFile().length()).isEqualTo(0); // Noncompliant {{Use isZero() instead.}}
  assertThat(getFile().length()).isNotZero(); // Noncompliant {{Use assertThat(actual).isNotEmpty() instead.}}
  assertThat(getFile().length()).isNotEqualTo(0); // Noncompliant {{Use isNotZero() instead.}}
  assertThat(getFile().length()).isEqualTo(size); // Noncompliant {{Use assertThat(actual).hasSize(expected) instead.}}
  assertThat(getFile().canRead()).isTrue(); // Noncompliant {{Use assertThat(actual).canRead() instead.}}
  assertThat(getFile().canWrite()).isTrue(); // Noncompliant {{Use assertThat(actual).canWrite() instead.}}
  assertThat(getFile().exists()).isTrue();// Noncompliant {{Use assertThat(actual).exists() instead.}}
  assertThat(getFile().exists()).isFalse(); // Noncompliant {{Use assertThat(actual).doesNotExist() instead.}}
  assertThat(getFile().getName()).isEqualTo(name); // Noncompliant {{Use assertThat(actual).hasName(expected) instead.}}
  assertThat(getFile().getParent()).isEqualTo(name); // Noncompliant {{Use assertThat(actual).hasParent(expected) instead.}}
  assertThat(getFile().getParentFile()).isEqualTo(getFile()); // Noncompliant {{Use assertThat(actual).hasParent(expected) instead.}}
  assertThat(getFile().getParent()).isNull(); // Noncompliant {{Use assertThat(actual).hasNoParent() instead.}}
  assertThat(getFile().getParentFile()).isNull(); // Noncompliant {{Use assertThat(actual).hasNoParent() instead.}}
  assertThat(getFile().isAbsolute()).isTrue(); // Noncompliant {{Use assertThat(actual).isAbsolute() instead.}}
  assertThat(getFile().isAbsolute()).isFalse(); // Noncompliant {{Use assertThat(actual).isRelative() instead.}}
  assertThat(getFile().isDirectory()).isTrue(); // Noncompliant {{Use assertThat(actual).isDirectory() instead.}}
  assertThat(getFile().isFile()).isTrue(); // Noncompliant {{Use assertThat(actual).isFile() instead.}}
  assertThat(getFile().list()).isEmpty(); // Noncompliant {{Use assertThat(actual).isEmptyDirectory() instead.}}
  assertThat(getFile().listFiles()).isEmpty(); // Noncompliant {{Use assertThat(actual).isEmptyDirectory() instead.}}
  assertThat(getFile().list()).isNotEmpty(); // Noncompliant {{Use assertThat(actual).isNotEmptyDirectory() instead.}}
  assertThat(getFile().listFiles()).isNotEmpty(); // Noncompliant {{Use assertThat(actual).isNotEmptyDirectory() instead.}}

  // We report only step by step, not the final transformation possible
  assertThat(getFile().list().length).isEqualTo(0); // Noncompliant {{Use isZero() instead.}}
  assertThat(getFile().list().length).isZero(); // Noncompliant {{Use assertThat(actual).isEmpty() instead.}}
  assertThat(getFile().list()).isEmpty(); // Noncompliant {{Use assertThat(actual).isEmptyDirectory() instead.}}
  assertThat(getFile()).isEmptyDirectory(); // Compliant, 3 iterations to reach a nice assertion
}
package com.astha.singh.service; import com.astha.singh.model.Category; import com.astha.singh.model.Items; import com.astha.singh.repository.CategoryRepository; import com.astha.singh.repository.ItemRepository; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.stereotype.Service; import org.springframework.transaction.annotation.Transactional; import java.util.ArrayList; import java.util.List; @Service @Transactional public class CategoryService { @Autowired private CategoryRepository repo; @Autowired private ItemRepository itemsRepo; //READ public Category getCategoryById(int id) { return repo.getReferenceById(id); } //READ ALL public List<Category> getAllCategory() { return repo.findAll(); } //CREATE public Category createCategory(Category category) { return repo.save(category); } //UPDATE public Category updateCategory(Category category) { repo.save(category); return category; } //DELETE public void deleteCategory(int id) { repo.deleteById(id); } public List<Items> getItemsByCategoryID(int id) { Category category1=repo.getReferenceById(id); List<Integer> itemsid= category1.getListOfItems(); List<Items> response=new ArrayList<>(); for(Integer itemid: itemsid) { Items items = itemsRepo.getReferenceById(itemid); response.add(items); } Singleton singleton = Singleton.getInstance(); System.out.println(singleton.getList()); return response; } public int totalPriceByCategoryId(int id) { Category category2=repo.getReferenceById(id); List<Integer> itemsid= category2.getListOfItems(); int sum=0; for(Integer idsum:itemsid) { Items items=itemsRepo.getReferenceById(idsum); sum+=items.getQuantity()*items.getPrice(); } return sum; } }
import { Observable } from 'rxjs';
import { FormBuilder, FormGroup, Validators } from '@angular/forms';
import { HttpClient } from '@angular/common/http';
import { Injectable } from '@angular/core';

/**
 * City CRUD service: wraps the City web API and owns the shared
 * reactive form used by the city add/edit dialogs.
 */
@Injectable({
  providedIn: 'root'
})
export class CityService {

  constructor(private httpClient: HttpClient, private formBuilder: FormBuilder) { }

  apiUrl = 'https://webapi.cihancopur.com/City/';

  // Shared form; "0"/empty strings are the pristine defaults.
  cityFormGroup: FormGroup = this.formBuilder.group({
    CITY_ID: ["0", Validators.required],
    CITY_TR: ["", Validators.required],
    CITY_EN: ["", Validators.required],
    COUNTRY: { COUNTRY_ID: "0", COUNTRY_TR: "", COUNTRY_EN: "" },
    X: "0",
    Y: "0"
  });

  /** Resets the form to its pristine defaults. */
  initializeFormGroup(): void {
    this.cityFormGroup.setValue(
      {
        CITY_ID: "0",
        CITY_TR: "",
        CITY_EN: "",
        COUNTRY: { COUNTRY_ID: "0", COUNTRY_TR: "", COUNTRY_EN: "" },
        X: "0",
        Y: "0"
      }
    );
  }

  getCityFormGroup(): FormGroup {
    return this.cityFormGroup;
  }

  /**
   * Fetches one city. Parameters were previously implicit `any`
   * (rejected under strict mode); both end up in the query string.
   */
  getCity(CITY_ID: number | string, COUNTRY_ID: number | string): Observable<any> {
    let newPath = this.apiUrl + 'Get?p_iId=' + CITY_ID + '&p_sCountryId=' + COUNTRY_ID;
    return this.httpClient.get<any>(newPath);
  }

  /** POSTs a new city to the API. */
  addCity(city: unknown): Observable<any> {
    let newPath = this.apiUrl + 'Add';
    return this.httpClient.post<any>(newPath, city);
  }

  /** POSTs an updated city to the API. */
  updateCity(city: unknown): Observable<any> {
    let newPath = this.apiUrl + 'Update';
    return this.httpClient.post<any>(newPath, city);
  }

  /** Loads a city object into the shared form. */
  populateForm(city: Record<string, unknown>): void {
    this.cityFormGroup.setValue(city);
  }

  /** @deprecated Misspelled legacy name; kept for existing callers — use populateForm(). */
  pupulateForm(city: Record<string, unknown>): void {
    this.populateForm(city);
  }
}
from .mobilevit import MobileVITModel
from .base_config import get_base_config
import yaml, os
import tensorflow as tf


def _load_mobilevit_variant(variant: str, is_feature_extractor: bool) -> tf.keras.Model:
    """Build a MobileViT-v1 model from its YAML config.

    The three public loaders below were byte-for-byte duplicates except for the
    config filename; this helper holds the shared logic.

    Args:
        variant: config suffix, one of "small" / "xsmall" / "xxsmall".
        is_feature_extractor: when True, drop the classification head
            (``include_top=False``).

    Returns:
        A built ``tf.keras.Model`` (weights created via a dummy forward pass).
    """
    config_file_path = f"configs/mobilevit_v1_{variant}.yaml"
    with open(config_file_path, "r") as f:
        data = yaml.safe_load(f)

    print("Instantiating Tensorflow model...")
    config = get_base_config(
        include_top=not is_feature_extractor,
        hidden_sizes=data.get('hidden_sizes'),
        neck_hidden_sizes=data.get('neck_hidden_sizes'),
        expand_ratio=data.get('expand_ratio'),
    )
    tf_model = MobileVITModel(config)

    # Run a dummy 256x256 RGB batch through the model so Keras builds the weights.
    image_dim = 256
    dummy_inputs = tf.ones((1, image_dim, image_dim, 3))
    _ = tf_model(dummy_inputs)[0]
    return tf_model


def load_mobilevit_v1_small(is_feature_extractor=False) -> tf.keras.Model:
    """Load the MobileViT-v1 "small" variant (see _load_mobilevit_variant)."""
    return _load_mobilevit_variant("small", is_feature_extractor)


def load_mobilevit_v1_xsmall(is_feature_extractor=False) -> tf.keras.Model:
    """Load the MobileViT-v1 "xsmall" variant (see _load_mobilevit_variant)."""
    return _load_mobilevit_variant("xsmall", is_feature_extractor)


def load_mobilevit_v1_xxsmall(is_feature_extractor=False) -> tf.keras.Model:
    """Load the MobileViT-v1 "xxsmall" variant (see _load_mobilevit_variant)."""
    return _load_mobilevit_variant("xxsmall", is_feature_extractor)
import java.io.IOException; import java.sql.Connection; import java.sql.DriverManager; import java.sql.PreparedStatement; import java.sql.ResultSet; import jakarta.servlet.ServletException; import jakarta.servlet.annotation.WebServlet; import jakarta.servlet.http.HttpServlet; import jakarta.servlet.http.HttpServletRequest; import jakarta.servlet.http.HttpServletResponse; import jakarta.servlet.http.HttpSession; @WebServlet("/AddToFavSoilServlet") public class AddToFavSoilServlet extends HttpServlet { boolean status = false; @Override protected void doPost(HttpServletRequest request, HttpServletResponse response) throws ServletException, IOException { HttpSession session = request.getSession(); String email = (String) session.getAttribute("email"); String soilName = request.getParameter("soil_name"); String description = request.getParameter("description"); String amount = request.getParameter("amount"); String imagePath = request.getParameter("image_path"); Integer qty = 1; String url = "jdbc:mysql://localhost:4306/web_plant"; String username = "root"; String password = ""; try { Class.forName("com.mysql.cj.jdbc.Driver"); Connection connection = DriverManager.getConnection(url, username, password); String checkQuery = "SELECT COUNT(*) AS count FROM fav_soil WHERE email=? 
AND soil_name=?"; PreparedStatement ps = connection.prepareStatement(checkQuery); ps.setString(1, email); ps.setString(2, soilName); ResultSet rs = ps.executeQuery(); status = rs.next(); if (!(rs.getInt("count") > 0)) { String insertQuery = "INSERT INTO fav_soil (email, soil_name, description, amount, image, qty) VALUES (?, ?, ?, ?, ?, ?)"; PreparedStatement preparedStatement = connection.prepareStatement(insertQuery); preparedStatement.setString(1, email); preparedStatement.setString(2, soilName); preparedStatement.setString(3, description); preparedStatement.setString(4, amount); preparedStatement.setString(5, imagePath); preparedStatement.setInt(6, qty); preparedStatement.executeUpdate(); preparedStatement.close(); response.getWriter().println("<script>alert('Item added to favorites successfully'); window.location.replace('soil_fertizer.jsp');</script>"); } else { response.getWriter().println("<script>alert('Item already in favorites'); window.location.replace('soil_fertizer.jsp');</script>"); } ps.close(); rs.close(); connection.close(); } catch (Exception e) { // Handle exceptions properly e.printStackTrace(); response.getWriter().println("<script>alert('Error occurred while adding item to favorites'); window.location.replace('soil_fertizer.jsp');</script>"); } } }
import { tokenSupported } from "@config/constants/token-supported"; import { Button, CircularProgress, Dialog, DialogContent, FormControl, MenuItem, Select, TextField, } from "@mui/material"; import { formatNumber } from "@utils/format"; import { Form, Formik } from "formik"; import { useState } from "react"; import useBankContractStore from "store/useBankContract/useBankContractStore"; import Swal from "sweetalert2"; import * as Yup from "yup"; const BankTransferSchema = Yup.object().shape({ tokenAddress: Yup.string().required("Please select token."), from: Yup.string().required("Please enter from bank account name."), to: Yup.string().required("Please enter to bank account name."), amount: Yup.number() .required("Please enter your amount.") .test("", "Please enter number more than zero", (value) => { if (value) { return value > 0; } return false; }), }); interface IBankTransferForm { tokenAddress: string; from: string; to: string; amount: number; } const initialValues = { tokenAddress: "", from: "", to: "", amount: 0, }; const BankTransferForm = () => { const [visible, setVisible] = useState<boolean>(false); const [multiTransfer, setMultiTransfer] = useState<IBankTransferForm[]>([]); const { batchBankTransfer, getAllMyAccount } = useBankContractStore(); const [loading, setLoading] = useState(false); const handleOpenModal = () => { setVisible(true); }; const handleCloseModal = () => { setVisible(false); }; const handleSubmit = (value: IBankTransferForm) => { setMultiTransfer([...multiTransfer, value]); handleCloseModal(); }; const handleBatchTransfer = async () => { if (multiTransfer.length > 0) { const lengthData = multiTransfer.length; const tokenAddressArr = multiTransfer.map((f) => f.tokenAddress); const senderNameArr = multiTransfer.map((f) => f.from); const recipientName = multiTransfer.map((f) => f.to); const amountArr = multiTransfer.map((f) => f.amount); if ( tokenAddressArr.length === lengthData && senderNameArr.length === lengthData && 
recipientName.length === lengthData && amountArr.length === lengthData ) { setLoading(true); try { await batchBankTransfer( tokenAddressArr, senderNameArr, recipientName, amountArr ); await getAllMyAccount() Swal.fire({ text: "Transfer successfully", icon: "success", confirmButtonText: "Ok", }); setMultiTransfer([]); } catch (err) {} setLoading(false); } } }; const displayToken = (tokenAddress: string) => { const tokenData = tokenSupported.find((f) => f.address === tokenAddress); return ( <div className="flex items-center"> <img src={tokenData?.image} className="w-7 h-7" /> <div className="font-semibold text-white ml-2">{tokenData?.symbol}</div> </div> ); }; return ( <div className="w-full rounded-lg p-4 bg-secondary text-purple1 space-y-5 border border-purple2"> <div className="flex justify-between items-center"> <div className="font-bold mb-2"> Multi transfer</div> <div className="space-x-2"> <button type="submit" className="bg-red-700 hover:bg-red-500 text-white px-2 py-1 rounded-md text-md font-medium tracking-wider" onClick={() => setMultiTransfer([])} > CLEAR </button> <button type="submit" className="bg-blue-700 hover:bg-blue-500 text-white px-2 py-1 rounded-md text-md font-medium tracking-wider" onClick={handleOpenModal} > ADD </button> </div> </div> {multiTransfer.length === 0 ? 
( <div className="flex w-full justify-center items-center border py-4 rounded-md border-gray-600 text-gray-600"> No Orders </div> ) : ( <div className="space-y-4"> {multiTransfer?.map((item, index) => { return ( <div key={index} className="px-4 py-2 rounded-lg space-y-2 border-purple1 border-2" > <div className="flex justify-between items-center"> <div className="text-white font-semibold">from</div> <div className="font-semibold text-pink-500">{item.from}</div> </div> <div className="flex justify-between items-center"> <div className="text-white font-semibold">to</div> <div className="font-semibold text-pink-500">{item.to}</div> </div> <div className="flex justify-between items-center"> <div className="text-white font-semibold">token</div> {displayToken(item.tokenAddress)} </div> <div className="flex justify-between items-center"> <div className="text-white font-semibold">amount</div> <div className="text-green1 font-bold"> {formatNumber(item.amount)} </div> </div> </div> ); })} </div> )} <button type="submit" className="bg-purple1 hover:bg-purple-700 active:bg-purple-700 w-full h-12 text-white rounded-md font-bold" onClick={handleBatchTransfer} > {loading ? 
( <CircularProgress className="text-white" size={20} /> ) : ( "TRANSFER" )} </button> <Dialog fullWidth open={visible} onClose={handleCloseModal}> <DialogContent> <div className="text-2xl font-bold mb-4">Transfer</div> <Formik validationSchema={BankTransferSchema} initialValues={initialValues} onSubmit={handleSubmit} > {({ errors, touched, values, handleBlur, handleChange, setFieldValue, }) => ( <Form> <div className="mb-4"> <div className="font-bold mb-2">From</div> <TextField variant="outlined" className="w-full" name="from" error={Boolean(touched.from && errors.from)} onBlur={handleBlur} onChange={handleChange} value={values.from} /> <div className="text-xs mt-2 text-red-500"> {touched.from && errors.from} </div> </div> <div className="mb-4"> <div className="font-bold mb-2">To</div> <TextField variant="outlined" className="w-full" name="to" error={Boolean(touched.to && errors.to)} onBlur={handleBlur} onChange={handleChange} value={values.to} /> <div className="text-xs mt-2 text-red-500"> {touched.to && errors.to} </div> </div> <div className="mt-4 mb-4"> <div className="font-bold mb-2">Token</div> <FormControl fullWidth> <Select labelId="demo-simple-select-label" id="demo-simple-select" value={values.tokenAddress} onChange={(e) => { setFieldValue("tokenAddress", e.target.value); }} className="bg-white text-black" > {tokenSupported.map((item) => ( <MenuItem value={item.address} key={item.address}> <div className="flex items-center space-x-3"> <img src={item.image} className="w-8 h-8" /> <div>{item.symbol}</div> </div> </MenuItem> ))} </Select> </FormControl> <div className="text-xs mt-2 text-red-500"> {touched.tokenAddress && errors.tokenAddress} </div> </div> <div className="mb-4"> <div className="font-bold mb-2">Amount</div> <TextField variant="outlined" className="w-full" name="amount" type="number" error={Boolean(touched.amount && errors.amount)} onBlur={handleBlur} onChange={handleChange} value={values.amount} /> <div className="text-xs mt-2 text-red-500"> 
{touched.amount && errors.amount} </div> </div> <button type="submit" className="bg-purple1 hover:bg-purple-700 w-full h-12 mt-2 text-white rounded-md" > Add </button> </Form> )} </Formik> </DialogContent> </Dialog> </div> ); }; export default BankTransferForm;
<div align="center">
  <h1 align="center">
    <img src="https://miro.medium.com/v2/resize:fit:800/1*3gOnS8dkzrKSbpnJSdDuQg.png" width= "200px">
    <br>Libft (is being updated)</br>
  </h1>
  <h3>Your very first own library</h3>
  <p align="center">
    <img src="https://img.shields.io/badge/Barcelona-100000?style=flat-square&logo=42&logoColor=white&labelColor=000000&color=000000" alt="42 Barcelona"/>
  </p>
</div>

## Table of Contents
- [Summary](#-summary)
- [Content](#-Content)
- [Installation](#-installation)

# Summary
Programming in C can be boring when one doesn't have access to the most commonly used functions. This project will allow you to understand how these functions work, how to implement them, and learn how to use them. You will create your own library, which will be very useful as you will use it in future C projects.

**!NOTE** <br />
Because of 42 School norm requirements: <br />
* All variables are declared and aligned at the top of each function <br />
* Each function can't have more than 25 lines of code <br />
* C++ style code commenting is forbidden <br />
* Project should be created just with allowed functions otherwise it's cheating. <br />

---

# Content

### The project consists of 3 main logical parts:
* Standard Libc functions
* Additional functions
* Bonus part functions

---

## Description of each function

### Libc functions

| Function | Description |
| ------------- | --------------------------------------------------------------------------------------|
| [ft_isalpha](./ft_isalpha.c) | Checks if a character is an alphabetic character. |
| [ft_isdigit](./ft_isdigit.c) | Verifies whether a character is a digit. |
| [ft_isalnum](./ft_isalnum.c) | Checks if a character is alphanumeric. |
| [ft_isascii](./ft_isascii.c) | Checks if a character is within the ASCII range. |
| [ft_isprint](./ft_isprint.c) | Printing character test (space character inclusive) |
| [ft_toupper](./ft_toupper.c) | Converts a lowercase alphabetic character to its uppercase equivalent. 
| | [ft_tolower](./ft_tolower.c) | Converts an uppercase alphabetic character to its lowercase equivalent. | | [ft_atoi](./ft_atoi.c) | Converts a string str to an integer value. It skips leading whitespace characters, handles an optional sign, and converts the remaining characters into an integer. | | [ft_memset](./ft_memset.c) | Sets each byte of the memory block pointed to by str to the value c, up to a specified length n. It returns the pointer str after modifying the memory block. | | [ft_bzero](./ft_bzero) | Sets the first n bytes of the memory pointed to by s to zero. | | [ft_memcpy](./ft_memcpy.c) | Copies n bytes from the memory area pointed to by src to the memory area pointed to by dest. It returns a pointer to the destination memory area. | | [ft_memmove](./ft_memmove.c) | Copies n bytes from the memory area pointed to by src to the memory area pointed to by dest. It handles overlapping memory regions correctly by using a temporary buffer. It returns a pointer to the destination memory area. | | [ft_memchr](./ft_memchr.c) | Searches for the first occurrence of the character c in the memory block pointed to by str, up to a specified length n. It iterates through the memory block using a while loop and checks if each byte matches c. If a match is found, it returns a pointer to that byte. If no match is found within the specified length, it returns NULL. | | [ft_memcmp](./ft_memcmp.c) | Compares the memory blocks pointed to by s1 and s2 up to a specified length n. It iterates through the memory blocks using a while loop, comparing each byte. If a byte in s1 is not equal to the corresponding byte in s2, it returns the difference between the two bytes. If all bytes are equal within the specified length, it returns 0 to indicate that the memory blocks are equal. | | [ft_strlen](./ft_strlen.c) | Calculates the length of a null-terminated string. 
It takes a pointer to a constant character array (str) as input.Finally, it returns the value of i, representing the length of the string. | | [ft_strdup](./ft_strdup.c) | Duplicates a string src by allocating memory for a new string, copying the contents of src into it, and returning the pointer to the new string. | | [ft_strlcpy](./ft_strlcpy.c) | Copies a null-terminated string from src to dest, ensuring proper null termination within a limited size of n. It returns the length of the source string. | [ft_strlcat](./ft_strlcat.c) | Appends a null-terminated string from src to the end of dest, ensuring proper null termination within a limited size of n. It returns the total length of the resulting string, considering the length of dest and the length of the appended src string. | | [ft_strchr](./ft_strchr.c) | Searches for the first occurrence of the character c in the string str. | | [ft_strrchr](./ft_strrchr.c) | Searches for the last occurrence of the character c in the string str. | | [ft_strnstr](./ft_strnstr.c) | Searches for the first occurrence of the string needle within the string haystack, up to a specified length n. | | [ft_strncmp](./ft_strncmp.c) | Compares the first n characters of the strings s1 and s2. | | [ft_calloc](./ft_calloc.c) | Allocates memory for an array of elements with a specified count and n size. It checks for overflow, allocates memory using malloc, and zeroes out the memory before returning the pointer to the allocated memory. ### Additional functions | Function | Description | | :--------------:| :----------:| | [ft_substr](./ft_substr.c) | Extracts a substring from a string s starting at index start with a specified length len. It allocates memory for the substring, copies the characters from s to the substring, and returns the pointer to the substring.| | [ft_strjoin](./ft_strjoin.c) | Concatenates two strings s1 and s2 into a new string. It checks if either s1 or s2 is NULL, returning NULL in that case. 
It allocates memory for the new string based on the combined length of s1 and s2, and then copies the characters from s1 and s2 into the new string. Finally, it adds a null terminator and returns the pointer to the new string.| | [ft_striteri](./ft_striteri.c) | Applies a function f to each character of the string s, along with its corresponding index. It checks if both s and f are valid pointers before proceeding. It initializes a counter i to keep track of the index and iterates over the characters of the string s, applying the function f with the current index and character.| | [ft_strtrim](./ft_strtrim.c) | Trims the leading and trailing characters from a string s1 that match any character in the set set. It allocates memory for the trimmed string, copies the non-matching characters from s1 to the new string, adds a null terminator, and returns the pointer to the trimmed string.| | [ft_split](./ft_split.c) | Splits the string s into an array of strings based on the delimiter character c. It counts the number of words in the string, allocates memory for the array of strings, and then splits the string into individual words, creating an array of strings. The resulting array of strings is returned.| | [ft_itoa](./ft_itoa.c) | Converts an integer n into a string representation. It determines the length of the number, allocates memory for the string, and converts each digit of the number into a character by dividing and modulo operations. The resulting string is returned, with a negative sign added if necessary.| | [ft_strmapi](./ft_strmapi.c) | Applies a function f to each character of the string s, generating a new string with the modified characters. It first determines the length of the input string and allocates memory for the new string. Then, it iterates over the characters of the input string, applying the function f and storing the result in the new string. 
The resulting modified string is returned.| [ft_putchar_fd](./ft_putchar_fd.c) | Writes the character c to the specified file descriptor fd. It uses the write system call to write a single character to the file descriptor. The character c is passed as the address of a memory location containing the character, and the size of 1 is specified to write exactly one byte.| | [ft_putstr_fd](./ft_putstr_fd.c) | Writes the string s to the specified file descriptor fd. It first checks if the string s is not null, and then uses the write system call to write the entire string to the file descriptor. The number of bytes to write is calculated with the use of a helper function "str_len", as each character requires exactly one byte.| | [ft_putendl_fd](./ft_putendl_fd.c) | Writes the string s to the specified file descriptor fd, followed by a newline character. It first checks if the string s is not null, and then uses the write system call to write the entire string to the file descriptor. The length of the string is obtained using the "str_len" function. After writing the string, it writes a newline character ('\n') to the file descriptor to move to the next line.| | [ft_putnbr_fd](./ft_putnbr_fd.c) | Writes an integer n to the specified file descriptor fd. It uses a custom base conversion mechanism to convert the integer into a string representation based on the decimal digits. The function checks for valid base input and handles negative numbers. It recursively divides the number by the base length and writes the corresponding character to the file descriptor until the entire number is converted.| --- ### Bonus | Function | Description | | :--------------:| :----------:| | [ft_lstnew](./srcs/list/ft_lstnew_bonus.c) | Creates a new node for a linked list and initializes it with the given 'content'. It allocates memory for the new node using 'malloc', and if the allocation fails, it returns NULL. 
The 'content' is set to the provided input, and the 'next' pointer is initialized to NULL. Finally, it returns a pointer to the newly created node.| | [ft_lstadd_front](./srcs/list/ft_lstadd_front_bonus.c) | Adds the given element 'new' to the front of the linked list pointed to by 'lst'. If 'lst' or 'new' is NULL, the function returns. The 'next' pointer of 'new' is set to the current first element of the list, and 'new' becomes the new first element by updating the 'lst' pointer to point to it.| | [ft_lstsize](./srcs/list/ft_lstsize_bonus.c) | Calculates and returns the number of nodes in a given linked list 'lst'. It initializes a variable 'i' to 0 and then iterates through the list using a loop, incrementing 'i' for each node encountered until the end of the list is reached (lst becomes NULL). The function then returns the final value of 'i', which represents the total number of nodes in the list.| | [ft_lstlast](./srcs/list/ft_lstlast_bonus.c) | Returns a pointer to the last node of a linked list 'lst'. If 'lst' is NULL (i.e., the list is empty), it returns NULL. The function iterates through the list using a loop, moving the 'lst' pointer to the next node until it reaches the last node (where 'next' is NULL). Then, it returns the 'lst' pointer, which now points to the last node of the list.| | [ft_lstadd_back](./srcs/list/ft_lstadd_back_bonus.c) | Adds the given element 'new' to the end of the linked list pointed to by 'lst'. If 'lst' or 'new' is NULL, the function returns. If 'lst' is an empty list, 'new' becomes the first element. Otherwise, the function iterates through the list until the last element is reached and links 'new' to it.| | [ft_lstdelone](./srcs/list/ft_lstdelone_bonus.c) | Deletes a single node 'lst' from the linked list and frees its content using the 'del' function, then deallocates the memory for the node. However, it fails to remove 'lst' from the list fully, as it only sets the local 'lst' pointer to NULL. 
The list re-linking must be done externally.| | [ft_lstclear](./srcs/list/ft_lstclear_bonus.c) | Clears the entire linked list pointed to by 'lst' and deallocates memory for each node. The 'del' function is used to free the memory associated with the content of each node. If 'lst' is NULL, 'del' is NULL, or the list is already empty, the function returns. The function recursively calls itself with the next node until the end of the list is reached. Then, it frees the content of the current node, deallocates the node itself, and sets 'lst' to NULL.| | [ft_lstiter](./srcs/list/ft_lstiter_bonus.c) | Iterates through a linked list 'lst' and applies the function 'f' to each node's content. If 'lst' or 'f' is NULL, the function returns. The loop runs while there are still nodes in the list. Within each iteration, the 'f' function is called with the content of the current node, and the 'lst' pointer is moved to the next node in the list.| | [ft_lstmap](./ft_lstmap_bonus.c) | Creates a new linked list by applying the function 'f' to the content of each node in the input list 'lst'. If 'lst', 'f', or 'del' is NULL or if memory allocation fails, it returns NULL. It uses 'lst_new' to create new nodes and links them together to form the resulting list. If any memory allocation fails during the process, it clears the already created nodes and returns NULL. The 'del' function is used to free the memory allocated for each node's content if needed.| --- ## Installation 1. Clone the Libft repository: ```sh git clone ``` 2. Change to the project directory: ```sh cd libft ``` 3. Compile the library: Using Makefile you can create library file libft.a<br/> Makefile has 4 main options:<br/> ```sh make ``` to compile C files - create object files and library libft.a ```sh make clean ``` to remove object files. ```sh make fclean ``` remove libft.a file. ```sh make re ``` recompile the library. [**Return**](#Top)
#ifndef BLOOM_FILTER_HPP
#define BLOOM_FILTER_HPP

#include <algorithm>
#include <cmath>
#include <cstdint>
#include <vector>

namespace BF {

// Container for the k 64-bit hash values produced per key.
typedef std::vector<std::uint64_t> hashes;

// 128-bit MurmurHash3 (x64 variant) adapted to emit k hash values per key
// instead of one 128-bit digest. NOTE: the bit-level operations below must
// match the reference implementation exactly — do not "clean up" the
// fallthrough switch or the mixing constants.
class murmur3 {
public:
    // taken from: https://github.com/aappleby/smhasher/blob/master/src/MurmurHash3.cpp
    //
    // Hashes `len` bytes at `key` and appends `k` hash values to `out`.
    // The first two values are the two 64-bit halves of the MurmurHash3
    // digest; the remaining k-2 are derived from them (see below), so only
    // one full pass over the input is ever made.
    void operator()(const void* key, const std::uint64_t len, std::uint64_t k,
                    hashes& out, const std::uint32_t seed = 0xbeefeebb) const
    {
        // do not do any work if it is not needed...
        if (k == 0)
            return;

        const std::uint8_t* data = (const std::uint8_t*)key;
        const std::uint64_t nblocks = len / 16; // number of full 16-byte blocks

        std::uint64_t h1 = seed;
        std::uint64_t h2 = seed;

        // MurmurHash3 mixing constants (fixed by the algorithm).
        const std::uint64_t c1 = 0x87c37b91114253d5LLU;
        const std::uint64_t c2 = 0x4cf5ad432745937fLLU;

        // Body: consume the input two 64-bit words at a time.
        const std::uint64_t* blocks = (const std::uint64_t*)(data);
        for (std::uint64_t i = 0; i < nblocks; i++) {
            std::uint64_t k1 = getblock64(blocks, i * 2 + 0);
            std::uint64_t k2 = getblock64(blocks, i * 2 + 1);
            k1 *= c1;
            k1 = ROTL64(k1, 31);
            k1 *= c2;
            h1 ^= k1;
            h1 = ROTL64(h1, 27);
            h1 += h2;
            h1 = h1 * 5 + 0x52dce729;
            k2 *= c2;
            k2 = ROTL64(k2, 33);
            k2 *= c1;
            h2 ^= k2;
            h2 = ROTL64(h2, 31);
            h2 += h1;
            h2 = h2 * 5 + 0x38495ab5;
        }

        // Tail: the remaining len % 16 bytes. The case fallthrough is
        // intentional — each case accumulates one byte and falls into the
        // next, exactly as in the reference implementation.
        const std::uint8_t* tail = (const std::uint8_t*)(data + nblocks * 16);
        std::uint64_t k1 = 0;
        std::uint64_t k2 = 0;
        switch (len & 15) {
        case 15: k2 ^= ((std::uint64_t)tail[14]) << 48;
        case 14: k2 ^= ((std::uint64_t)tail[13]) << 40;
        case 13: k2 ^= ((std::uint64_t)tail[12]) << 32;
        case 12: k2 ^= ((std::uint64_t)tail[11]) << 24;
        case 11: k2 ^= ((std::uint64_t)tail[10]) << 16;
        case 10: k2 ^= ((std::uint64_t)tail[9]) << 8;
        case 9:
            k2 ^= ((std::uint64_t)tail[8]) << 0;
            k2 *= c2;
            k2 = ROTL64(k2, 33);
            k2 *= c1;
            h2 ^= k2;
        case 8: k1 ^= ((std::uint64_t)tail[7]) << 56;
        case 7: k1 ^= ((std::uint64_t)tail[6]) << 48;
        case 6: k1 ^= ((std::uint64_t)tail[5]) << 40;
        case 5: k1 ^= ((std::uint64_t)tail[4]) << 32;
        case 4: k1 ^= ((std::uint64_t)tail[3]) << 24;
        case 3: k1 ^= ((std::uint64_t)tail[2]) << 16;
        case 2: k1 ^= ((std::uint64_t)tail[1]) << 8;
        case 1:
            k1 ^= ((std::uint64_t)tail[0]) << 0;
            k1 *= c1;
            k1 = ROTL64(k1, 31);
            k1 *= c2;
            h1 ^= k1;
        };

        // Finalization: avalanche both halves.
        h1 ^= len;
        h2 ^= len;
        h1 += h2;
        h2 += h1;
        h1 = fmix64(h1);
        h2 = fmix64(h2);
        h1 += h2;
        h2 += h1;

        if (k == 1)
            out.push_back(h1);
        else if (k > 1) {
            out.push_back(h1);
            out.push_back(h2);
            // apply the Kirsch-Mitzenmacher-Optimization
            // (derive hash i as h1 + i*h2 instead of rehashing the key).
            for (std::uint64_t i = 3; i <= k; ++i) {
                auto g = h1 + i * h2;
                out.push_back(g);
                std::swap(h1, h2);
                std::swap(h2, g);
            }
        }
    }

private:
    // 64-bit rotate left.
    inline std::uint64_t ROTL64(std::uint64_t x, std::int8_t r) const
    {
        return (x << r) | (x >> (64 - r));
    }

    // MurmurHash3 64-bit finalization mix ("fmix64").
    inline std::uint64_t fmix64(std::uint64_t k) const
    {
        k ^= k >> 33;
        k *= 0xff51afd7ed558ccdLLU;
        k ^= k >> 33;
        k *= 0xc4ceb9fe1a85ec53LLU;
        k ^= k >> 33;
        return k;
    }

    // Reads block i. NOTE(review): this performs an unaligned/endian-naive
    // load via pointer cast, as in the reference code — results differ
    // between little- and big-endian hosts.
    inline std::uint64_t getblock64(const std::uint64_t* p, std::uint64_t i) const
    {
        return p[i];
    }
};

// Standard Bloom filter over a byte-packed bit array.
//   m = number of bits, k = hashes per key, n = expected element count,
//   p = target false-positive probability.
// Must be configured via config()/from() before add()/contains() are usable.
template <typename hasher = murmur3>
class bloom_filter {
public:
    // Default-constructed filter is unconfigured; add()/contains() return false.
    bloom_filter()
        : m(0)
        , k(0)
        , n(0)
        , p(0.0)
    {
    }

    bloom_filter(const bloom_filter& other) = default;
    bloom_filter& operator=(const bloom_filter& other) = default;

    // Move: steal the bit array and zero out the source's parameters.
    bloom_filter(bloom_filter&& other)
        : m(other.m)
        , k(other.k)
        , n(other.n)
        , p(other.p)
    {
        if (!other.bits.empty())
            std::swap(bits, other.bits);
        other.m = other.k = other.n = other.p = 0;
    }

    bloom_filter& operator=(bloom_filter&& other)
    {
        if (this != &other) {
            m = other.m;
            k = other.k;
            n = other.n;
            p = other.p;
            if (!other.bits.empty()) {
                std::swap(bits, other.bits);
                other.bits.clear(); // in case it is not empty
            }
            other.m = other.k = other.n = other.p = 0;
        }
        return *this;
    }

    // Configure from explicit parameters (m bits, k hashes, n expected
    // elements); p is derived. Resets the bit array. Returns false on any
    // zero parameter.
    bool config(std::uint64_t m, std::uint64_t k, std::uint64_t n)
    {
        if (m == 0 || k == 0 || n == 0)
            return false;
        this->m = m;
        this->k = k;
        this->n = n;
        this->p = compute_p(m, k, n);
        // Bytes needed to hold m bits (round up on a partial byte).
        const std::uint64_t byte_count = m / 8 + static_cast<bool>(m & 7);
        bits.clear();
        bits.resize(byte_count > 0 ? byte_count : 1, 0);
        return true;
    }

    // Configure from n and a target false-positive rate p in (0, 1);
    // m and k are derived with the standard formulas. Resets the bit array.
    bool config(std::uint64_t n, double p)
    {
        if (p >= 1.0 || p <= 0.0 || n == 0)
            return false;
        this->n = n;
        this->p = p;
        m = compute_m(n, p);
        k = compute_k(m, n);
        const std::uint64_t byte_count = m / 8 + static_cast<bool>(m & 7);
        bits.clear();
        bits.resize(byte_count > 0 ? byte_count : 1, 0);
        return true;
    }

    // Create a bf from the components of an existing one.
    // Deep copies the values from the raw byte pointer.
    // raw_size must equal the byte count implied by m; returns false on
    // invalid parameters or a size mismatch.
    bool from(std::uint64_t m, std::uint64_t k, std::uint64_t n, double p,
              const std::uint8_t* raw, std::uint64_t raw_size)
    {
        if (p >= 1.0 || p <= 0.0 || n == 0)
            return false;
        const std::uint64_t byte_count = m / 8 + static_cast<bool>(m & 7);
        if (!raw || raw_size == 0 || byte_count != raw_size)
            return false;
        this->n = n;
        this->p = p;
        this->m = m;
        this->k = k;
        bits.clear();
        bits.reserve(raw_size);
        std::copy(raw, raw + raw_size, std::back_inserter(bits));
        return true;
    }

    // --- Accessors -------------------------------------------------------
    std::uint64_t bit_count() const { return m; }
    std::uint64_t hash_count() const { return k; }
    std::uint64_t expected_elements() const { return n; }
    double false_positive() const { return p; }
    std::size_t size() const { return bits.size(); } // in bytes

    // Pointer to the packed bit array (nullptr when unconfigured).
    const std::uint8_t* raw() const
    {
        if (bits.empty())
            return nullptr;
        return bits.data();
    }

    // Insert a key: set the k bits selected by hashing the key.
    // Returns false when the filter is unconfigured or the hasher
    // produced fewer than k values.
    bool add(const void* key, const std::uint64_t len)
    {
        if (m == 0 || k == 0 || n == 0 || p == 0.0)
            return false;
        hashes hash_values;
        hash_values.reserve(k);
        h(key, len, k, hash_values);
        if (k != hash_values.size())
            return false;
        for (std::uint64_t i = 0; i < k; ++i) {
            const std::uint64_t abs_bit_id = hash_values[i] % m;
            const std::uint64_t byte_id = abs_bit_id / 8;
            bits[byte_id] |= BIT_POS[abs_bit_id & 7];
        }
        return true;
    }

    // Membership test: true iff every one of the k selected bits is set.
    // May return false positives (probability ~p) but never false negatives.
    bool contains(const void* key, const std::uint64_t len) const
    {
        if (m == 0 || k == 0 || n == 0 || p == 0.0)
            return false;
        hashes hash_values;
        hash_values.reserve(k);
        h(key, len, k, hash_values);
        if (k != hash_values.size())
            return false;
        for (std::uint64_t i = 0; i < k; ++i) {
            const std::uint64_t abs_bit_id = hash_values[i] % m;
            const std::uint64_t byte_id = abs_bit_id / 8;
            if (!(bits[byte_id] & BIT_POS[abs_bit_id & 7]))
                return false;
        }
        return true;
    }

    // Union with another filter: ORs the bit arrays. Only valid when both
    // filters share identical parameters (m, k, n, p) and sizes.
    bool merge(const bloom_filter& other)
    {
        if (m == 0 || k == 0 || n == 0 || p == 0.0)
            return false;
        if (m != other.m || k != other.k || n != other.n || p != other.p
            || bits.size() != other.bits.size())
            return false;
        for (std::uint64_t i = 0; i < bits.size(); ++i)
            bits[i] |= other.bits[i];
        return true;
    }

private:
    // Mask for bit position 0..7 within a byte (LSB-first).
    static constexpr std::uint8_t BIT_POS[8] = { 0x1u, 0x2u, 0x4u, 0x8u, 0x10u, 0x20u, 0x40u, 0x80u };

    std::uint64_t m; // size in bits
    std::uint64_t k; // number of hashes
    std::uint64_t n; // expected number of elements
    double p; // false positive probability(> 0 && < 1)
    std::vector<std::uint8_t> bits; // packed bit array, size() bytes
    hasher h; // hash functor producing k values per key

    // m = ceil(n * ln(p) / ln(1 / 2^ln 2)) — optimal bit count for n, p.
    inline std::uint64_t compute_m(std::uint64_t n, double p) const
    {
        return std::ceil((n * std::log(p)) / std::log(1.0 / std::pow(2.0, std::log(2.0))));
    }

    // k = round(m/n * ln 2) — optimal hash count for m, n.
    inline std::uint64_t compute_k(std::uint64_t m, std::uint64_t n) const
    {
        return std::round((static_cast<double>(m) / n) * std::log(2.0));
    }

    // p = (1 - e^(-k*n/m))^k — expected false-positive rate.
    inline double compute_p(std::uint64_t m, std::uint64_t k, std::uint64_t n) const
    {
        return std::pow(1.0 - std::exp((-static_cast<double>(k) * n) / m), k);
    }
};

} // BF

#endif // BLOOM_FILTER_HPP
import React, { useState } from 'react'; import { Link, useNavigate } from 'react-router-dom'; import Login from './login'; import supabase from '../config/supabaseClient'; import logo from "../assets/logo.png" const SignUp = () => { const navigate = useNavigate(); const [formData, setFormData] = useState({ fullName: '', email: '', password: '' }); const [signupSuccess, setSignupSuccess] = useState(false); const [selectedOption, setSelectedOption] = useState(null); const handleChange = (event) => { setFormData((prevFormData) => ({ ...prevFormData, [event.target.name]: event.target.value })); }; const handleSubmit = async (e) => { e.preventDefault(); try { const { data, error } = await supabase.auth.signUp({ email: formData.email, password: formData.password, options: { data: { fullName: formData.fullName, role: selectedOption // Assuming role will be stored in user data } } }); if (error) { throw error; } else { alert('Signup Success'); setSignupSuccess(true); } } catch (error) { alert(error.message); } }; if (signupSuccess) { navigate('/'); } const renderForm = () => { return ( <form style={styles.form} onSubmit={handleSubmit}> <input placeholder="Full Name" name="fullName" style={styles.input} onChange={handleChange} required /> <input placeholder="Email" name="email" style={styles.input} type="email" onChange={handleChange} required /> <input placeholder="Password" name="password" type="password" style={styles.input} onChange={handleChange} required /> <button style={styles.button} type="submit"> Submit </button> </form> ); }; return ( <div> <div style={styles.header}> <div style={styles.logoContainer}> <Link to="/"> <img src={logo} alt="Logo" style={styles.logo} /> </Link> <div style={styles.textContainer}> <p style={styles.universityName}>Florida Atlantic University</p> <p style={styles.systemName}>Graduate Teaching Assistantship Management System</p> </div> </div> <div> <h1>Signup as</h1> </div> <div style={styles.container}> <div style={styles.card} 
onClick={() => setSelectedOption('applicant')}> <h2>Applicant</h2> {selectedOption === 'applicant' && renderForm()} </div> <div style={styles.card} onClick={() => setSelectedOption('admin')}> <h2>Administrator</h2> {selectedOption === 'admin' && renderForm()} </div> <div style={styles.card} onClick={() => setSelectedOption('committee')}> <h2>Committee Member</h2> {selectedOption === 'committee' && renderForm()} </div> <div style={styles.card} onClick={() => setSelectedOption('instructor')}> <h2>Instructor</h2> {selectedOption === 'instructor' && renderForm()} </div> </div> <div style={styles.centeredParagraph}> <p> Don't have an account? <Link to="/" style={styles.link}>Login</Link> </p> </div> </div> </div> ); }; const styles = { container: { display: 'flex', justifyContent: 'center', alignItems: 'center', flexDirection: 'row', // Change from 'column' to 'row' flexWrap: 'wrap', // Allow cards to wrap to the next row if needed }, card: { flex: '1', // Make the card flexible to cover extra space minWidth: '250px', // Minimum width for the card padding: '50px', margin: '10px', backgroundColor: '#f0f0f0', borderRadius: '10px', textAlign: 'center', cursor: 'pointer', }, form: { width: '100%', display: 'flex', flexDirection: 'column', alignItems: 'center', }, input: { display: 'block', width: '100%', marginBottom: '10px' }, button: { width: '100%', padding: '10px', backgroundColor: '#007bff', color: '#fff', border: 'none', borderRadius: '5px', cursor: 'pointer' }, centeredParagraph: { textAlign: 'center', marginTop: '20px', }, link: { color: '#007bff' }, header: { padding: '60px', textAlign: 'center', }, logoContainer: { display: 'flex', alignItems: 'center', justifyContent: 'center', }, logo: { width: '250px', // Adjust width as needed height: 'auto', // Maintain aspect ratio marginRight: '10px', }, textContainer: { textAlign: 'left', }, universityName: { margin: '0', fontWeight: 'bold', fontSize: '54px', }, systemName: { margin: '0', fontSize: '38px', color: '#555', 
}, }; export default SignUp;
/* SymTable: an abstract symbol table mapping string keys to client
   void* values. The representation is opaque; clients use only the
   SymTable_T handle. */
typedef struct SymTable *SymTable_T;

/* Create and return a new allocated, empty SymTable_T.
   NOTE(review): behavior on allocation failure is not visible in this
   header -- confirm whether NULL is returned. */
SymTable_T SymTable_new(void);

/* Free all memory allocated for oSymTable.
   If oSymTable == NULL do nothing. */
void SymTable_free(SymTable_T oSymTable);

/* Return the number of bindings currently stored in oSymTable. */
unsigned int SymTable_getLength(SymTable_T oSymTable);

/* Insert a binding (pcKey -> pvValue) into oSymTable. Asserts that
   oSymTable and pcKey are not NULL. Returns 1 on success, or 0 if a
   binding whose key compares equal to pcKey already exists. */
int SymTable_put(SymTable_T oSymTable, const char *pcKey, const void *pvValue);

/* Look up and remove the binding whose key compares equal to pcKey.
   Return 1 on success, 0 if not found. */
int SymTable_remove(SymTable_T oSymTable, const char *pcKey);

/* Return 1 if oSymTable contains a binding whose key compares equal to
   pcKey, otherwise 0. */
int SymTable_contains(SymTable_T oSymTable, const char *pcKey);

/* Look up and return the pvValue of the binding whose key compares
   equal to pcKey, or NULL if no such binding exists. Asserts oSymTable
   and pcKey not being NULL. */
void *SymTable_get(SymTable_T oSymTable, const char *pcKey);

/* Apply pfApply(pcKey, pvValue, pvExtra) to every binding in oSymTable.
   Asserts oSymTable and pfApply not being NULL. */
void SymTable_map(SymTable_T oSymTable, void (*pfApply)(const char *pcKey, void *pvValue, void *pvExtra), const void *pvExtra);
package music;

import java.io.File;
import java.io.IOException;

import javax.sound.sampled.AudioFormat;
import javax.sound.sampled.AudioInputStream;
import javax.sound.sampled.AudioSystem;
import javax.sound.sampled.Clip;
import javax.sound.sampled.DataLine;
import javax.sound.sampled.LineEvent;
import javax.sound.sampled.LineListener;
import javax.sound.sampled.LineUnavailableException;
import javax.sound.sampled.UnsupportedAudioFileException;

/**
 * Plays back an audio file using the Clip in Java Sound API; playback runs
 * asynchronously until {@link #close()} is called.
 * Originally based on an example from www.codejava.net.
 *
 * NOTE(review): the class name should be PascalCase (AudioPlayer) by Java
 * convention, but renaming would break existing callers, so it is kept.
 */
public class audioPlayer implements LineListener {

	/**
	 * this flag indicates whether the playback completes or not.
	 */
	boolean playCompleted;

	AudioInputStream audioStream;
	AudioFormat format;
	DataLine.Info info;
	Clip audioClip;

	/**
	 * Play a given audio file asynchronously. The clip keeps playing in the
	 * background; call {@link #close()} to stop it and release resources.
	 * @param audioFilePath Path of the audio file.
	 */
	public void play(String audioFilePath) {
		File audioFile = new File(audioFilePath);

		try {
			audioStream = AudioSystem.getAudioInputStream(audioFile);
			format = audioStream.getFormat();
			info = new DataLine.Info(Clip.class, format);
			audioClip = (Clip) AudioSystem.getLine(info);
			audioClip.addLineListener(this);
			audioClip.open(audioStream);
			audioClip.start();

			/** Makes it wait for playback to be completed
			while (!playCompleted) {
				// wait for the playback completes
				try {
					Thread.sleep(1000);
				} catch (InterruptedException ex) {
					ex.printStackTrace();
				}
			}
			audioClip.close();
			*/

		} catch (UnsupportedAudioFileException ex) {
			System.out.println("The specified audio file is not supported.");
			ex.printStackTrace();
		} catch (LineUnavailableException ex) {
			System.out.println("Audio line for playing back is unavailable.");
			ex.printStackTrace();
		} catch (IOException ex) {
			System.out.println("Error playing the audio file.");
			ex.printStackTrace();
		}
	}

	/**
	 * Listens to the START and STOP events of the audio line.
	 */
	@Override
	public void update(LineEvent event) {
		LineEvent.Type type = event.getType();
		if (type == LineEvent.Type.START) {
			System.out.println("Playback started.");
		} else if (type == LineEvent.Type.STOP) {
			playCompleted = true;
			System.out.println("Playback completed.");
		}
	}

	/**
	 * Stop playback and release the clip and the underlying audio stream.
	 * FIX: guarded against NPE when play() was never called or failed before
	 * opening the line, and the input stream is now closed (was leaked).
	 */
	public void close() {
		if (audioClip != null) {
			audioClip.close();
		}
		if (audioStream != null) {
			try {
				audioStream.close();
			} catch (IOException ex) {
				ex.printStackTrace();
			}
		}
	}

	/**
	public static void main(String[] args) {
		//String audioFilePath = "Z:/thingsBesidesTurnIn/ogpcSongs/menuSongWav.wav"; //WORKS
		String audioFilePath = "res/menuSongWav.wav";
		audioPlayer player = new audioPlayer();
		player.play(audioFilePath);
	}
	*/
}
// import React from 'react' import { skills } from '../data/Skills'; const container = `bg-gradient-to-t from-slate-950 to-slate-900 text-gray-100 pt-6`; const wrapper = `flex flex-col justify-center items-center py-20 md:py-16 px-10 md:py-10 gap-4`; const title = "text-4xl font-bold text-sky-500"; const desc = "text-xl text-center"; const skillContainer = "flex flex-col p-3 gap-6 md:grid grid-cols-2 grid-rows-auto"; const skillWrapper = "flex flex-col justify-center items-center bg-gray-800 py-4 p-3 rounded-xl border border-sky-800 shadow-md hover:shadow-cyan-500 hover:scale-105 hover:border-sky-500 duration-500"; const skillTitle = "text-sky-500 text-2xl font-semibold text-center my-3"; const skillList = "flex flex-wrap gap-3 justify-center items-center px-3 py-4"; const oneSkillStyle = "flex items-center flex-wrap p-2 border-2 border-gray-700 rounded-xl gap-2"; const skillImage = "h-8 w-auto"; const skillName = "text-gray-300"; function Skills() { return ( <> <div name="skills" className={container}> <div className={wrapper}> {/* <div className={title}>Skills</div> */} <h2 className={title}> Our <span>Skills</span> </h2> <div className={desc}> Here are some of my skills, on which I have worked{" "} </div> <div className={skillContainer}> {skills.map((oneSection) => ( <div className={skillWrapper} key={oneSection.id}> <div className={skillTitle}>{oneSection.title}</div> <div className={skillList}> {oneSection.skill.map((item) => ( <div className={oneSkillStyle} key={item.id}> <img className={skillImage} src={item.image} alt="" /> <span className={skillName}>{item.name}</span> </div> ))} </div> </div> ))} </div> </div> </div> </> ); } export default Skills
<h2> Make the characters glow when the mouse is over them </h2>
To make text glow when the mouse hovers over it, set the "opacity" property in the a:hover style. This takes advantage of the fact that the link appears to shine as its transparency increases. It's a good idea to fine-tune the opacity starting from around 0.8.
<style>
.mouse-over-light a:hover {
  opacity: 0.8;
}
</style>
<div class="mouse-over-light">
<a href="/"> Shines when hovering</a>
</div>
<pre>
<style>
.mouse-over-light a:hover {
  opacity: 0.8;
}
</style>
<div class="mouse-over-light">
<a href="/"> Shines when hovering</a>
</div>
</pre>
<h3> What is the effect of shining when hovering? </h3>
When you move the mouse over the link, it glows, which intuitively signals that it can be clicked. On smartphones there is no hover state, so treat this as a supplementary effect and design the element so that it looks clickable just by looking at it.
import { describe, it, expect, vi } from "vitest"; import { DEFAULT_VALUE } from "../constants"; import type { Dependency } from "../packageUtils"; import { getUniqueProblems, determineResolutionType, semverReverseSort, findPossibleResolution, formatResolution, } from "./solution"; const mocks = vi.hoisted(() => { return { execSync: vi.fn(), }; }); const defaultDependency: Dependency = { name: "dep1", type: "dependencies", version: "^1.0.0", isPeerDevDependency: false, isPeerOptionalDependency: false, depender: { name: "test-package", version: "1.0.0", packagePath: "/test-package", dependencies: [], devDependencies: [], optionalDependencies: [], peerDependencies: [], }, }; vi.mock("node:child_process", () => { return { execSync: mocks.execSync, }; }); describe("getUniqueProblems", () => { it("should return unique problems", () => { const problems: Dependency[] = [ defaultDependency, defaultDependency, { name: "dep2", type: "dependencies", version: "^2.0.0", isPeerDevDependency: false, isPeerOptionalDependency: false, depender: { name: "test-package", version: "1.0.0", packagePath: "/test-package", dependencies: [], devDependencies: [], optionalDependencies: [], peerDependencies: [], }, }, ]; const result = getUniqueProblems(problems); // unique problems are dep1 and dep2 expect(result).toHaveLength(2); expect(result).toContainEqual(problems[0]); expect(result).toContainEqual(problems[2]); }); }); describe("determineResolutionType", () => { it('should return "upgrade" if installedVersion is present', () => { const problem: Dependency = { name: "dep1", type: "dependencies", version: "^1.0.0", installedVersion: "1.0.0", isPeerDevDependency: false, isPeerOptionalDependency: false, depender: { name: "test-package", version: "1.0.0", packagePath: "/test-package", dependencies: [], devDependencies: [], optionalDependencies: [], peerDependencies: [], }, }; expect(determineResolutionType(problem)).toBe("upgrade"); }); it('should return "devInstall" if isPeerDevDependency is 
true', () => { const problem: Dependency = { name: "dep1", type: "dependencies", version: "^1.0.0", isPeerDevDependency: true, isPeerOptionalDependency: false, depender: { name: "test-package", version: "1.0.0", packagePath: "/test-package", dependencies: [], devDependencies: [], optionalDependencies: [], peerDependencies: [], }, }; expect(determineResolutionType(problem)).toBe("devInstall"); }); it('should return "install" if neither installedVersion is present nor isPeerDevDependency is true', () => { const problem: Dependency = { name: "dep1", type: "dependencies", version: "^1.0.0", isPeerDevDependency: false, isPeerOptionalDependency: false, depender: { name: "test-package", version: "1.0.0", packagePath: "/test-package", dependencies: [], devDependencies: [], optionalDependencies: [], peerDependencies: [], }, }; expect(determineResolutionType(problem)).toBe("install"); }); }); describe("semverReverseSort", () => { it("should sort semver versions in reverse order", () => { const versions = ["1.0.0", "2.0.0", "1.2.0", "2.1.0", "1.1.0"]; const sortedVersions = versions.sort(semverReverseSort); expect(sortedVersions).toEqual([ "2.1.0", "2.0.0", "1.2.0", "1.1.0", "1.0.0", ]); }); }); describe("findPossibleResolution", () => { it("should find a possible resolution for a package with pre-release", () => { const packageName = "dep1"; const allPeerDeps: Dependency[] = [defaultDependency]; vi.mocked(mocks.execSync).mockReturnValueOnce( JSON.stringify(["2.0.1", "1.0.0", "3.0.0", "1.5.2-beta.1"]), ); const resolution = findPossibleResolution(packageName, allPeerDeps, { ...DEFAULT_VALUE, includePrerelease: true, }); expect(resolution).toBe("1.5.2-beta.1"); }); it("should find a possible resolution for a package without pre-release", () => { const packageName = "dep1"; const allPeerDeps: Dependency[] = [defaultDependency]; vi.mocked(mocks.execSync).mockReturnValueOnce( JSON.stringify(["2.0.1", "1.0.0", "3.0.0", "1.5.2-beta.1"]), ); const resolution = 
findPossibleResolution(packageName, allPeerDeps, { ...DEFAULT_VALUE, includePrerelease: false, }); expect(resolution).toBe("1.0.0"); }); }); describe("formatResolution", () => { it("should format the resolution correctly", () => { const problem = defaultDependency; const resolutionVersion = "1.0.0"; const formattedResolution = formatResolution(problem, resolutionVersion); expect(formattedResolution).toBe(`${problem.name}@${resolutionVersion}`); }); it("should return an empty string if resolutionVersion is not provided", () => { const problem = defaultDependency; const formattedResolution = formatResolution(problem, ""); expect(formattedResolution).toBe(""); }); });
package com.example.dentistryapp.ui.appointments

import android.annotation.SuppressLint
import androidx.compose.foundation.layout.Arrangement
import androidx.compose.foundation.layout.PaddingValues
import androidx.compose.foundation.layout.fillMaxWidth
import androidx.compose.foundation.layout.padding
import androidx.compose.foundation.lazy.LazyColumn
import androidx.compose.foundation.lazy.items
import androidx.compose.material.icons.Icons
import androidx.compose.material.icons.outlined.Medication
import androidx.compose.material3.Card
import androidx.compose.material3.CardDefaults
import androidx.compose.material3.CenterAlignedTopAppBar
import androidx.compose.material3.ExperimentalMaterial3Api
import androidx.compose.material3.Icon
import androidx.compose.material3.IconButton
import androidx.compose.material3.Scaffold
import androidx.compose.material3.Text
import androidx.compose.runtime.Composable
import androidx.compose.runtime.LaunchedEffect
import androidx.compose.runtime.getValue
import androidx.compose.runtime.livedata.observeAsState
import androidx.compose.ui.Modifier
import androidx.compose.ui.text.style.TextOverflow
import androidx.compose.ui.unit.dp
import androidx.compose.ui.unit.sp
import androidx.hilt.navigation.compose.hiltViewModel
import androidx.navigation.NavController
import com.example.dentistryapp.ui.greeting.GreetingScreen
import com.example.dentistryapp.ui.model.AppointmentUi

/**
 * Shows the current user's appointments in a lazy list, or the greeting
 * (registration) screen when the user is not registered yet.
 */
@OptIn(ExperimentalMaterial3Api::class)
@SuppressLint("UnusedMaterial3ScaffoldPaddingParameter")
@Composable
fun AppointmentsScreen(
    navController: NavController,
    viewModel: AppointmentsViewModel = hiltViewModel()
) {
    if (!viewModel.isRegistered()) {
        GreetingScreen(navController = navController)
    } else {
        // FIX: getAppointments() was called directly in composition, which
        // re-triggers the load on every recomposition; LaunchedEffect runs it
        // once when this branch enters composition.
        LaunchedEffect(Unit) {
            viewModel.getAppointments()
        }
        val appointments by viewModel.appointments.observeAsState(emptyList())
        Scaffold(
            topBar = {
                CenterAlignedTopAppBar(
                    title = {
                        Text(
                            "Ваши записи",
                            maxLines = 1,
                            overflow = TextOverflow.Ellipsis
                        )
                    },
                    actions = {
                        IconButton(onClick = { /* doSomething() */ }) {
                            Icon(
                                imageVector = Icons.Outlined.Medication,
                                contentDescription = "Localized description"
                            )
                        }
                    }
                )
            }
        ) { innerPadding ->
            LazyColumn(
                verticalArrangement = Arrangement.spacedBy(12.dp),
                contentPadding = PaddingValues(24.dp),
                modifier = Modifier.padding(innerPadding)
            ) {
                // FIX: use the LazyColumn items() DSL instead of
                // forEach { item { ... } } so rows are emitted lazily.
                items(appointments) { appointment ->
                    AppointmentItem(item = appointment)
                }
            }
        }
    }
}

/** A single appointment card showing its date and begin/end times. */
@OptIn(ExperimentalMaterial3Api::class)
@Composable
private fun AppointmentItem(
    item: AppointmentUi
) {
    Card(
        elevation = CardDefaults.cardElevation(
            defaultElevation = 10.dp
        ),
        modifier = Modifier
            .fillMaxWidth(),
        onClick = { }
    ) {
        Text(
            text = item.date,
            modifier = Modifier
                .padding(start = 16.dp, top = 16.dp),
            fontSize = 24.sp,
            lineHeight = 36.sp
        )
        Text(
            text = item.timeBegin,
            modifier = Modifier
                .padding(start = 16.dp, top = 16.dp, bottom = 16.dp),
            fontSize = 24.sp,
            lineHeight = 36.sp
        )
        Text(
            text = item.timeEnd,
            modifier = Modifier
                .padding(start = 16.dp, bottom = 16.dp),
            fontSize = 24.sp,
            lineHeight = 36.sp
        )
    }
}
import { addMonths, getMonth, getQuarter, getYear, subDays } from "date-fns";

import type { CalendarEvent } from "./calendar-parser";

/** Keep events whose start falls in the given calendar year. */
export const filterByYearPredicate =
  (year: number) =>
  (event: CalendarEvent): boolean =>
    getYear(event.start) === year;

/** Keep events whose start falls in the given month (0-based, per date-fns getMonth). */
export const filterByMonthPredicate =
  (month: number) =>
  (event: CalendarEvent): boolean =>
    getMonth(event.start) === month;

/** Keep events whose start falls in the given quarter (1-4, per date-fns getQuarter). */
export const filterByQuarterPredicate =
  (quarter: number) =>
  (event: CalendarEvent): boolean =>
    getQuarter(event.start) === quarter;

/**
 * Keep events starting between 3 days ago and `months` months from now.
 * NOTE(review): compares event.start with millisecond timestamps -- assumes
 * start is (coercible to) an epoch-millis value; confirm CalendarEvent.
 */
export const filterByUpcomingPredicate =
  (months: number) =>
  (event: CalendarEvent): boolean => {
    // FIX: sample the clock once so both window bounds are computed from the
    // same instant (previously Date.now() was read twice).
    const now = Date.now();
    const includeFrom = subDays(now, 3).getTime();
    if (event.start < includeFrom) {
      return false;
    }
    const includeTo = addMonths(now, months).getTime();
    if (event.start > includeTo) {
      return false;
    }
    return true;
  };
<%# Hidden "activity" event form for a deal: posts a new Event (kind=activity)
    with a title, rich-text description, optional due datetime and a "done"
    flag. Shown/hidden by the events-form Stimulus controller via the
    kindActivity target; "Cancelar" triggers events-form#selectNone. %>
<div data-events-form-target="kindActivity" hidden class="h-full w-full">
  <%= form_with(model: @event, url: account_contact_events_path(current_user.account, contact_id: @deal.contact.id, deal_id: @deal.id), namespace: 'activity') do |form| %>
    <% if @event.errors.any? %>
      <div id="error_explanation">
        <h2><%= pluralize(@event.errors.count, "error") %> prohibited this activity kind from being saved:</h2>
        <ul>
          <% @event.errors.each do |error| %>
            <li><%= error.full_message %></li>
          <% end %>
        </ul>
      </div>
    <% end %>
    <%# The kind is fixed to "activity" for this form variant. %>
    <%= form.hidden_field :kind, value: 'activity' %>
    <%= form.hidden_field :deal_id %>
    <div class="p-3 mt-4">
      <%= form.label :title, 'Tipo', class:'py-1' %>
      <%= form.text_field :title, class: 'woo-input w-full' %>
    </div>
    <div class="border-t border-light-palette-p3 p-3 mt-4">
      <%= form.label :content, 'Descrição', class:'py-2' %>
      <%= form.rich_text_area :content, class: 'woo-input' %>
    </div>
    <div class="border-t border-light-palette-p3 p-3 mt-4">
      <%= form.label "Agendar" %>
      <%= form.text_field :due, class: 'woo-input w-full', type: 'datetime-local' %>
    </div>
    <div class="flex flex-row-reverse border-t border-light-palette-p3 p-3 mt-4">
      <%= form.submit 'Confirmar', class: 'h-10 rounded-md border-light-palette-p3 bg-brand-palette-03 text-light-palette-p5 px-3' %>
      <input type="button" value="Cancelar" data-action="click->events-form#selectNone" class="h-10 rounded-md border border-gray-pallete-p3 bg-light-palette-p5 text-gray-pallete-p3 px-3 mx-2">
      <div class="mr-auto flex items-center">
        <%= form.check_box :done, class: 'rounded text-brand-palette-03' %>
        <%= form.label :done, "Marcar atividade como feita", class: 'px-3 m-0' %>
      </div>
    </div>
  <% end %>
</div>
# Copyright 2023 DeepMind Technologies Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """A base pycolab environment for finding the object that's an odd one out. The agent will be placed in a room with four objects. These objects have different colors, shapes, and possible positions. Two values of each attribute will appear in any given trial. For example, there might be red and blue objects triangles and squares, and objects either in the corner or center. Two of these attributes will be evenly distributed across the objects, so that half the objects have each value, but one will be unevenly split. For example, there might be two red and two blue objects, two triangles and two squares, but three objects in the center and only one in the corner. The agent will be rewarded for touching the object that's the odd one out. For the meta-learning task, the code in this file mostly corresponds to a single trial; the linking across trials is handled in the MetaOddOneOutEnvironment class in odd_one_out_environment.py. """ import curses import dataclasses import enum from typing import Tuple from absl import app from absl import logging import numpy as np from pycolab import ascii_art from pycolab import human_ui from pycolab import things as plab_things from pycolab.prefab_parts import sprites as prefab_sprites ROOM_SIZE = (11, 11) # one square around edge will be wall. 
# Candidate object positions grouped by position type (row, col) on the
# 11x11 board; walls occupy the outermost ring.
OBJECT_POSITIONS = {
    "in_corner": [(1, 1), (1, 9), (9, 1), (9, 9)],
    "against_wall_x": [(1, 4), (1, 5), (1, 6), (9, 4), (9, 5), (9, 6)],
    "against_wall_y": [(4, 1), (5, 1), (6, 1), (4, 9), (5, 9), (6, 9)],
    "in_center": [(4, 4), (4, 5), (4, 6), (5, 4), (5, 5), (5, 6),
                  (6, 4), (6, 5), (6, 6)]}

AGENT_CHAR = "A"
WALL_CHAR = "#"
FLOOR_CHAR = " "
RESERVED_CHARS = [AGENT_CHAR, WALL_CHAR, FLOOR_CHAR]
# Uppercase A-Z, minus the characters reserved above.
POSSIBLE_OBJECT_CHARS = [
    chr(i) for i in range(65, 91) if chr(i) not in RESERVED_CHARS
]

EXPLAIN_PHASE_LENGTH = 16
EPISODE_LENGTH = 128

CORRECT_REWARD = 1.
INCORRECT_REWARD = 0.

META_BETWEEN_TRIAL_CLEANUP_KEYS = (  # plot items to reinitialize between trials
    "next_progressive_level", "explanation_string", "instruction_string",
    "char_to_color_shape", "char_to_color_shape_updated",
    "transformations_allowed", "transformations_happening_now",
    "extant_attributes", "concept_type", "explain", "termination_countdown",
    "value_multiplier",
)


class ACTIONS(enum.IntEnum):
  """The possible actions the agent can take."""
  # movement directions
  MOVE_N = 0
  MOVE_NE = 1
  MOVE_E = 2
  MOVE_SE = 3
  MOVE_S = 4
  MOVE_SW = 5
  MOVE_W = 6
  MOVE_NW = 7
  # transformations (used for meta environment only)
  TRANSFORM_COLOR = 8
  TRANSFORM_TEXTURE = 9
  TRANSFORM_SHAPE = 10

# Convenience tuple of the three transformation actions.
TRANSFORM_ACTIONS = (
    ACTIONS.TRANSFORM_COLOR,
    ACTIONS.TRANSFORM_TEXTURE,
    ACTIONS.TRANSFORM_SHAPE)


def terminate_episode_cleanup(the_plot):
  """Cleans up between trials in meta-learning setting."""
  if the_plot["next_progressive_level"] is not None:
    the_plot.next_chapter = the_plot["next_progressive_level"]
    logging.info("Progressing to next level! %i", the_plot.next_chapter)
  # don't carry these keys over, will be reset when the new game is built
  for k in META_BETWEEN_TRIAL_CLEANUP_KEYS:
    del the_plot[k]


@dataclasses.dataclass
class ObjectProperties:
  """Class for holding the properties of objects while building."""
  character: str
  position: Tuple[int, int]
  position_type: str
  shape: str
  color: str
  texture: str
  value: float


class ObjectDrape(plab_things.Drape):
  """A `Drape` for objects in the room.

  See parent class for details of Drapes. These drapes handle logic of
  providing explanations to the agent, and handle rewards etc. if agent
  moves onto one.
  """

  def __init__(self, curtain, character, object_properties,
               properties_string, explanation_string):
    assert character == object_properties.character
    super(ObjectDrape, self).__init__(curtain, object_properties.character)
    self.color = object_properties.color
    self.texture = object_properties.texture
    self.shape = object_properties.shape
    self.properties_string = properties_string
    self.explanation_string = explanation_string
    self.value = object_properties.value
    self.position = object_properties.position
    # NOTE(review): agent_is_adjacent is read in _handle_adjacency but is
    # never set to True anywhere in this chunk, which would make that elif
    # branch unreachable -- confirm against the full file.
    self.agent_is_adjacent = False

  def _handle_player_touch(self, the_plot):
    """What happens if player moves onto this object."""
    if the_plot["termination_countdown"] is not None:
      if the_plot["termination_countdown"] < EXPLAIN_PHASE_LENGTH - 2:
        # touched something else already, and time passed since, terminate.
        the_plot.terminate_episode()
    else:
      # touched for the very first time!
      the_plot.add_reward(self.value)
      the_plot["termination_countdown"] = EXPLAIN_PHASE_LENGTH
      the_plot["explanation_string"] = self.explanation_string

  def _handle_adjacency(self, the_plot, adjacent, *args):
    """What happens if player is adjacent to this object."""
    if adjacent:
      if the_plot["termination_countdown"] is None:
        the_plot["explanation_string"] = self.properties_string
    elif self.agent_is_adjacent:
      # no longer adjacent, but was, reset instruction if needed
      self.agent_is_adjacent = False
      if the_plot["explanation_string"] == self.properties_string:
        the_plot["explanation_string"] = ""

  def update(self, actions, board, layers, backdrop, things, the_plot):
    """Update state given player actions, etc. Player updates earlier."""
    rows, cols = things[AGENT_CHAR].position
    if self.curtain[(rows, cols)]:
      self._handle_player_touch(the_plot)
    else:
      # is character adjacent to object? Scan the 3x3 neighborhood around
      # the agent. (The inner break only exits the inner loop; harmless.)
      adjacent = False
      poss_rows = range(rows - 1, rows + 2)
      poss_cols = range(cols - 1, cols + 2)
      for x in poss_rows:
        for y in poss_cols:
          possible_position = (x, y)
          if self.curtain[possible_position]:
            adjacent = True
            break
      self._handle_adjacency(the_plot, adjacent, actions, things)


class MetaObjectDrape(ObjectDrape):
  """A `Drape` for objects in the meta-learning version."""

  def _set_value_and_explanations(self, things, the_plot):
    """Update value and explanations (e.g., after any transformations)."""
    self.value = CORRECT_REWARD
    for k, thing in things.items():
      if k not in [self.character, AGENT_CHAR]:
        # if matches along relevant dimension, then not odd one out
        if ((the_plot["concept_type"] == "color" and
             thing.color == self.color) or
            (the_plot["concept_type"] == "texture" and
             thing.texture == self.texture) or
            (the_plot["concept_type"] == "shape" and
             thing.shape == self.shape)):
          self.value = INCORRECT_REWARD
          break
    # same with explanations
    if self.explanation_string:
      if self.value == CORRECT_REWARD:
        explanation_string = ["Correct the concept is"]
        explanation_string += [the_plot["concept_type"]]
explanation_string += ["and it is uniquely"] else: explanation_string = ["Incorrect the concept is"] explanation_string += [the_plot["concept_type"]] explanation_string += ["and other objects are"] if the_plot["concept_type"] == "color": explanation_string.append(self.color) elif the_plot["concept_type"] == "shape": explanation_string.append(self.shape) elif the_plot["concept_type"] == "texture": explanation_string.append(self.texture) self.explanation_string = " ".join(explanation_string) if self.properties_string: self.properties_string = " ".join([ "This is a", self.color, self.texture, self.shape]) def _handle_player_touch(self, the_plot): if the_plot["termination_countdown"] is not None: if the_plot["termination_countdown"] < EXPLAIN_PHASE_LENGTH - 2: # touched something else already, and time passed since, terminate. terminate_episode_cleanup(the_plot) the_plot.terminate_episode() return else: the_plot.add_reward(self.value * the_plot["value_multiplier"]) the_plot["termination_countdown"] = EXPLAIN_PHASE_LENGTH the_plot["explanation_string"] = self.explanation_string the_plot["instruction_string"] = "" def _handle_adjacency(self, the_plot, adjacent, actions, things): if adjacent: if the_plot["transformations_happening_now"] and actions in [8, 9, 10]: print("transforming adjacent") original = the_plot["char_to_color_shape"][self.character] original = original.split() updated = None if actions == ACTIONS.TRANSFORM_COLOR: for other_color in the_plot["extant_attributes"]["color"]: if other_color != self.color: logging.info("Transforming: %s -> %s", self.color, other_color) self.color = other_color updated = [other_color] + original[1:] break elif actions == ACTIONS.TRANSFORM_TEXTURE: # transform texture for other_texture in the_plot["extant_attributes"]["texture"]: if other_texture != self.texture: logging.info( "Transforming: %s -> %s", self.texture, other_texture) self.texture = other_texture updated = [original[0], other_texture, original[2]] break elif 
actions == ACTIONS.TRANSFORM_SHAPE:  # transform shape
          # Swap this object's shape for the first *different* shape that
          # exists in the level (extant_attributes is populated by the
          # game factory below).
          for other_shape in the_plot["extant_attributes"]["shape"]:
            if other_shape != self.shape:
              logging.info("Transforming: %s -> %s", self.shape, other_shape)
              self.shape = other_shape
              # `original` is "<color> <texture> <shape>" split into a list;
              # keep color + texture, replace the shape component.
              updated = original[:2] + [other_shape]
              break
        # Publish the updated "<color> <texture> <shape>" description so the
        # renderer can pick up the new appearance.
        updated = " ".join(updated)
        the_plot["char_to_color_shape"][self.character] = updated
        the_plot["char_to_color_shape_updated"] = True
      # update value etc. anytime player is adjacent, when it matters...
      self._set_value_and_explanations(things, the_plot)
      if (the_plot["termination_countdown"] is None
          and the_plot["explanation_string"][:15] != "You transformed"):
        the_plot["explanation_string"] = self.properties_string
    elif self.agent_is_adjacent:
      # no longer adjacent, but was, reset instruction if needed
      self.agent_is_adjacent = False
      if the_plot["explanation_string"] == self.properties_string:
        the_plot["explanation_string"] = ""

  def update(self, actions, board, layers, backdrop, things, the_plot):
    """Delegates to the parent update unless the trial has already ended."""
    if "char_to_color_shape" not in the_plot:  # trial over, nothing left to do!
      return
    return super().update(actions, board, layers, backdrop, things, the_plot)


class PlayerSprite(prefab_sprites.MazeWalker):
  """The player character.

  Player character, moves around and handles some game logic. See parent
  class for further details.
  """

  def __init__(self, corner, position, character):
    # Walls ("#") are impassable; the agent can occupy any other cell.
    super(PlayerSprite, self).__init__(
        corner, position, character, impassable="#")
    self.start_position = position  # remember the spawn cell

  def _terminate_episode(self, the_plot):
    # Small hook so subclasses (see MetaPlayerSprite) can add cleanup
    # before the episode actually terminates.
    the_plot.terminate_episode()

  def update(self, actions, board, layers, backdrop, things, the_plot):
    """Update self and game state given an action."""
    # basic movement
    if actions == ACTIONS.MOVE_N:
      self._north(board, the_plot)
    elif actions == ACTIONS.MOVE_NE:
      self._northeast(board, the_plot)
    elif actions == ACTIONS.MOVE_E:
      self._east(board, the_plot)
    elif actions == ACTIONS.MOVE_SE:
      self._southeast(board, the_plot)
    elif actions == ACTIONS.MOVE_S:
      self._south(board, the_plot)
    elif actions == ACTIONS.MOVE_SW:
      self._southwest(board, the_plot)
    elif actions == ACTIONS.MOVE_W:
      self._west(board, the_plot)
    elif actions == ACTIONS.MOVE_NW:
      self._northwest(board, the_plot)

    # game logic: once a termination countdown has been started (set
    # elsewhere, presumably when the agent selects an object — confirm
    # against the ObjectDrape code), tick it down each step and end the
    # episode when it reaches zero.
    if the_plot["termination_countdown"] is not None:
      if the_plot["termination_countdown"] == 0:
        self._terminate_episode(the_plot)
      else:
        the_plot["termination_countdown"] -= 1


class MetaPlayerSprite(PlayerSprite):
  """The player for the meta-learning tasks."""

  def _terminate_episode(self, the_plot):
    # Run meta-task cleanup (defined elsewhere in this file) before
    # terminating, unlike the base class.
    terminate_episode_cleanup(the_plot)
    the_plot.terminate_episode()

  def update(self, actions, board, layers, backdrop, things, the_plot):
    """Handles transformation bookkeeping, then runs the base update."""
    # A transform action consumes one unit of the limited transformation
    # budget; the flag tells the object drapes that a transformation is
    # happening this very step.
    if actions in TRANSFORM_ACTIONS and the_plot["transformations_allowed"] > 0:
      the_plot["transformations_allowed"] -= 1
      the_plot["transformations_happening_now"] = True
    else:
      the_plot["transformations_happening_now"] = False
    # "You transformed ..." feedback only lasts a single step.
    if the_plot["explanation_string"][:15] == "You transformed":
      the_plot["explanation_string"] = ""
    super().update(actions, board, layers, backdrop, things, the_plot)


def _generate_level_layout(object_properties, agent_start):
  """Generates pycolab-style ascii map containing room, objects, and agent.

  Args:
    object_properties: iterable of objects exposing `position` and
      `character`, one per object to place in the room.
    agent_start: (row, col) cell for the agent character.

  Returns:
    List of strings (one per room row) in pycolab's ascii-art format.
  """
  level_layout = np.array([[FLOOR_CHAR] * ROOM_SIZE[1]] * ROOM_SIZE[0])
  # insert walls
  level_layout[0, :] = WALL_CHAR
  level_layout[-1, :] = WALL_CHAR
  level_layout[:, 0] = WALL_CHAR
  level_layout[:, -1] = WALL_CHAR
  # add agent and objects
  level_layout[agent_start] = AGENT_CHAR
  for obj in object_properties:
    level_layout[obj.position] = obj.character
  # convert to pycolab's ascii format
  level_layout = ["".join(x) for x in level_layout.tolist()]
  return level_layout


def make_game(object_properties, concept_type, explain="full",
              agent_start=None, explain_only_concept_type=False, rng=None):
  """Makes a basic pycolab odd-one-out game.

  Args:
    object_properties: list of ObjectProperties for defining objects in level.
    concept_type: one of ["position", "color", "texture", "shape"], indicating
      which attribute has the odd-one-out.
    explain: One of "full", "reward", "properties" or "none." If none, no
      explanation. If "reward" the explanation describes whether the answer
      was correct or incorrect, and the features that show it. If
      "properties", will identify the properties of objects when adjacent to
      them. If "full", gives both properties + reward.
    agent_start: Optional agent start position (mostly for testing).
    explain_only_concept_type: explain only the single dimension corresponding
      to concept_type; used for the confounding experiments only.
    rng: An optional numpy Random Generator for choosing agent_start (if not
      set), to set a fixed seed use e.g. `rng=np.random.default_rng(seed=...)`

  Returns:
    this_game: Pycolab engine running the specified game.
  """
  if rng is None:
    rng = np.random.default_rng()
  char_to_color_shape = []
  drape_creators = {}
  forbidden_locations = []
  for obj in object_properties:
    # can't have player starting here
    forbidden_locations.append(obj.position)
    # instruction
    if explain not in ["none", "full", "reward", "properties"]:
      raise ValueError("Unrecognized explanation type: {}".format(explain))
    # Property explanation shown when the agent stands next to this object.
    if explain in ["full", "properties"]:
      if explain_only_concept_type:
        properties_string = "This is a "
        if concept_type == "color":
          properties_string += obj.color
        elif concept_type == "texture":
          properties_string += obj.texture
        elif concept_type == "shape":
          properties_string += obj.shape
        elif concept_type == "position":
          properties_string += obj.position_type
      else:
        properties_string = " ".join([
            "This is a", obj.color, obj.texture, obj.shape,
            obj.position_type])
    else:
      properties_string = ""
    # Reward explanation shown when this object is chosen; the correct
    # object (value > 0) names the uniquely-distinguishing attribute.
    explanation_string = ""
    if explain in ["full", "reward"]:
      if obj.value > 0.:
        explanation_string = ["Correct it is uniquely"]
        if concept_type == "position":
          explanation_string.append(obj.position_type)
        elif concept_type == "color":
          explanation_string.append(obj.color)
        elif concept_type == "shape":
          explanation_string.append(obj.shape)
        elif concept_type == "texture":
          explanation_string.append(obj.texture)
      else:
        if explain_only_concept_type:
          explanation_string = ["Incorrect other objects are"]
          if concept_type == "position":
            explanation_string.append(obj.position_type)
          elif concept_type == "color":
            explanation_string.append(obj.color)
          elif concept_type == "shape":
            explanation_string.append(obj.shape)
          elif concept_type == "texture":
            explanation_string.append(obj.texture)
        else:
          explanation_string = [
              "Incorrect other objects are", obj.color, obj.texture,
              obj.shape, "or", obj.position_type]
      explanation_string = " ".join(explanation_string)
    # create object builders
    drape_creators[obj.character] = ascii_art.Partial(
        ObjectDrape, object_properties=obj,
        properties_string=properties_string,
        explanation_string=explanation_string)
    char_to_color_shape.append(
        (obj.character, " ".join((obj.color, obj.texture, obj.shape))))
  # set up agent start: uniform over free interior cells.
  # NOTE(review): the 1..9 range presumably matches ROOM_SIZE's interior —
  # confirm if ROOM_SIZE ever changes.
  if agent_start is None:
    poss_starts = []
    for x in range(1, 10):
      for y in range(1, 10):
        if (x, y) not in forbidden_locations:
          poss_starts.append((x, y))
    agent_start = poss_starts[
        rng.integers(len(poss_starts))]
  sprites = {AGENT_CHAR: PlayerSprite}
  # generate level and game
  level_layout = _generate_level_layout(object_properties, agent_start)
  this_game = ascii_art.ascii_art_to_game(
      art=level_layout,
      what_lies_beneath=" ",
      sprites=sprites,
      drapes=drape_creators,
      update_schedule=[[AGENT_CHAR],
                       [obj.character for obj in object_properties]])
  # update necessary plot information
  this_game.the_plot["explanation_string"] = ""
  this_game.the_plot["instruction_string"] = ""  # only used in meta case
  this_game.the_plot["char_to_color_shape"] = dict(char_to_color_shape)
  this_game.the_plot["char_to_color_shape_updated"] = False  # used for meta
  this_game.the_plot["termination_countdown"] = None
  return this_game


def make_metalearning_game(
    object_properties, concept_type, explain="full",
    transformations_allowed=0, additional_extant_properties=None,
    agent_start=None, value_multiplier=1., next_progressive_level=None,
    rng=None):
  """Constructs a metalearning version of the game.

  Args:
    object_properties: list of (character, position, position_type, shape,
      color, texture, value), for placing objects in the world.
    concept_type: one of ["color", "texture", "shape"], indicating which
      attribute has the odd-one-out.
    explain: One of "full", "reward", "properties" or "none." If none, no
      explanation. If "reward" the explanation describes whether the answer
      was correct or incorrect, and the features that show it. If
      "properties", will identify the properties of objects when adjacent to
      them. If "full", gives both properties + reward.
    transformations_allowed: number of transformations of object properties
      that the agent is allowed to make. Use 0 to match the original task,
      more to allow interesting interventions on the environment.
    additional_extant_properties: Optional dict, used to add properties that
      are desired but not yet present in the scene, for transformation.
      Should have as keys a subset of ["color", "texture", "shape"].
    agent_start: Optional agent start position (mostly for testing).
    value_multiplier: multiplies the rewards.
    next_progressive_level: if not None, the next level key to progress to.
    rng: An optional numpy Random Generator for choosing agent_start (if not
      set), to set a fixed seed use e.g. `rng=np.random.default_rng(seed=...)`

  Returns:
    this_game: Pycolab engine running the specified game.
  """
  if rng is None:
    rng = np.random.default_rng()
  char_to_color_shape = []
  drape_creators = {}
  forbidden_locations = []
  # Attributes currently present in the scene; transformations may only
  # target attributes from this set.
  extant_attributes = {"color": set(), "texture": set(), "shape": set()}
  for obj in object_properties:
    # can't have player starting here
    forbidden_locations.append(obj.position)
    # explanations
    if explain not in ["none", "full", "reward", "properties"]:
      raise ValueError("Unrecognized explanation type: {}".format(explain))
    # Unlike make_game, strings can change after a transformation, so the
    # drapes compute them lazily; "tbd" is a placeholder meaning "enabled".
    if explain in ["full", "properties"]:
      properties_string = "tbd"  # will be set later as needed.
    else:
      properties_string = ""
    explanation_string = ""
    if explain in ["full", "reward"]:
      explanation_string = "tbd"  # will be set later as needed.
    # create object builders
    drape_creators[obj.character] = ascii_art.Partial(
        MetaObjectDrape, object_properties=obj,
        properties_string=properties_string,
        explanation_string=explanation_string)
    char_to_color_shape.append(
        (obj.character, " ".join((obj.color, obj.texture, obj.shape))))
    extant_attributes["color"].add(obj.color)
    extant_attributes["texture"].add(obj.texture)
    extant_attributes["shape"].add(obj.shape)
  # Allow transformation targets beyond what is already in the scene.
  if additional_extant_properties is not None:
    for k, v in additional_extant_properties.items():
      extant_attributes[k].update(set(v))
  # set up agent start (uniform over free interior cells, as in make_game)
  if agent_start is None:
    poss_starts = []
    for x in range(1, 10):
      for y in range(1, 10):
        if (x, y) not in forbidden_locations:
          poss_starts.append((x, y))
    agent_start = poss_starts[
        rng.integers(len(poss_starts))]
  sprites = {AGENT_CHAR: MetaPlayerSprite}
  # generate level and game
  level_layout = _generate_level_layout(object_properties, agent_start)
  this_game = ascii_art.ascii_art_to_game(
      art=level_layout,
      what_lies_beneath=" ",
      sprites=sprites,
      drapes=drape_creators,
      update_schedule=[[AGENT_CHAR],
                       [obj.character for obj in object_properties]])
  # update necessary plot information
  if transformations_allowed > 0:
    this_game.the_plot["instruction_string"] = "Make an odd one out"
  else:
    this_game.the_plot["instruction_string"] = "Find the odd one out"
  this_game.the_plot["explanation_string"] = ""
  this_game.the_plot["char_to_color_shape"] = dict(char_to_color_shape)
  this_game.the_plot["char_to_color_shape_updated"] = True
  this_game.the_plot["transformations_allowed"] = transformations_allowed
  this_game.the_plot["transformations_happening_now"] = False
  this_game.the_plot["extant_attributes"] = extant_attributes
  this_game.the_plot["concept_type"] = concept_type
  this_game.the_plot["termination_countdown"] = None
  this_game.the_plot["value_multiplier"] = value_multiplier
  this_game.the_plot["next_progressive_level"] = next_progressive_level
  this_game.the_plot["explain"] = explain
  return this_game


def main(argv):
  """Plays an example "basic" or "meta" episode with a curses human UI."""
  if len(argv) > 2:
    raise app.UsageError("Too many command-line arguments.")
  episode_type = argv[1]
  if episode_type == "basic":
    # One square among triangles: the square (positive reward) is the
    # odd one out along the "shape" dimension.
    these_object_properties = [
        ObjectProperties(
            POSSIBLE_OBJECT_CHARS[1], (1, 4), "against_wall", "triangle",
            "red", "solid", INCORRECT_REWARD),
        ObjectProperties(
            POSSIBLE_OBJECT_CHARS[2], (4, 1), "against_wall", "triangle",
            "blue", "noise", INCORRECT_REWARD),
        ObjectProperties(
            POSSIBLE_OBJECT_CHARS[3], (4, 4), "in_center", "square",
            "red", "noise", CORRECT_REWARD),
        ObjectProperties(
            POSSIBLE_OBJECT_CHARS[4], (6, 6), "in_center", "triangle",
            "blue", "solid", INCORRECT_REWARD),
    ]
    game = make_game(object_properties=these_object_properties,
                     concept_type="shape")
  elif episode_type == "meta":
    # All four objects identical: the player must *create* an odd one out
    # using the transformation actions.
    these_object_properties = [
        ObjectProperties(
            POSSIBLE_OBJECT_CHARS[1], (1, 4), "against_wall", "triangle",
            "blue", "noise", INCORRECT_REWARD),
        ObjectProperties(
            POSSIBLE_OBJECT_CHARS[2], (4, 1), "against_wall", "triangle",
            "blue", "noise", INCORRECT_REWARD),
        ObjectProperties(
            POSSIBLE_OBJECT_CHARS[3], (4, 4), "in_center", "triangle",
            "blue", "noise", INCORRECT_REWARD),
        ObjectProperties(
            POSSIBLE_OBJECT_CHARS[4], (6, 6), "in_center", "triangle",
            "blue", "noise", INCORRECT_REWARD),
    ]
    game = make_metalearning_game(
        object_properties=these_object_properties,
        concept_type="shape",
        transformations_allowed=5,
        agent_start=(1, 6),
        additional_extant_properties={
            "color": ["red"],
            "shape": ["square"],
            "texture": ["solid"],
        })
  else:
    raise ValueError("Unrecognized argument: %s" % episode_type)
  # Note that these colors are only for human UI
  foreground_colors = {
      AGENT_CHAR: (999, 999, 999),  # Agent is white
      WALL_CHAR: (300, 300, 300),  # Wall, dark grey
      FLOOR_CHAR: (0, 0, 0),  # Floor
  }
  keys_to_actions = {
      # Basic movement.
      curses.KEY_UP: ACTIONS.MOVE_N,
      curses.KEY_DOWN: ACTIONS.MOVE_S,
      curses.KEY_LEFT: ACTIONS.MOVE_W,
      curses.KEY_RIGHT: ACTIONS.MOVE_E,
      -1: 11,  # Do nothing.
  }
  if episode_type == "basic":
    foreground_colors.update({
        POSSIBLE_OBJECT_CHARS[1]: (900, 100, 100),
        POSSIBLE_OBJECT_CHARS[2]: (100, 100, 900),
        POSSIBLE_OBJECT_CHARS[3]: (900, 100, 100),
        POSSIBLE_OBJECT_CHARS[4]: (100, 100, 900),
    })
  elif episode_type == "meta":
    # Extra keys for the transformation actions.
    keys_to_actions.update({
        "q": ACTIONS.TRANSFORM_COLOR,
        "w": ACTIONS.TRANSFORM_TEXTURE,
        "e": ACTIONS.TRANSFORM_SHAPE,
    })
    foreground_colors.update({
        POSSIBLE_OBJECT_CHARS[1]: (100, 100, 900),
        POSSIBLE_OBJECT_CHARS[2]: (100, 100, 900),
        POSSIBLE_OBJECT_CHARS[3]: (100, 100, 900),
        POSSIBLE_OBJECT_CHARS[4]: (100, 100, 900),
    })
  background_colors = {
      c: (0, 0, 0) for c in foreground_colors
  }
  ui = human_ui.CursesUi(
      keys_to_actions=keys_to_actions,
      delay=10000,
      colour_fg=foreground_colors,
      colour_bg=background_colors)
  ui.play(game)


if __name__ == "__main__":
  app.run(main)
package com.example.model.hotMatches

import android.os.Parcel
import android.os.Parcelable

import com.google.gson.annotations.SerializedName

/**
 * Gson DTO for a single "hot match" entry in the matches API response.
 *
 * Every property is nullable so that missing or partial JSON fields
 * deserialize without throwing.
 *
 * NOTE(review): [Parcel] and [Parcelable] are imported but this class does
 * not implement Parcelable — confirm whether the implementation was removed
 * intentionally or is still pending.
 */
data class HotMatche(
    // Aggregate score across legs; element order not documented here —
    // confirm against the API docs.
    @SerializedName("agg_score")
    val aggScore: List<Int?>?,
    @SerializedName("away_Info")
    val awayInfo: AwayInfo?,
    @SerializedName("away_position")
    val awayPosition: String?,
    @SerializedName("away_team_id")
    val awayTeamId: String?,
    @SerializedName("competition_id")
    val competitionId: String?,
    @SerializedName("coverage")
    val coverage: Coverage?,
    @SerializedName("environment")
    val environment: Environment?,
    @SerializedName("home_Info")
    val homeInfo: HomeInfo?,
    @SerializedName("home_position")
    val homePosition: String?,
    @SerializedName("home_team_id")
    val homeTeamId: String?,
    @SerializedName("id")
    val id: String?,
    @SerializedName("league_Info")
    val leagueInfo: LeagueInfo?,
    // Epoch-style integer per the API; units (seconds vs minutes) not
    // visible here — confirm before converting.
    @SerializedName("match_time")
    val matchTime: Int?,
    @SerializedName("match_timing")
    val matchTiming: String?,
    @SerializedName("neutral")
    val neutral: Int?,
    @SerializedName("note")
    val note: String?,
    @SerializedName("odds")
    val odds: Odds?,
    @SerializedName("referee_id")
    val refereeId: String?,
    @SerializedName("related_id")
    val relatedId: String?,
    @SerializedName("round")
    val round: Round?,
    @SerializedName("season_id")
    val seasonId: String?,
    @SerializedName("status_id")
    val statusId: Int?,
    @SerializedName("update_timing")
    val updateTiming: String?,
    @SerializedName("updated_at")
    val updatedAt: Int?,
    @SerializedName("venueDetails")
    val venueDetails: VenueDetails?,
    @SerializedName("venue_id")
    val venueId: String?
)
// --- DOM references -------------------------------------------------------
const app = document.querySelector('.weather-app');
const temp = document.querySelector('#temp');
const dateOutput = document.querySelector('.date');
const timeOutput = document.querySelector('.time');
const conditionOutput = document.querySelector('.condition');
// NOTE(review): selector is spelled ".humadity" and this element is never
// used below; kept so markup relying on it is unaffected — confirm & remove.
const humadityOutput = document.querySelector('.humadity');
const windOutput = document.querySelector('.wind');
const form = document.getElementById('locationInput');
const search = document.querySelector('.search');
const btn = document.querySelector('.submit');
const cities = document.querySelectorAll('.city');
const cityName = document.getElementById('name');
const humidityOutput = document.getElementById('humidity');
const cloudOutput = document.getElementById('cloud');
const iconWeather = document.getElementById('icon-weather');

// Currently selected city; default shown on first load.
let cityInput = "Ижевск";

// Clicking a preset city link switches the forecast to that city.
cities.forEach((city) => {
    city.addEventListener('click', (e) => {
        console.log(e.target.textContent);
        cityInput = e.target.textContent;
        fetchWeatherData();
        // BUG FIX: was "opasity" — the typo made the fade-out a no-op.
        app.style.opacity = "0";
    });
});

// Submitting the search form looks up the typed city.
form.addEventListener('submit', (e) => {
    if (search.value.length == 0) {
        alert('Введите название города');
    } else {
        cityInput = search.value;
        fetchWeatherData();
        search.value = "";
        app.style.opacity = "0";
    }
    e.preventDefault();
});

/**
 * Formats a calendar date as a long localized (ru-RU) string,
 * e.g. "понедельник, 1 января 2024 г.".
 *
 * BUG FIX: uses the numeric Date constructor instead of parsing a
 * `${year}-${month}-${day}` string — non-zero-padded parts are not valid
 * ISO dates and yield "Invalid Date" on some engines (e.g. Safari).
 */
function dayOfTheWeek(day, month, year) {
    const date = new Date(year, month - 1, day);
    const options = { weekday: 'long', year: 'numeric', month: 'long', day: 'numeric' };
    return date.toLocaleString('ru-RU', options);
};

/** Fetches current weather for `cityInput` and renders it into the page. */
function fetchWeatherData() {
    // NOTE(review): plain-http endpoint and an exposed API key; consider
    // https and a server-side proxy. "post" is kept as-is to preserve
    // behavior, though GET is the documented method for current.json.
    fetch(`http://api.weatherapi.com/v1/current.json?key=ce06abfb1516447da1b172157231912&q=${cityInput}`, {
        method: "post",
    })
        .then(response => response.json())
        .then(data => {
            if (data) {
                console.log(data);
                temp.innerHTML = data.current.temp_c + "&#176;"
                conditionOutput.innerHTML = data.current.condition.text;

                // localtime looks like "YYYY-MM-DD H:MM"; split instead of
                // fixed substr offsets so non-padded parts also work.
                const date = data.location.localtime;
                const [datePart, time] = date.split(' ');
                const [y, m, d] = datePart.split('-').map(Number);

                dateOutput.innerHTML = `${dayOfTheWeek(d, m, y)}`
                cityName.textContent = data.location.name;
                timeOutput.innerHTML = time;
                cloudOutput.innerHTML = data.current.cloud + "%";
                humidityOutput.innerHTML = data.current.humidity + "%";
                windOutput.innerHTML = data.current.wind_kph + "км/ч";

                let timeOfDay = "day";
                const code = data.current.condition.code;
                if (!data.current.is_day) {
                    timeOfDay = "night";
                }

                // Icon URL looks like "//cdn.weatherapi.com/weather/64x64/day/116.png";
                // keep only the trailing file name for the local icon set.
                const iconFile = data.current.condition.icon.substr("//cdn.weatherapi.com/weather/".length).split('/')[2];
                iconWeather.src = `./icons/${timeOfDay}/` + iconFile;

                // Pick background image + button color by condition code.
                if (code == 1000) {
                    app.style.backgroundImage = `url(./images/${timeOfDay}/clear.jpg)`;
                    btn.style.background = "#e5ba92";
                    if (timeOfDay == "night") {
                        btn.style.background = "#181e27";
                    }
                } else if (
                    code == 1003 || code == 1006 || code == 1009 ||
                    code == 1030 || code == 1069 || code == 1087 ||
                    code == 1135 || code == 1273 || code == 1276 ||
                    code == 1279 || code == 1282
                ) {
                    app.style.backgroundImage = `url(./images/${timeOfDay}/cloudy.jpg)`;
                    btn.style.background = "#fa6d1b";
                    if (timeOfDay == "night") {
                        btn.style.background = "#181e27";
                    }
                } else if (
                    // NOTE(review): 1069 also appears in the "cloudy" list
                    // above, so it can never reach this branch.
                    code == 1063 || code == 1069 || code == 1072 ||
                    code == 1150 || code == 1153 || code == 1180 ||
                    code == 1183 || code == 1186 || code == 1189 ||
                    code == 1192 || code == 1195 || code == 1204 ||
                    code == 1207 || code == 1240 || code == 1243 ||
                    code == 1246 || code == 1249 || code == 1252
                ) {
                    app.style.backgroundImage = `url(./images/${timeOfDay}/rainy.jpg)`;
                    btn.style.background = "#647d75";
                    if (timeOfDay == "night") {
                        btn.style.background = "#325c80";
                    }
                } else {
                    // Any other code is treated as snow.
                    app.style.backgroundImage = `url(./images/${timeOfDay}/snowy.jpg)`;
                    // BUG FIX: was "4d72aa" (missing "#") — an invalid CSS
                    // color that the browser silently ignored.
                    btn.style.background = "#4d72aa";
                    if (timeOfDay == "night") {
                        btn.style.background = "#1b1b1b";
                    }
                }
                app.style.opacity = "1";
            }
        })
        .catch((e) => {
            console.log(e);
            //alert('Город не найден, попробуйте снова');
            //app.style.opacity = "1";
        });
}

// Initial render on page load.
fetchWeatherData();
//app.style.opacity = "1";
import React, { ReactElement } from 'react'; import { useRouter } from 'next/router'; import { IMarkdownArticleProps } from 'src/interfaces/IMarkdownArticleProps'; import { MarkdownContainer } from 'src/components/global/markdownContainer'; import { Container } from 'src/components/global/container'; import { IStaticProps } from 'src/interfaces/IStaticProps'; import { Row } from 'src/components/elements/row'; interface IArticleProps { articles: { [key in string]: IMarkdownArticleProps; }; name: string; redirectLanguageToIndex: boolean; } /** * The Article component. * @param {IArticleProps} props - The props. */ const Article = (props: IArticleProps): ReactElement => { const router = useRouter(); const article = props.articles?.[router.locale as string]; return ( <Container redirectLanguageToIndex={props.redirectLanguageToIndex}> <Row> <MarkdownContainer {...article} /> </Row> </Container> ); }; /** * Returns the static paths. */ export const getStaticPaths = (): {} => { return { fallback: false, paths: process.env.markdown.paths.articles }; }; /** * Returns the static props. * @param {IStaticProps} context - The context. */ export const getStaticProps = async (context: IStaticProps): Promise<{}> => { const articles = process.env.markdown.pages.articles[context.params.name]; if (!articles?.[context.locale]) { return { notFound: true }; } return { props: { articles, name: context.params.name, redirectLanguageToIndex: Object.keys(articles).length === 1 } }; }; export default Article;